problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_29361 | rasdani/github-patches | git_diff | gratipay__gratipay.com-1735 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
per-user charts are scrunched
After #1711 the per-user charts are scrunched:

cc: @dominic @seanlinsley
fake_data paydays should include transfers
We have paydays with `make data`, but we don't have any transfers. We need transfers to see the user charts in dev (cf. #1717).
Charts are too wide in mobile browsers
<img width="33%" src="https://f.cloud.github.com/assets/688886/1649646/413a1054-5a18-11e3-8acb-2d1d5da19d72.png"> <img width="33%" src="https://f.cloud.github.com/assets/688886/1649647/413b9956-5a18-11e3-87c7-582ef4525553.png"> <img width="33%" src="https://f.cloud.github.com/assets/688886/1649655/efdc9cbc-5a18-11e3-81a3-9fab62fd0be7.png">
</issue>
<code>
[start of gittip/utils/fake_data.py]
1 from faker import Factory
2 from gittip import wireup, MAX_TIP, MIN_TIP
3 from gittip.models.participant import Participant
4
5 import decimal
6 import random
7 import string
8 import datetime
9
10
11 faker = Factory.create()
12
13 platforms = ['github', 'twitter', 'bitbucket']
14
15
16 def _fake_thing(db, tablename, **kw):
17 column_names = []
18 column_value_placeholders = []
19 column_values = []
20
21 for k,v in kw.items():
22 column_names.append(k)
23 column_value_placeholders.append("%s")
24 column_values.append(v)
25
26 column_names = ", ".join(column_names)
27 column_value_placeholders = ", ".join(column_value_placeholders)
28
29 db.run( "INSERT INTO {} ({}) VALUES ({})"
30 .format(tablename, column_names, column_value_placeholders)
31 , column_values
32 )
33 return kw
34
35
36 def fake_text_id(size=6, chars=string.ascii_lowercase + string.digits):
37 """Create a random text id.
38 """
39 return ''.join(random.choice(chars) for x in range(size))
40
41
42 def fake_balance(max_amount=100):
43 """Return a random amount between 0 and max_amount.
44 """
45 return random.random() * max_amount
46
47
48 def fake_int_id(nmax=2 ** 31 -1):
49 """Create a random int id.
50 """
51 return random.randint(0, nmax)
52
53
54 def fake_participant(db, number="singular", is_admin=False, anonymous=False):
55 """Create a fake User.
56 """
57 username = faker.firstName() + fake_text_id(3)
58 d = _fake_thing( db
59 , "participants"
60 , id=fake_int_id()
61 , username=username
62 , username_lower=username.lower()
63 , statement=faker.sentence()
64 , ctime=faker.dateTimeThisYear()
65 , is_admin=is_admin
66 , balance=fake_balance()
67 , anonymous=anonymous
68 , goal=fake_balance()
69 , balanced_account_uri=faker.uri()
70 , last_ach_result=''
71 , is_suspicious=False
72 , last_bill_result='' # Needed to not be suspicious
73 , claimed_time=faker.dateTimeThisYear()
74 , number=number
75 )
76 #Call participant constructor to perform other DB initialization
77 return Participant.from_username(username)
78
79
80
81 def fake_tip_amount():
82 amount = ((decimal.Decimal(random.random()) * (MAX_TIP - MIN_TIP))
83 + MIN_TIP)
84
85 decimal_amount = decimal.Decimal(amount).quantize(decimal.Decimal('.01'))
86
87 return decimal_amount
88
89
90 def fake_tip(db, tipper, tippee):
91 """Create a fake tip.
92 """
93 return _fake_thing( db
94 , "tips"
95 , id=fake_int_id()
96 , ctime=faker.dateTimeThisYear()
97 , mtime=faker.dateTimeThisMonth()
98 , tipper=tipper.username
99 , tippee=tippee.username
100 , amount=fake_tip_amount()
101 )
102
103
104 def fake_elsewhere(db, participant, platform=None):
105 """Create a fake elsewhere.
106 """
107 if platform is None:
108 platform = random.choice(platforms)
109
110 info_templates = {
111 "github": {
112 "name": participant.username,
113 "html_url": "https://github.com/" + participant.username,
114 "type": "User",
115 "login": participant.username
116 },
117 "twitter": {
118 "name": participant.username,
119 "html_url": "https://twitter.com/" + participant.username,
120 "screen_name": participant.username
121 },
122 "bitbucket": {
123 "display_name": participant.username,
124 "username": participant.username,
125 "is_team": "False",
126 "html_url": "https://bitbucket.org/" + participant.username,
127 }
128 }
129
130 _fake_thing( db
131 , "elsewhere"
132 , id=fake_int_id()
133 , platform=platform
134 , user_id=fake_text_id()
135 , is_locked=False
136 , participant=participant.username
137 , user_info=info_templates[platform]
138 )
139
140 def fake_transfer(db, tipper, tippee):
141 return _fake_thing( db
142 , "transfers"
143 , id=fake_int_id()
144 , timestamp=faker.dateTimeThisYear()
145 , tipper=tipper.username
146 , tippee=tippee.username
147 , amount=fake_tip_amount()
148 )
149
150
151 def populate_db(db, num_participants=300, num_tips=200, num_teams=5, num_transfers=300):
152 """Populate DB with fake data.
153 """
154 #Make the participants
155 participants = []
156 for i in xrange(num_participants):
157 participants.append(fake_participant(db))
158
159 #Make the "Elsewhere's"
160 for p in participants:
161 #All participants get between 1 and 3 elsewheres
162 num_elsewheres = random.randint(1, 3)
163 for platform_name in platforms[:num_elsewheres]:
164 fake_elsewhere(db, p, platform_name)
165
166 #Make teams
167 for i in xrange(num_teams):
168 t = fake_participant(db, number="plural")
169 #Add 1 to 3 members to the team
170 members = random.sample(participants, random.randint(1, 3))
171 for p in members:
172 t.add_member(p)
173
174 #Make the tips
175 tips = []
176 for i in xrange(num_tips):
177 tipper, tippee = random.sample(participants, 2)
178 tips.append(fake_tip(db, tipper, tippee))
179
180
181 #Make the transfers
182 transfers = []
183 for i in xrange(num_tips):
184 tipper, tippee = random.sample(participants, 2)
185 transfers.append(fake_transfer(db, tipper, tippee))
186
187
188 #Make some paydays
189 #First determine the boundaries - min and max date
190 min_date = min(min(x['ctime'] for x in tips), \
191 min(x['timestamp'] for x in transfers))
192 max_date = max(max(x['ctime'] for x in tips), \
193 max(x['timestamp'] for x in transfers))
194 #iterate through min_date, max_date one week at a time
195 date = min_date
196 while date < max_date:
197 end_date = date + datetime.timedelta(days=7)
198 week_tips = filter(lambda x: date <= x['ctime'] <= end_date, tips)
199 week_transfers = filter(lambda x: date <= x['timestamp'] <= end_date, transfers)
200 week_participants = filter(lambda x: x.ctime.replace(tzinfo=None) <= end_date, participants)
201 actives=set()
202 tippers=set()
203 for xfers in week_tips, week_transfers:
204 actives.update(x['tipper'] for x in xfers)
205 actives.update(x['tippee'] for x in xfers)
206 tippers.update(x['tipper'] for x in xfers)
207 payday = {
208 'id': fake_int_id(),
209 'ts_start': date,
210 'ts_end': end_date,
211 'ntips': len(week_tips),
212 'ntransfers': len(week_transfers),
213 'nparticipants': len(week_participants),
214 'ntippers': len(tippers),
215 'nactive': len(actives),
216 'transfer_volume': sum(x['amount'] for x in week_transfers)
217 }
218 #Make ach_volume and charge_volume between 0 and 10% of transfer volume
219 def rand_part():
220 return decimal.Decimal(random.random()* 0.1)
221 payday['ach_volume'] = -1 * payday['transfer_volume'] * rand_part()
222 payday['charge_volume'] = payday['transfer_volume'] * rand_part()
223 _fake_thing(db, "paydays", **payday)
224 date = end_date
225
226
227
228 def main():
229 populate_db(wireup.db())
230
231 if __name__ == '__main__':
232 main()
233
[end of gittip/utils/fake_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gittip/utils/fake_data.py b/gittip/utils/fake_data.py
--- a/gittip/utils/fake_data.py
+++ b/gittip/utils/fake_data.py
@@ -51,6 +51,12 @@
return random.randint(0, nmax)
+def fake_sentence(start=1, stop=100):
+ """Create a sentence of random length.
+ """
+ return faker.sentence(random.randrange(start,stop))
+
+
def fake_participant(db, number="singular", is_admin=False, anonymous=False):
"""Create a fake User.
"""
@@ -60,7 +66,7 @@
, id=fake_int_id()
, username=username
, username_lower=username.lower()
- , statement=faker.sentence()
+ , statement=fake_sentence()
, ctime=faker.dateTimeThisYear()
, is_admin=is_admin
, balance=fake_balance()
@@ -148,7 +154,7 @@
)
-def populate_db(db, num_participants=300, num_tips=200, num_teams=5, num_transfers=300):
+def populate_db(db, num_participants=100, num_tips=200, num_teams=5, num_transfers=5000):
"""Populate DB with fake data.
"""
#Make the participants
@@ -180,7 +186,7 @@
#Make the transfers
transfers = []
- for i in xrange(num_tips):
+ for i in xrange(num_transfers):
tipper, tippee = random.sample(participants, 2)
transfers.append(fake_transfer(db, tipper, tippee))
|
{"golden_diff": "diff --git a/gittip/utils/fake_data.py b/gittip/utils/fake_data.py\n--- a/gittip/utils/fake_data.py\n+++ b/gittip/utils/fake_data.py\n@@ -51,6 +51,12 @@\n return random.randint(0, nmax)\n \n \n+def fake_sentence(start=1, stop=100):\n+ \"\"\"Create a sentence of random length.\n+ \"\"\"\n+ return faker.sentence(random.randrange(start,stop))\n+\n+\n def fake_participant(db, number=\"singular\", is_admin=False, anonymous=False):\n \"\"\"Create a fake User.\n \"\"\"\n@@ -60,7 +66,7 @@\n , id=fake_int_id()\n , username=username\n , username_lower=username.lower()\n- , statement=faker.sentence()\n+ , statement=fake_sentence()\n , ctime=faker.dateTimeThisYear()\n , is_admin=is_admin\n , balance=fake_balance()\n@@ -148,7 +154,7 @@\n )\n \n \n-def populate_db(db, num_participants=300, num_tips=200, num_teams=5, num_transfers=300):\n+def populate_db(db, num_participants=100, num_tips=200, num_teams=5, num_transfers=5000):\n \"\"\"Populate DB with fake data.\n \"\"\"\n #Make the participants\n@@ -180,7 +186,7 @@\n \n #Make the transfers\n transfers = []\n- for i in xrange(num_tips):\n+ for i in xrange(num_transfers):\n tipper, tippee = random.sample(participants, 2)\n transfers.append(fake_transfer(db, tipper, tippee))\n", "issue": "per-user charts are scrunched\nAfter #1711 the per-user charts are scrunched:\n\n\n\ncc: @dominic @seanlinsley \n\nfake_data paydays should include transfers\nWe have paydays with `make data`, but we don't have any transfers. We need transfers to see the user charts in dev (cf. #1717).\n\nCharts are too wide in mobile browsers\n<img width=\"33%\" src=\"https://f.cloud.github.com/assets/688886/1649646/413a1054-5a18-11e3-8acb-2d1d5da19d72.png\"> <img width=\"33%\" src=\"https://f.cloud.github.com/assets/688886/1649647/413b9956-5a18-11e3-87c7-582ef4525553.png\"> <img width=\"33%\" src=\"https://f.cloud.github.com/assets/688886/1649655/efdc9cbc-5a18-11e3-81a3-9fab62fd0be7.png\">\n\n", "before_files": [{"content": "from faker import Factory\nfrom gittip import wireup, MAX_TIP, MIN_TIP\nfrom gittip.models.participant import Participant\n\nimport decimal\nimport random\nimport string\nimport datetime\n\n\nfaker = Factory.create()\n\nplatforms = ['github', 'twitter', 'bitbucket']\n\n\ndef _fake_thing(db, tablename, **kw):\n column_names = []\n column_value_placeholders = []\n column_values = []\n\n for k,v in kw.items():\n column_names.append(k)\n column_value_placeholders.append(\"%s\")\n column_values.append(v)\n\n column_names = \", \".join(column_names)\n column_value_placeholders = \", \".join(column_value_placeholders)\n\n db.run( \"INSERT INTO {} ({}) VALUES ({})\"\n .format(tablename, column_names, column_value_placeholders)\n , column_values\n )\n return kw\n\n\ndef fake_text_id(size=6, chars=string.ascii_lowercase + string.digits):\n \"\"\"Create a random text id.\n \"\"\"\n return ''.join(random.choice(chars) for x in range(size))\n\n\ndef fake_balance(max_amount=100):\n \"\"\"Return a random amount between 0 and max_amount.\n \"\"\"\n return random.random() * max_amount\n\n\ndef fake_int_id(nmax=2 ** 31 -1):\n \"\"\"Create a random int id.\n \"\"\"\n return random.randint(0, nmax)\n\n\ndef fake_participant(db, number=\"singular\", is_admin=False, anonymous=False):\n \"\"\"Create a fake User.\n \"\"\"\n username = faker.firstName() + fake_text_id(3)\n d = _fake_thing( db\n , \"participants\"\n , id=fake_int_id()\n , username=username\n , username_lower=username.lower()\n , statement=faker.sentence()\n , ctime=faker.dateTimeThisYear()\n , 
is_admin=is_admin\n , balance=fake_balance()\n , anonymous=anonymous\n , goal=fake_balance()\n , balanced_account_uri=faker.uri()\n , last_ach_result=''\n , is_suspicious=False\n , last_bill_result='' # Needed to not be suspicious\n , claimed_time=faker.dateTimeThisYear()\n , number=number\n )\n #Call participant constructor to perform other DB initialization\n return Participant.from_username(username)\n\n\n\ndef fake_tip_amount():\n amount = ((decimal.Decimal(random.random()) * (MAX_TIP - MIN_TIP))\n + MIN_TIP)\n\n decimal_amount = decimal.Decimal(amount).quantize(decimal.Decimal('.01'))\n\n return decimal_amount\n\n\ndef fake_tip(db, tipper, tippee):\n \"\"\"Create a fake tip.\n \"\"\"\n return _fake_thing( db\n , \"tips\"\n , id=fake_int_id()\n , ctime=faker.dateTimeThisYear()\n , mtime=faker.dateTimeThisMonth()\n , tipper=tipper.username\n , tippee=tippee.username\n , amount=fake_tip_amount()\n )\n\n\ndef fake_elsewhere(db, participant, platform=None):\n \"\"\"Create a fake elsewhere.\n \"\"\"\n if platform is None:\n platform = random.choice(platforms)\n\n info_templates = {\n \"github\": {\n \"name\": participant.username,\n \"html_url\": \"https://github.com/\" + participant.username,\n \"type\": \"User\",\n \"login\": participant.username\n },\n \"twitter\": {\n \"name\": participant.username,\n \"html_url\": \"https://twitter.com/\" + participant.username,\n \"screen_name\": participant.username\n },\n \"bitbucket\": {\n \"display_name\": participant.username,\n \"username\": participant.username,\n \"is_team\": \"False\",\n \"html_url\": \"https://bitbucket.org/\" + participant.username,\n }\n }\n\n _fake_thing( db\n , \"elsewhere\"\n , id=fake_int_id()\n , platform=platform\n , user_id=fake_text_id()\n , is_locked=False\n , participant=participant.username\n , user_info=info_templates[platform]\n )\n\ndef fake_transfer(db, tipper, tippee):\n return _fake_thing( db\n , \"transfers\"\n , id=fake_int_id()\n , timestamp=faker.dateTimeThisYear()\n , tipper=tipper.username\n , tippee=tippee.username\n , amount=fake_tip_amount()\n )\n\n\ndef populate_db(db, num_participants=300, num_tips=200, num_teams=5, num_transfers=300):\n \"\"\"Populate DB with fake data.\n \"\"\"\n #Make the participants\n participants = []\n for i in xrange(num_participants):\n participants.append(fake_participant(db))\n\n #Make the \"Elsewhere's\"\n for p in participants:\n #All participants get between 1 and 3 elsewheres\n num_elsewheres = random.randint(1, 3)\n for platform_name in platforms[:num_elsewheres]:\n fake_elsewhere(db, p, platform_name)\n\n #Make teams\n for i in xrange(num_teams):\n t = fake_participant(db, number=\"plural\")\n #Add 1 to 3 members to the team\n members = random.sample(participants, random.randint(1, 3))\n for p in members:\n t.add_member(p)\n\n #Make the tips\n tips = []\n for i in xrange(num_tips):\n tipper, tippee = random.sample(participants, 2)\n tips.append(fake_tip(db, tipper, tippee))\n\n\n #Make the transfers\n transfers = []\n for i in xrange(num_tips):\n tipper, tippee = random.sample(participants, 2)\n transfers.append(fake_transfer(db, tipper, tippee))\n\n\n #Make some paydays\n #First determine the boundaries - min and max date\n min_date = min(min(x['ctime'] for x in tips), \\\n min(x['timestamp'] for x in transfers))\n max_date = max(max(x['ctime'] for x in tips), \\\n max(x['timestamp'] for x in transfers))\n #iterate through min_date, max_date one week at a time\n date = min_date\n while date < max_date:\n end_date = date + datetime.timedelta(days=7)\n week_tips 
= filter(lambda x: date <= x['ctime'] <= end_date, tips)\n week_transfers = filter(lambda x: date <= x['timestamp'] <= end_date, transfers)\n week_participants = filter(lambda x: x.ctime.replace(tzinfo=None) <= end_date, participants)\n actives=set()\n tippers=set()\n for xfers in week_tips, week_transfers:\n actives.update(x['tipper'] for x in xfers)\n actives.update(x['tippee'] for x in xfers)\n tippers.update(x['tipper'] for x in xfers)\n payday = {\n 'id': fake_int_id(),\n 'ts_start': date,\n 'ts_end': end_date,\n 'ntips': len(week_tips),\n 'ntransfers': len(week_transfers),\n 'nparticipants': len(week_participants),\n 'ntippers': len(tippers),\n 'nactive': len(actives),\n 'transfer_volume': sum(x['amount'] for x in week_transfers)\n }\n #Make ach_volume and charge_volume between 0 and 10% of transfer volume\n def rand_part():\n return decimal.Decimal(random.random()* 0.1)\n payday['ach_volume'] = -1 * payday['transfer_volume'] * rand_part()\n payday['charge_volume'] = payday['transfer_volume'] * rand_part()\n _fake_thing(db, \"paydays\", **payday)\n date = end_date\n\n\n\ndef main():\n populate_db(wireup.db())\n\nif __name__ == '__main__':\n main()\n", "path": "gittip/utils/fake_data.py"}]}
| 3,227 | 385 |
gh_patches_debug_63480 | rasdani/github-patches | git_diff | ansible-collections__community.vmware-1686 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
vmware_migrate_vmk: tests fail with 7.0.3
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
I get this failure:
```
TASK [vmware_migrate_vmk : Create a new vmkernel] ********************************************************************************************************************************************************************************************************************************************************************************************************************************************
fatal: [testhost]: FAILED! => {"changed": false, "msg": "Failed to add vmk as IP address or Subnet Mask in the IP configuration are invalid or PortGroup does not exist : A specified parameter was not correct: Vim.Host.VirtualNic.Specification.Ip"}
```##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```paste below
```
</issue>
<code>
[start of plugins/modules/vmware_migrate_vmk.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
5 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
6 # SPDX-License-Identifier: GPL-3.0-or-later
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11
12 DOCUMENTATION = r'''
13 ---
14 module: vmware_migrate_vmk
15 short_description: Migrate a VMK interface from VSS to VDS
16 description:
17 - Migrate a VMK interface from VSS to VDS
18 author:
19 - Joseph Callen (@jcpowermac)
20 - Russell Teague (@mtnbikenc)
21 options:
22 esxi_hostname:
23 description:
24 - ESXi hostname to be managed
25 required: true
26 type: str
27 device:
28 description:
29 - VMK interface name
30 required: true
31 type: str
32 current_switch_name:
33 description:
34 - Switch VMK interface is currently on
35 required: true
36 type: str
37 current_portgroup_name:
38 description:
39 - Portgroup name VMK interface is currently on
40 required: true
41 type: str
42 migrate_switch_name:
43 description:
44 - Switch name to migrate VMK interface to
45 required: true
46 type: str
47 migrate_portgroup_name:
48 description:
49 - Portgroup name to migrate VMK interface to
50 required: true
51 type: str
52 migrate_vlan_id:
53 version_added: '2.4.0'
54 description:
55 - VLAN to use for the VMK interface when migrating from VDS to VSS
56 - Will be ignored when migrating from VSS to VDS
57 type: int
58 extends_documentation_fragment:
59 - community.vmware.vmware.documentation
60
61 '''
62
63 EXAMPLES = r'''
64 - name: Migrate Management vmk
65 community.vmware.vmware_migrate_vmk:
66 hostname: "{{ vcenter_hostname }}"
67 username: "{{ vcenter_username }}"
68 password: "{{ vcenter_password }}"
69 esxi_hostname: "{{ esxi_hostname }}"
70 device: vmk1
71 current_switch_name: temp_vswitch
72 current_portgroup_name: esx-mgmt
73 migrate_switch_name: dvSwitch
74 migrate_portgroup_name: Management
75 delegate_to: localhost
76 '''
77 try:
78 from pyVmomi import vim, vmodl
79 HAS_PYVMOMI = True
80 except ImportError:
81 HAS_PYVMOMI = False
82
83 from ansible.module_utils.basic import AnsibleModule
84 from ansible_collections.community.vmware.plugins.module_utils.vmware import (
85 vmware_argument_spec, find_dvs_by_name, find_hostsystem_by_name,
86 connect_to_api, find_dvspg_by_name)
87
88
89 class VMwareMigrateVmk(object):
90
91 def __init__(self, module):
92 self.module = module
93 self.host_system = None
94 self.migrate_switch_name = self.module.params['migrate_switch_name']
95 self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
96 self.migrate_vlan_id = self.module.params['migrate_vlan_id']
97 self.device = self.module.params['device']
98 self.esxi_hostname = self.module.params['esxi_hostname']
99 self.current_portgroup_name = self.module.params['current_portgroup_name']
100 self.current_switch_name = self.module.params['current_switch_name']
101 self.content = connect_to_api(module)
102
103 def process_state(self):
104 try:
105 vmk_migration_states = {
106 'migrate_vss_vds': self.state_migrate_vss_vds,
107 'migrate_vds_vss': self.state_migrate_vds_vss,
108 'migrated': self.state_exit_unchanged
109 }
110
111 vmk_migration_states[self.check_vmk_current_state()]()
112
113 except vmodl.RuntimeFault as runtime_fault:
114 self.module.fail_json(msg=runtime_fault.msg)
115 except vmodl.MethodFault as method_fault:
116 self.module.fail_json(msg=method_fault.msg)
117 except Exception as e:
118 self.module.fail_json(msg=str(e))
119
120 def state_exit_unchanged(self):
121 self.module.exit_json(changed=False)
122
123 def create_host_vnic_config_vds_vss(self):
124 host_vnic_config = vim.host.VirtualNic.Config()
125 host_vnic_config.spec = vim.host.VirtualNic.Specification()
126 host_vnic_config.changeOperation = "edit"
127 host_vnic_config.device = self.device
128 host_vnic_config.spec.portgroup = self.migrate_portgroup_name
129 return host_vnic_config
130
131 def create_port_group_config_vds_vss(self):
132 port_group_config = vim.host.PortGroup.Config()
133 port_group_config.spec = vim.host.PortGroup.Specification()
134 port_group_config.changeOperation = "add"
135 port_group_config.spec.name = self.migrate_portgroup_name
136 port_group_config.spec.vlanId = self.migrate_vlan_id if self.migrate_vlan_id is not None else 0
137 port_group_config.spec.vswitchName = self.migrate_switch_name
138 port_group_config.spec.policy = vim.host.NetworkPolicy()
139 return port_group_config
140
141 def state_migrate_vds_vss(self):
142 host_network_system = self.host_system.configManager.networkSystem
143 config = vim.host.NetworkConfig()
144 config.portgroup = [self.create_port_group_config_vds_vss()]
145 host_network_system.UpdateNetworkConfig(config, "modify")
146 config = vim.host.NetworkConfig()
147 config.vnic = [self.create_host_vnic_config_vds_vss()]
148 host_network_system.UpdateNetworkConfig(config, "modify")
149 self.module.exit_json(changed=True)
150
151 def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
152 host_vnic_config = vim.host.VirtualNic.Config()
153 host_vnic_config.spec = vim.host.VirtualNic.Specification()
154
155 host_vnic_config.changeOperation = "edit"
156 host_vnic_config.device = self.device
157 host_vnic_config.portgroup = ""
158 host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
159 host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
160 host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key
161
162 return host_vnic_config
163
164 def create_port_group_config(self):
165 port_group_config = vim.host.PortGroup.Config()
166 port_group_config.spec = vim.host.PortGroup.Specification()
167
168 port_group_config.changeOperation = "remove"
169 port_group_config.spec.name = self.current_portgroup_name
170 port_group_config.spec.vlanId = -1
171 port_group_config.spec.vswitchName = self.current_switch_name
172 port_group_config.spec.policy = vim.host.NetworkPolicy()
173
174 return port_group_config
175
176 def state_migrate_vss_vds(self):
177 host_network_system = self.host_system.configManager.networkSystem
178
179 dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
180 pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)
181
182 config = vim.host.NetworkConfig()
183 config.portgroup = [self.create_port_group_config()]
184 config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
185 host_network_system.UpdateNetworkConfig(config, "modify")
186 self.module.exit_json(changed=True)
187
188 def check_vmk_current_state(self):
189 self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)
190
191 for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
192 if vnic.device == self.device:
193 if vnic.spec.distributedVirtualPort is None:
194 std_vswitches = [vswitch.name for vswitch in self.host_system.configManager.networkSystem.networkInfo.vswitch]
195 if self.current_switch_name not in std_vswitches:
196 return "migrated"
197 if vnic.portgroup == self.current_portgroup_name:
198 return "migrate_vss_vds"
199 else:
200 dvs = find_dvs_by_name(self.content, self.current_switch_name)
201 if dvs is None:
202 return "migrated"
203 if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
204 return "migrate_vds_vss"
205
206
207 def main():
208
209 argument_spec = vmware_argument_spec()
210 argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
211 device=dict(required=True, type='str'),
212 current_switch_name=dict(required=True, type='str'),
213 current_portgroup_name=dict(required=True, type='str'),
214 migrate_switch_name=dict(required=True, type='str'),
215 migrate_portgroup_name=dict(required=True, type='str'),
216 migrate_vlan_id=dict(required=False, type='int')))
217
218 module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
219
220 if not HAS_PYVMOMI:
221 module.fail_json(msg='pyvmomi required for this module')
222
223 vmware_migrate_vmk = VMwareMigrateVmk(module)
224 vmware_migrate_vmk.process_state()
225
226
227 if __name__ == '__main__':
228 main()
229
[end of plugins/modules/vmware_migrate_vmk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/modules/vmware_migrate_vmk.py b/plugins/modules/vmware_migrate_vmk.py
--- a/plugins/modules/vmware_migrate_vmk.py
+++ b/plugins/modules/vmware_migrate_vmk.py
@@ -203,6 +203,8 @@
if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
return "migrate_vds_vss"
+ self.module.fail_json(msg='Unable to find the specified device %s.' % self.device)
+
def main():
|
{"golden_diff": "diff --git a/plugins/modules/vmware_migrate_vmk.py b/plugins/modules/vmware_migrate_vmk.py\n--- a/plugins/modules/vmware_migrate_vmk.py\n+++ b/plugins/modules/vmware_migrate_vmk.py\n@@ -203,6 +203,8 @@\n if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:\n return \"migrate_vds_vss\"\n \n+ self.module.fail_json(msg='Unable to find the specified device %s.' % self.device)\n+\n \n def main():\n", "issue": "vmware_migrate_vmk: tests fail with 7.0.3\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\n\r\nI get this failure:\r\n\r\n```\r\nTASK [vmware_migrate_vmk : Create a new vmkernel] ********************************************************************************************************************************************************************************************************************************************************************************************************************************************\r\nfatal: [testhost]: FAILED! => {\"changed\": false, \"msg\": \"Failed to add vmk as IP address or Subnet Mask in the IP configuration are invalid or PortGroup does not exist : A specified parameter was not correct: Vim.Host.VirtualNic.Specification.Ip\"}\r\n```##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\n\r\n\r\n##### ACTUAL RESULTS\r\n<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes -->\r\n```paste below\r\n\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_migrate_vmk\nshort_description: Migrate a VMK interface from VSS to VDS\ndescription:\n - Migrate a VMK interface from VSS to VDS\nauthor:\n- Joseph Callen (@jcpowermac)\n- Russell Teague (@mtnbikenc)\noptions:\n esxi_hostname:\n description:\n - ESXi hostname to be managed\n required: true\n type: str\n device:\n description:\n - VMK interface name\n required: true\n type: str\n current_switch_name:\n description:\n - Switch VMK interface is currently on\n required: true\n type: str\n current_portgroup_name:\n description:\n - Portgroup name VMK interface is currently on\n required: true\n type: str\n migrate_switch_name:\n description:\n - Switch name to migrate VMK interface to\n required: true\n type: str\n migrate_portgroup_name:\n description:\n - Portgroup name to migrate VMK interface to\n required: true\n type: str\n migrate_vlan_id:\n version_added: '2.4.0'\n description:\n - VLAN to use for the VMK interface when migrating from VDS to VSS\n - Will be ignored when migrating from VSS to VDS\n type: int\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Migrate Management vmk\n community.vmware.vmware_migrate_vmk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n esxi_hostname: \"{{ esxi_hostname }}\"\n device: vmk1\n 
current_switch_name: temp_vswitch\n current_portgroup_name: esx-mgmt\n migrate_switch_name: dvSwitch\n migrate_portgroup_name: Management\n delegate_to: localhost\n'''\ntry:\n from pyVmomi import vim, vmodl\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import (\n vmware_argument_spec, find_dvs_by_name, find_hostsystem_by_name,\n connect_to_api, find_dvspg_by_name)\n\n\nclass VMwareMigrateVmk(object):\n\n def __init__(self, module):\n self.module = module\n self.host_system = None\n self.migrate_switch_name = self.module.params['migrate_switch_name']\n self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']\n self.migrate_vlan_id = self.module.params['migrate_vlan_id']\n self.device = self.module.params['device']\n self.esxi_hostname = self.module.params['esxi_hostname']\n self.current_portgroup_name = self.module.params['current_portgroup_name']\n self.current_switch_name = self.module.params['current_switch_name']\n self.content = connect_to_api(module)\n\n def process_state(self):\n try:\n vmk_migration_states = {\n 'migrate_vss_vds': self.state_migrate_vss_vds,\n 'migrate_vds_vss': self.state_migrate_vds_vss,\n 'migrated': self.state_exit_unchanged\n }\n\n vmk_migration_states[self.check_vmk_current_state()]()\n\n except vmodl.RuntimeFault as runtime_fault:\n self.module.fail_json(msg=runtime_fault.msg)\n except vmodl.MethodFault as method_fault:\n self.module.fail_json(msg=method_fault.msg)\n except Exception as e:\n self.module.fail_json(msg=str(e))\n\n def state_exit_unchanged(self):\n self.module.exit_json(changed=False)\n\n def create_host_vnic_config_vds_vss(self):\n host_vnic_config = vim.host.VirtualNic.Config()\n host_vnic_config.spec = vim.host.VirtualNic.Specification()\n host_vnic_config.changeOperation = \"edit\"\n host_vnic_config.device = self.device\n host_vnic_config.spec.portgroup = self.migrate_portgroup_name\n return host_vnic_config\n\n def create_port_group_config_vds_vss(self):\n port_group_config = vim.host.PortGroup.Config()\n port_group_config.spec = vim.host.PortGroup.Specification()\n port_group_config.changeOperation = \"add\"\n port_group_config.spec.name = self.migrate_portgroup_name\n port_group_config.spec.vlanId = self.migrate_vlan_id if self.migrate_vlan_id is not None else 0\n port_group_config.spec.vswitchName = self.migrate_switch_name\n port_group_config.spec.policy = vim.host.NetworkPolicy()\n return port_group_config\n\n def state_migrate_vds_vss(self):\n host_network_system = self.host_system.configManager.networkSystem\n config = vim.host.NetworkConfig()\n config.portgroup = [self.create_port_group_config_vds_vss()]\n host_network_system.UpdateNetworkConfig(config, \"modify\")\n config = vim.host.NetworkConfig()\n config.vnic = [self.create_host_vnic_config_vds_vss()]\n host_network_system.UpdateNetworkConfig(config, \"modify\")\n self.module.exit_json(changed=True)\n\n def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):\n host_vnic_config = vim.host.VirtualNic.Config()\n host_vnic_config.spec = vim.host.VirtualNic.Specification()\n\n host_vnic_config.changeOperation = \"edit\"\n host_vnic_config.device = self.device\n host_vnic_config.portgroup = \"\"\n host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()\n host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid\n host_vnic_config.spec.distributedVirtualPort.portgroupKey = 
portgroup_key\n\n return host_vnic_config\n\n def create_port_group_config(self):\n port_group_config = vim.host.PortGroup.Config()\n port_group_config.spec = vim.host.PortGroup.Specification()\n\n port_group_config.changeOperation = \"remove\"\n port_group_config.spec.name = self.current_portgroup_name\n port_group_config.spec.vlanId = -1\n port_group_config.spec.vswitchName = self.current_switch_name\n port_group_config.spec.policy = vim.host.NetworkPolicy()\n\n return port_group_config\n\n def state_migrate_vss_vds(self):\n host_network_system = self.host_system.configManager.networkSystem\n\n dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)\n pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)\n\n config = vim.host.NetworkConfig()\n config.portgroup = [self.create_port_group_config()]\n config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]\n host_network_system.UpdateNetworkConfig(config, \"modify\")\n self.module.exit_json(changed=True)\n\n def check_vmk_current_state(self):\n self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)\n\n for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:\n if vnic.device == self.device:\n if vnic.spec.distributedVirtualPort is None:\n std_vswitches = [vswitch.name for vswitch in self.host_system.configManager.networkSystem.networkInfo.vswitch]\n if self.current_switch_name not in std_vswitches:\n return \"migrated\"\n if vnic.portgroup == self.current_portgroup_name:\n return \"migrate_vss_vds\"\n else:\n dvs = find_dvs_by_name(self.content, self.current_switch_name)\n if dvs is None:\n return \"migrated\"\n if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:\n return \"migrate_vds_vss\"\n\n\ndef main():\n\n argument_spec = vmware_argument_spec()\n argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),\n device=dict(required=True, type='str'),\n current_switch_name=dict(required=True, type='str'),\n current_portgroup_name=dict(required=True, type='str'),\n migrate_switch_name=dict(required=True, type='str'),\n migrate_portgroup_name=dict(required=True, type='str'),\n migrate_vlan_id=dict(required=False, type='int')))\n\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)\n\n if not HAS_PYVMOMI:\n module.fail_json(msg='pyvmomi required for this module')\n\n vmware_migrate_vmk = VMwareMigrateVmk(module)\n vmware_migrate_vmk.process_state()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_migrate_vmk.py"}]}
| 3,323 | 119 |
gh_patches_debug_9427 | rasdani/github-patches | git_diff | lra__mackup-1244 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Does Mackup still sync SSH keys by default?
Today I was using Mackup and I noticed this in the help documentation:
> By default, Mackup syncs all application data (including private keys!) via Dropbox, but may be configured to exclude applications or use a different backend with a .mackup.cfg file.
I really like Mackup—it saves a ton of time when setting up a new computer. However, the idea of automatically syncing SSH keys by default really scares me. A few years ago I accidentally exposed an SSH key and someone used it to charge a few thousand dollars to AWS for my company. I'd really like to avoid doing anything like this again in the future.
In reading through #512 and #109, it looks like this behavior was turned off. However, the help documentation doesn't seem to indicate that. So which one is correct? I feel strongly that synching private keys by default is not obvious behavior, and it has the potential to have some serious consequences.
Also, will Mackup sync other types of sensitive information in the future? What scares me most about this is not necessarily what Mackup is syncing today, but what it might add in the future that I don't notice.
Thanks!
</issue>
<code>
[start of mackup/main.py]
1 """Mackup.
2
3 Keep your application settings in sync.
4 Copyright (C) 2013-2015 Laurent Raufaste <http://glop.org/>
5
6 Usage:
7 mackup list
8 mackup [options] backup
9 mackup [options] restore
10 mackup [options] uninstall
11 mackup (-h | --help)
12 mackup --version
13
14 Options:
15 -h --help Show this screen.
16 -f --force Force every question asked to be answered with "Yes".
17 -n --dry-run Show steps without executing.
18 -v --verbose Show additional details.
19 --version Show version.
20
21 Modes of action:
22 1. list: display a list of all supported applications.
23 2. backup: sync your conf files to your synced storage, use this the 1st time
24 you use Mackup. (Note that by default this will sync private keys used by
25 GnuPG.)
26 3. restore: link the conf files already in your synced storage on your system,
27 use it on any new system you use.
28 4. uninstall: reset everything as it was before using Mackup.
29
30 By default, Mackup syncs all application data (including some private keys!)
31 via Dropbox, but may be configured to exclude applications or use a different
32 backend with a .mackup.cfg file.
33
34 See https://github.com/lra/mackup/tree/master/doc for more information.
35
36 """
37 from docopt import docopt
38 from .appsdb import ApplicationsDatabase
39 from .application import ApplicationProfile
40 from .constants import MACKUP_APP_NAME, VERSION
41 from .mackup import Mackup
42 from . import utils
43
44
45 class ColorFormatCodes:
46 BLUE = '\033[34m'
47 BOLD = '\033[1m'
48 NORMAL = '\033[0m'
49
50
51 def header(str):
52 return ColorFormatCodes.BLUE + str + ColorFormatCodes.NORMAL
53
54
55 def bold(str):
56 return ColorFormatCodes.BOLD + str + ColorFormatCodes.NORMAL
57
58
59 def main():
60 """Main function."""
61 # Get the command line arg
62 args = docopt(__doc__, version="Mackup {}".format(VERSION))
63
64 mckp = Mackup()
65 app_db = ApplicationsDatabase()
66
67 def printAppHeader(app_name):
68 if verbose:
69 print(("\n{0} {1} {0}").format(header("---"), bold(app_name)))
70
71 # If we want to answer mackup with "yes" for each question
72 if args['--force']:
73 utils.FORCE_YES = True
74
75 dry_run = args['--dry-run']
76
77 verbose = args['--verbose']
78
79 if args['backup']:
80 # Check the env where the command is being run
81 mckp.check_for_usable_backup_env()
82
83 # Backup each application
84 for app_name in sorted(mckp.get_apps_to_backup()):
85 app = ApplicationProfile(mckp,
86 app_db.get_files(app_name),
87 dry_run,
88 verbose)
89 printAppHeader(app_name)
90 app.backup()
91
92 elif args['restore']:
93 # Check the env where the command is being run
94 mckp.check_for_usable_restore_env()
95
96 # Restore the Mackup config before any other config, as we might need
97 # it to know about custom settings
98 mackup_app = ApplicationProfile(mckp,
99 app_db.get_files(MACKUP_APP_NAME),
100 dry_run,
101 verbose)
102 printAppHeader(MACKUP_APP_NAME)
103 mackup_app.restore()
104
105 # Initialize again the apps db, as the Mackup config might have changed
106 # it
107 mckp = Mackup()
108 app_db = ApplicationsDatabase()
109
110 # Restore the rest of the app configs, using the restored Mackup config
111 app_names = mckp.get_apps_to_backup()
112 # Mackup has already been done
113 app_names.discard(MACKUP_APP_NAME)
114
115 for app_name in sorted(app_names):
116 app = ApplicationProfile(mckp,
117 app_db.get_files(app_name),
118 dry_run,
119 verbose)
120 printAppHeader(app_name)
121 app.restore()
122
123 elif args['uninstall']:
124 # Check the env where the command is being run
125 mckp.check_for_usable_restore_env()
126
127 if dry_run or (
128 utils.confirm("You are going to uninstall Mackup.\n"
129 "Every configuration file, setting and dotfile"
130 " managed by Mackup will be unlinked and moved back"
131 " to their original place, in your home folder.\n"
132 "Are you sure ?")):
133
134 # Uninstall the apps except Mackup, which we'll uninstall last, to
135 # keep the settings as long as possible
136 app_names = mckp.get_apps_to_backup()
137 app_names.discard(MACKUP_APP_NAME)
138
139 for app_name in sorted(app_names):
140 app = ApplicationProfile(mckp,
141 app_db.get_files(app_name),
142 dry_run,
143 verbose)
144 printAppHeader(app_name)
145 app.uninstall()
146
147 # Restore the Mackup config before any other config, as we might
148 # need it to know about custom settings
149 mackup_app = ApplicationProfile(mckp,
150 app_db.get_files(MACKUP_APP_NAME),
151 dry_run,
152 verbose)
153 mackup_app.uninstall()
154
155 # Delete the Mackup folder in Dropbox
156 # Don't delete this as there might be other Macs that aren't
157 # uninstalled yet
158 # delete(mckp.mackup_folder)
159
160 print("\n"
161 "All your files have been put back into place. You can now"
162 " safely uninstall Mackup.\n"
163 "\n"
164 "Thanks for using Mackup !")
165
166 elif args['list']:
167 # Display the list of supported applications
168 mckp.check_for_usable_environment()
169 output = "Supported applications:\n"
170 for app_name in sorted(app_db.get_app_names()):
171 output += " - {}\n".format(app_name)
172 output += "\n"
173 output += ("{} applications supported in Mackup v{}"
174 .format(len(app_db.get_app_names()), VERSION))
175 print(output)
176
177 # Delete the tmp folder
178 mckp.clean_temp_folder()
179
[end of mackup/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mackup/main.py b/mackup/main.py
--- a/mackup/main.py
+++ b/mackup/main.py
@@ -27,8 +27,8 @@
use it on any new system you use.
4. uninstall: reset everything as it was before using Mackup.
-By default, Mackup syncs all application data (including some private keys!)
-via Dropbox, but may be configured to exclude applications or use a different
+By default, Mackup syncs all application data (except for private keys) via
+Dropbox, but may be configured to exclude applications or use a different
backend with a .mackup.cfg file.
See https://github.com/lra/mackup/tree/master/doc for more information.
|
{"golden_diff": "diff --git a/mackup/main.py b/mackup/main.py\n--- a/mackup/main.py\n+++ b/mackup/main.py\n@@ -27,8 +27,8 @@\n use it on any new system you use.\n 4. uninstall: reset everything as it was before using Mackup.\n \n-By default, Mackup syncs all application data (including some private keys!)\n-via Dropbox, but may be configured to exclude applications or use a different\n+By default, Mackup syncs all application data (except for private keys) via\n+Dropbox, but may be configured to exclude applications or use a different\n backend with a .mackup.cfg file.\n \n See https://github.com/lra/mackup/tree/master/doc for more information.\n", "issue": "Does Mackup still sync SSH keys by default?\nToday I was using Mackup and I noticed this in the help documentation:\r\n\r\n> By default, Mackup syncs all application data (including private keys!) via Dropbox, but may be configured to exclude applications or use a different backend with a .mackup.cfg file.\r\n\r\nI really like Mackup\u2014it saves a ton of time when setting up a new computer. However, the idea of automatically syncing SSH keys by default really scares me. A few years ago I accidentally exposed an SSH key and someone used it to charge a few thousand dollars to AWS for my company. I'd really like to avoid doing anything like this again in the future.\r\n\r\nIn reading through #512 and #109, it looks like this behavior was turned off. However, the help documentation doesn't seem to indicate that. So which one is correct? I feel strongly that synching private keys by default is not obvious behavior, and it has the potential to have some serious consequences.\r\n\r\nAlso, will Mackup sync other types of sensitive information in the future? What scares me most about this is not necessarily what Mackup is syncing today, but what it might add in the future that I don't notice.\r\n\r\nThanks!\n", "before_files": [{"content": "\"\"\"Mackup.\n\nKeep your application settings in sync.\nCopyright (C) 2013-2015 Laurent Raufaste <http://glop.org/>\n\nUsage:\n mackup list\n mackup [options] backup\n mackup [options] restore\n mackup [options] uninstall\n mackup (-h | --help)\n mackup --version\n\nOptions:\n -h --help Show this screen.\n -f --force Force every question asked to be answered with \"Yes\".\n -n --dry-run Show steps without executing.\n -v --verbose Show additional details.\n --version Show version.\n\nModes of action:\n 1. list: display a list of all supported applications.\n 2. backup: sync your conf files to your synced storage, use this the 1st time\n you use Mackup. (Note that by default this will sync private keys used by\n GnuPG.)\n 3. restore: link the conf files already in your synced storage on your system,\n use it on any new system you use.\n 4. uninstall: reset everything as it was before using Mackup.\n\nBy default, Mackup syncs all application data (including some private keys!)\nvia Dropbox, but may be configured to exclude applications or use a different\nbackend with a .mackup.cfg file.\n\nSee https://github.com/lra/mackup/tree/master/doc for more information.\n\n\"\"\"\nfrom docopt import docopt\nfrom .appsdb import ApplicationsDatabase\nfrom .application import ApplicationProfile\nfrom .constants import MACKUP_APP_NAME, VERSION\nfrom .mackup import Mackup\nfrom . 
import utils\n\n\nclass ColorFormatCodes:\n BLUE = '\\033[34m'\n BOLD = '\\033[1m'\n NORMAL = '\\033[0m'\n\n\ndef header(str):\n return ColorFormatCodes.BLUE + str + ColorFormatCodes.NORMAL\n\n\ndef bold(str):\n return ColorFormatCodes.BOLD + str + ColorFormatCodes.NORMAL\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n # Get the command line arg\n args = docopt(__doc__, version=\"Mackup {}\".format(VERSION))\n\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n def printAppHeader(app_name):\n if verbose:\n print((\"\\n{0} {1} {0}\").format(header(\"---\"), bold(app_name)))\n\n # If we want to answer mackup with \"yes\" for each question\n if args['--force']:\n utils.FORCE_YES = True\n\n dry_run = args['--dry-run']\n\n verbose = args['--verbose']\n\n if args['backup']:\n # Check the env where the command is being run\n mckp.check_for_usable_backup_env()\n\n # Backup each application\n for app_name in sorted(mckp.get_apps_to_backup()):\n app = ApplicationProfile(mckp,\n app_db.get_files(app_name),\n dry_run,\n verbose)\n printAppHeader(app_name)\n app.backup()\n\n elif args['restore']:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n # Restore the Mackup config before any other config, as we might need\n # it to know about custom settings\n mackup_app = ApplicationProfile(mckp,\n app_db.get_files(MACKUP_APP_NAME),\n dry_run,\n verbose)\n printAppHeader(MACKUP_APP_NAME)\n mackup_app.restore()\n\n # Initialize again the apps db, as the Mackup config might have changed\n # it\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n # Restore the rest of the app configs, using the restored Mackup config\n app_names = mckp.get_apps_to_backup()\n # Mackup has already been done\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(mckp,\n app_db.get_files(app_name),\n dry_run,\n verbose)\n printAppHeader(app_name)\n app.restore()\n\n elif args['uninstall']:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n if dry_run or (\n utils.confirm(\"You are going to uninstall Mackup.\\n\"\n \"Every configuration file, setting and dotfile\"\n \" managed by Mackup will be unlinked and moved back\"\n \" to their original place, in your home folder.\\n\"\n \"Are you sure ?\")):\n\n # Uninstall the apps except Mackup, which we'll uninstall last, to\n # keep the settings as long as possible\n app_names = mckp.get_apps_to_backup()\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(mckp,\n app_db.get_files(app_name),\n dry_run,\n verbose)\n printAppHeader(app_name)\n app.uninstall()\n\n # Restore the Mackup config before any other config, as we might\n # need it to know about custom settings\n mackup_app = ApplicationProfile(mckp,\n app_db.get_files(MACKUP_APP_NAME),\n dry_run,\n verbose)\n mackup_app.uninstall()\n\n # Delete the Mackup folder in Dropbox\n # Don't delete this as there might be other Macs that aren't\n # uninstalled yet\n # delete(mckp.mackup_folder)\n\n print(\"\\n\"\n \"All your files have been put back into place. 
You can now\"\n \" safely uninstall Mackup.\\n\"\n \"\\n\"\n \"Thanks for using Mackup !\")\n\n elif args['list']:\n # Display the list of supported applications\n mckp.check_for_usable_environment()\n output = \"Supported applications:\\n\"\n for app_name in sorted(app_db.get_app_names()):\n output += \" - {}\\n\".format(app_name)\n output += \"\\n\"\n output += (\"{} applications supported in Mackup v{}\"\n .format(len(app_db.get_app_names()), VERSION))\n print(output)\n\n # Delete the tmp folder\n mckp.clean_temp_folder()\n", "path": "mackup/main.py"}]}
| 2,595 | 167 |
gh_patches_debug_16469 | rasdani/github-patches | git_diff | google-deepmind__optax-465 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better tests for utils.
Optax tests did not catch a problem with one of the type annotations in #367. This is due to `utils` not having good test coverage.
I'm marking this as "good first issue". Any tests for `utils` would be very welcome! No need to write tests for all of them at once, PRs with only a single test at a time are very welcome.
</issue>
<code>
[start of optax/_src/utils.py]
1 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Utility functions for testing."""
16
17 from typing import Optional, Tuple, Sequence
18
19 import chex
20 import jax
21 import jax.numpy as jnp
22 import jax.scipy.stats.norm as multivariate_normal
23
24 from optax._src import linear_algebra
25 from optax._src import numerics
26
27
28 def tile_second_to_last_dim(a: chex.Array) -> chex.Array:
29 ones = jnp.ones_like(a)
30 a = jnp.expand_dims(a, axis=-1)
31 return jnp.expand_dims(ones, axis=-2) * a
32
33
34 def canonicalize_dtype(
35 dtype: Optional[chex.ArrayDType]) -> Optional[chex.ArrayDType]:
36 """Canonicalise a dtype, skip if None."""
37 if dtype is not None:
38 return jax.dtypes.canonicalize_dtype(dtype)
39 return dtype
40
41
42 def cast_tree(tree: chex.ArrayTree,
43 dtype: Optional[chex.ArrayDType]) -> chex.ArrayTree:
44 """Cast tree to given dtype, skip if None."""
45 if dtype is not None:
46 return jax.tree_util.tree_map(lambda t: t.astype(dtype), tree)
47 else:
48 return tree
49
50
51 def set_diags(a: chex.Array, new_diags: chex.Array) -> chex.Array:
52 """Set the diagonals of every DxD matrix in an input of shape NxDxD.
53
54 Args:
55 a: rank 3, tensor NxDxD.
56 new_diags: NxD matrix, the new diagonals of each DxD matrix.
57
58 Returns:
59 NxDxD tensor, with the same contents as `a` but with the diagonal
60 changed to `new_diags`.
61 """
62 n, d, d1 = a.shape
63 assert d == d1
64
65 indices1 = jnp.repeat(jnp.arange(n), d)
66 indices2 = jnp.tile(jnp.arange(d), n)
67 indices3 = indices2
68
69 # Use numpy array setting
70 a = a.at[indices1, indices2, indices3].set(new_diags.flatten())
71 return a
72
73
74 class MultiNormalDiagFromLogScale():
75 """MultiNormalDiag which directly exposes its input parameters."""
76
77 def __init__(self, loc: chex.Array, log_scale: chex.Array):
78 self._log_scale = log_scale
79 self._scale = jnp.exp(log_scale)
80 self._mean = loc
81 self._param_shape = jax.lax.broadcast_shapes(
82 self._mean.shape, self._scale.shape)
83
84 def sample(self, shape: Sequence[int],
85 seed: chex.PRNGKey) -> chex.Array:
86 sample_shape = tuple(shape) + self._param_shape
87 return jax.random.normal(
88 seed, shape=sample_shape) * self._scale + self._mean
89
90 def log_prob(self, x: chex.Array) -> chex.Array:
91 log_prob = multivariate_normal.logpdf(x, loc=self._mean, scale=self._scale)
92 # Sum over parameter axes.
93 sum_axis = [-(i + 1) for i in range(len(self._param_shape))]
94 return jnp.sum(log_prob, axis=sum_axis)
95
96 @property
97 def log_scale(self) -> chex.Array:
98 return self._log_scale
99
100 @property
101 def params(self) -> Sequence[chex.Array]:
102 return [self._mean, self._log_scale]
103
104
105 def multi_normal(loc: chex.Array,
106 log_scale: chex.Array) -> MultiNormalDiagFromLogScale:
107 return MultiNormalDiagFromLogScale(loc=loc, log_scale=log_scale)
108
109
110 @jax.custom_vjp
111 def _scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree:
112 """Internal gradient scaling implementation."""
113 del scale # Only used for the backward pass defined in _scale_gradient_bwd.
114 return inputs
115
116
117 def _scale_gradient_fwd(inputs: chex.ArrayTree,
118 scale: float) -> Tuple[chex.ArrayTree, float]:
119 return _scale_gradient(inputs, scale), scale
120
121
122 def _scale_gradient_bwd(scale: float,
123 g: chex.ArrayTree) -> Tuple[chex.ArrayTree, None]:
124 return (jax.tree_util.tree_map(lambda g_: g_ * scale, g), None)
125
126
127 _scale_gradient.defvjp(_scale_gradient_fwd, _scale_gradient_bwd)
128
129
130 def scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree:
131 """Scales gradients for the backwards pass.
132
133 Args:
134 inputs: A nested array.
135 scale: The scale factor for the gradient on the backwards pass.
136
137 Returns:
138 An array of the same structure as `inputs`, with scaled backward gradient.
139 """
140 # Special case scales of 1. and 0. for more efficiency.
141 if scale == 1.:
142 return inputs
143 elif scale == 0.:
144 return jax.lax.stop_gradient(inputs)
145 else:
146 return _scale_gradient(inputs, scale)
147
148
149 # TODO(b/183800387): remove legacy aliases.
150 safe_norm = numerics.safe_norm
151 safe_int32_increment = numerics.safe_int32_increment
152 global_norm = linear_algebra.global_norm
153
[end of optax/_src/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optax/_src/utils.py b/optax/_src/utils.py
--- a/optax/_src/utils.py
+++ b/optax/_src/utils.py
@@ -59,8 +59,22 @@
NxDxD tensor, with the same contents as `a` but with the diagonal
changed to `new_diags`.
"""
+ a_dim, new_diags_dim = len(a.shape), len(new_diags.shape)
+ if a_dim != 3:
+ raise ValueError(f'Expected `a` to be a 3D tensor, got {a_dim}D instead')
+ if new_diags_dim != 2:
+ raise ValueError(
+ f'Expected `new_diags` to be a 2D array, got {new_diags_dim}D instead')
n, d, d1 = a.shape
- assert d == d1
+ n_diags, d_diags = new_diags.shape
+ if d != d1:
+ raise ValueError(
+ f'Shape mismatch: expected `a.shape` to be {(n, d, d)}, '
+ f'got {(n, d, d1)} instead')
+ if d_diags != d or n_diags != n:
+ raise ValueError(
+ f'Shape mismatch: expected `new_diags.shape` to be {(n, d)}, '
+ f'got {(n_diags, d_diags)} instead')
indices1 = jnp.repeat(jnp.arange(n), d)
indices2 = jnp.tile(jnp.arange(d), n)
|
{"golden_diff": "diff --git a/optax/_src/utils.py b/optax/_src/utils.py\n--- a/optax/_src/utils.py\n+++ b/optax/_src/utils.py\n@@ -59,8 +59,22 @@\n NxDxD tensor, with the same contents as `a` but with the diagonal\n changed to `new_diags`.\n \"\"\"\n+ a_dim, new_diags_dim = len(a.shape), len(new_diags.shape)\n+ if a_dim != 3:\n+ raise ValueError(f'Expected `a` to be a 3D tensor, got {a_dim}D instead')\n+ if new_diags_dim != 2:\n+ raise ValueError(\n+ f'Expected `new_diags` to be a 2D array, got {new_diags_dim}D instead')\n n, d, d1 = a.shape\n- assert d == d1\n+ n_diags, d_diags = new_diags.shape\n+ if d != d1:\n+ raise ValueError(\n+ f'Shape mismatch: expected `a.shape` to be {(n, d, d)}, '\n+ f'got {(n, d, d1)} instead')\n+ if d_diags != d or n_diags != n:\n+ raise ValueError(\n+ f'Shape mismatch: expected `new_diags.shape` to be {(n, d)}, '\n+ f'got {(n_diags, d_diags)} instead')\n \n indices1 = jnp.repeat(jnp.arange(n), d)\n indices2 = jnp.tile(jnp.arange(d), n)\n", "issue": "Better tests for utils.\nOptax tests did not catch a problem with one of the type annotations in #367. This is due to `utils` not having good test coverage. \r\n\r\nI'm marking this as \"good first issue\". Any tests for `utils` would be very welcome! No need to write tests for all of them at once, PRs with only a single test at a time are very welcome.\n", "before_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility functions for testing.\"\"\"\n\nfrom typing import Optional, Tuple, Sequence\n\nimport chex\nimport jax\nimport jax.numpy as jnp\nimport jax.scipy.stats.norm as multivariate_normal\n\nfrom optax._src import linear_algebra\nfrom optax._src import numerics\n\n\ndef tile_second_to_last_dim(a: chex.Array) -> chex.Array:\n ones = jnp.ones_like(a)\n a = jnp.expand_dims(a, axis=-1)\n return jnp.expand_dims(ones, axis=-2) * a\n\n\ndef canonicalize_dtype(\n dtype: Optional[chex.ArrayDType]) -> Optional[chex.ArrayDType]:\n \"\"\"Canonicalise a dtype, skip if None.\"\"\"\n if dtype is not None:\n return jax.dtypes.canonicalize_dtype(dtype)\n return dtype\n\n\ndef cast_tree(tree: chex.ArrayTree,\n dtype: Optional[chex.ArrayDType]) -> chex.ArrayTree:\n \"\"\"Cast tree to given dtype, skip if None.\"\"\"\n if dtype is not None:\n return jax.tree_util.tree_map(lambda t: t.astype(dtype), tree)\n else:\n return tree\n\n\ndef set_diags(a: chex.Array, new_diags: chex.Array) -> chex.Array:\n \"\"\"Set the diagonals of every DxD matrix in an input of shape NxDxD.\n\n Args:\n a: rank 3, tensor NxDxD.\n new_diags: NxD matrix, the new diagonals of each DxD matrix.\n\n Returns:\n NxDxD tensor, with the same contents as `a` but with the diagonal\n changed to `new_diags`.\n \"\"\"\n n, d, d1 = a.shape\n assert d == d1\n\n indices1 = jnp.repeat(jnp.arange(n), d)\n indices2 = jnp.tile(jnp.arange(d), n)\n indices3 = indices2\n\n # Use numpy array 
setting\n a = a.at[indices1, indices2, indices3].set(new_diags.flatten())\n return a\n\n\nclass MultiNormalDiagFromLogScale():\n \"\"\"MultiNormalDiag which directly exposes its input parameters.\"\"\"\n\n def __init__(self, loc: chex.Array, log_scale: chex.Array):\n self._log_scale = log_scale\n self._scale = jnp.exp(log_scale)\n self._mean = loc\n self._param_shape = jax.lax.broadcast_shapes(\n self._mean.shape, self._scale.shape)\n\n def sample(self, shape: Sequence[int],\n seed: chex.PRNGKey) -> chex.Array:\n sample_shape = tuple(shape) + self._param_shape\n return jax.random.normal(\n seed, shape=sample_shape) * self._scale + self._mean\n\n def log_prob(self, x: chex.Array) -> chex.Array:\n log_prob = multivariate_normal.logpdf(x, loc=self._mean, scale=self._scale)\n # Sum over parameter axes.\n sum_axis = [-(i + 1) for i in range(len(self._param_shape))]\n return jnp.sum(log_prob, axis=sum_axis)\n\n @property\n def log_scale(self) -> chex.Array:\n return self._log_scale\n\n @property\n def params(self) -> Sequence[chex.Array]:\n return [self._mean, self._log_scale]\n\n\ndef multi_normal(loc: chex.Array,\n log_scale: chex.Array) -> MultiNormalDiagFromLogScale:\n return MultiNormalDiagFromLogScale(loc=loc, log_scale=log_scale)\n\n\[email protected]_vjp\ndef _scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree:\n \"\"\"Internal gradient scaling implementation.\"\"\"\n del scale # Only used for the backward pass defined in _scale_gradient_bwd.\n return inputs\n\n\ndef _scale_gradient_fwd(inputs: chex.ArrayTree,\n scale: float) -> Tuple[chex.ArrayTree, float]:\n return _scale_gradient(inputs, scale), scale\n\n\ndef _scale_gradient_bwd(scale: float,\n g: chex.ArrayTree) -> Tuple[chex.ArrayTree, None]:\n return (jax.tree_util.tree_map(lambda g_: g_ * scale, g), None)\n\n\n_scale_gradient.defvjp(_scale_gradient_fwd, _scale_gradient_bwd)\n\n\ndef scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree:\n \"\"\"Scales gradients for the backwards pass.\n\n Args:\n inputs: A nested array.\n scale: The scale factor for the gradient on the backwards pass.\n\n Returns:\n An array of the same structure as `inputs`, with scaled backward gradient.\n \"\"\"\n # Special case scales of 1. and 0. for more efficiency.\n if scale == 1.:\n return inputs\n elif scale == 0.:\n return jax.lax.stop_gradient(inputs)\n else:\n return _scale_gradient(inputs, scale)\n\n\n# TODO(b/183800387): remove legacy aliases.\nsafe_norm = numerics.safe_norm\nsafe_int32_increment = numerics.safe_int32_increment\nglobal_norm = linear_algebra.global_norm\n", "path": "optax/_src/utils.py"}]}
| 2,255 | 348 |
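As a rough illustration of the kind of test the issue above asks for, here is a minimal pytest-style sketch against `set_diags`, assuming the shape-validation behaviour added in the golden diff and the module path `optax._src.utils` shown in the code listing:

    import jax.numpy as jnp
    import pytest

    from optax._src import utils


    def test_set_diags_replaces_diagonals():
        # Two 3x3 zero matrices; the new diagonals are 0..5 reshaped to (2, 3).
        a = jnp.zeros((2, 3, 3))
        new_diags = jnp.arange(6.0).reshape(2, 3)
        out = utils.set_diags(a, new_diags)
        # Only the diagonals change; they must equal the requested values.
        assert jnp.allclose(jnp.diagonal(out, axis1=-2, axis2=-1), new_diags)


    def test_set_diags_rejects_non_3d_input():
        # With the validation from the diff this raises a descriptive ValueError.
        with pytest.raises(ValueError):
            utils.set_diags(jnp.zeros((3, 3)), jnp.zeros((3,)))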
gh_patches_debug_8272 | rasdani/github-patches | git_diff | Parsl__parsl-463 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve datafuture representation
Here is an example: `<DataFuture at 0x7f305c0b1da0 state=finished returned /home/annawoodard/pnpfit/results/best-fit-cuB.root_file>`
I do not think we should append `_file` to the end of the filepath, it makes it confusing what the actual filepath is.
</issue>
<code>
[start of parsl/app/futures.py]
1 """This module implements DataFutures.
2
3 We have two basic types of futures:
4 1. DataFutures which represent data objects
5 2. AppFutures which represent the futures on App/Leaf tasks.
6 """
7 import os
8 import logging
9 from concurrent.futures import Future
10
11 from parsl.dataflow.futures import AppFuture
12 from parsl.app.errors import *
13 from parsl.data_provider.files import File
14
15 logger = logging.getLogger(__name__)
16
17 # Possible future states (for internal use by the futures package).
18 PENDING = 'PENDING'
19 RUNNING = 'RUNNING'
20 # The future was cancelled by the user...
21 CANCELLED = 'CANCELLED'
22 # ...and _Waiter.add_cancelled() was called by a worker.
23 CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
24 FINISHED = 'FINISHED'
25
26 _STATE_TO_DESCRIPTION_MAP = {
27 PENDING: "pending",
28 RUNNING: "running",
29 CANCELLED: "cancelled",
30 CANCELLED_AND_NOTIFIED: "cancelled",
31 FINISHED: "finished"
32 }
33
34
35 class DataFuture(Future):
36 """A datafuture points at an AppFuture.
37
38 We are simply wrapping a AppFuture, and adding the specific case where, if
39 the future is resolved i.e file exists, then the DataFuture is assumed to be
40 resolved.
41 """
42
43 def parent_callback(self, parent_fu):
44 """Callback from executor future to update the parent.
45
46 Args:
47 - parent_fu (Future): Future returned by the executor along with callback
48
49 Returns:
50 - None
51
52 Updates the super() with the result() or exception()
53 """
54 if parent_fu.done() is True:
55 e = parent_fu._exception
56 if e:
57 super().set_exception(e)
58 else:
59 super().set_result(parent_fu.result())
60 return
61
62 def __init__(self, fut, file_obj, parent=None, tid=None):
63 """Construct the DataFuture object.
64
65 If the file_obj is a string convert to a File.
66
67 Args:
68 - fut (AppFuture) : AppFuture that this DataFuture will track
69 - file_obj (string/File obj) : Something representing file(s)
70
71 Kwargs:
72 - parent ()
73 - tid (task_id) : Task id that this DataFuture tracks
74 """
75 super().__init__()
76 self._tid = tid
77 if isinstance(file_obj, str) and not isinstance(file_obj, File):
78 self.file_obj = File(file_obj)
79 else:
80 self.file_obj = file_obj
81 self.parent = parent
82 self._exception = None
83
84 if fut is None:
85 logger.debug("Setting result to filepath since no future was passed")
86 self.set_result = self.file_obj
87
88 else:
89 if isinstance(fut, Future):
90 self.parent = fut
91 self.parent.add_done_callback(self.parent_callback)
92 else:
93 raise NotFutureError("DataFuture can be created only with a FunctionFuture on None")
94
95 logger.debug("Creating DataFuture with parent: %s", parent)
96 logger.debug("Filepath: %s", self.filepath)
97
98 @property
99 def tid(self):
100 """Returns the task_id of the task that will resolve this DataFuture."""
101 return self._tid
102
103 @property
104 def filepath(self):
105 """Filepath of the File object this datafuture represents."""
106 return self.file_obj.filepath
107
108 @property
109 def filename(self):
110 """Filepath of the File object this datafuture represents."""
111 return self.filepath
112
113 def result(self, timeout=None):
114 """A blocking call that returns either the result or raises an exception.
115
116 Assumptions : A DataFuture always has a parent AppFuture. The AppFuture does callbacks when
117 setup.
118
119 Kwargs:
120 - timeout (int): Timeout in seconds
121
122 Returns:
123 - If App completed successfully returns the filepath.
124
125 Raises:
126 - Exception raised by app if failed.
127
128 """
129 if self.parent:
130 if self.parent.done():
131 # This explicit call to raise exceptions might be redundant.
132 # the result() call *should* raise an exception if there's one
133 e = self.parent._exception
134 if e:
135 raise e
136 else:
137 self.parent.result(timeout=timeout)
138 else:
139 self.parent.result(timeout=timeout)
140
141 return self.file_obj
142
143 def cancel(self):
144 """Cancel the task that this DataFuture is tracking.
145
146 Note: This may not work
147 """
148 if self.parent:
149 return self.parent.cancel
150 else:
151 return False
152
153 def cancelled(self):
154 if self.parent:
155 return self.parent.cancelled()
156 else:
157 return False
158
159 def running(self):
160 if self.parent:
161 return self.parent.running()
162 else:
163 return False
164
165 def done(self):
166 if self.parent:
167 return self.parent.done()
168 else:
169 return True
170
171 def exception(self, timeout=None):
172 if self.parent:
173 return self.parent.exception(timeout=timeout)
174 else:
175 return True
176
177 def add_done_callback(self, fn):
178 if self.parent:
179 return self.parent.add_done_callback(fn)
180 else:
181 return None
182
183 def __repr__(self):
184
185 # The DataFuture could be wrapping an AppFuture whose parent is a Future
186 # check to find the top level parent
187 if isinstance(self.parent, AppFuture):
188 parent = self.parent.parent
189 else:
190 parent = self.parent
191
192 if parent:
193 with parent._condition:
194 if parent._state == FINISHED:
195 if parent._exception:
196 return '<%s at %#x state=%s raised %s>' % (
197 self.__class__.__name__,
198 id(self),
199 _STATE_TO_DESCRIPTION_MAP[parent._state],
200 parent._exception.__class__.__name__)
201 else:
202 return '<%s at %#x state=%s returned %s>' % (
203 self.__class__.__name__,
204 id(self),
205 _STATE_TO_DESCRIPTION_MAP[parent._state],
206 self.filepath + '_file')
207 return '<%s at %#x state=%s>' % (
208 self.__class__.__name__,
209 id(self),
210 _STATE_TO_DESCRIPTION_MAP[parent._state])
211
212 else:
213 return '<%s at %#x state=%s>' % (
214 self.__class__.__name__,
215 id(self),
216 _STATE_TO_DESCRIPTION_MAP[self._state])
217
218
219 def testing_nonfuture():
220 fpath = '~/shuffled.txt'
221 df = DataFuture(None, fpath)
222 print(df)
223 print("Result: ", df.filepath)
224 assert df.filepath == os.path.abspath(os.path.expanduser(fpath))
225
226
227 if __name__ == "__main__":
228 # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)
229 import sys
230 import random
231 logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
232 logger.debug("Begin Testing")
233
234 with open('shuffled.txt', 'w') as testfile:
235 nums = list(range(0, 10000))
236 random.shuffle(nums)
237 for item in nums:
238 testfile.write("{0}\n".format(item))
239
240 foo = Future()
241 df = DataFuture(foo, './shuffled.txt')
242 dx = DataFuture(foo, '~/shuffled.txt')
243
244 print(foo.done())
245 print(df.done())
246
247 testing_nonfuture()
248
[end of parsl/app/futures.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/app/futures.py b/parsl/app/futures.py
--- a/parsl/app/futures.py
+++ b/parsl/app/futures.py
@@ -203,7 +203,7 @@
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
- self.filepath + '_file')
+ self.filepath)
return '<%s at %#x state=%s>' % (
self.__class__.__name__,
id(self),
|
{"golden_diff": "diff --git a/parsl/app/futures.py b/parsl/app/futures.py\n--- a/parsl/app/futures.py\n+++ b/parsl/app/futures.py\n@@ -203,7 +203,7 @@\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n- self.filepath + '_file')\n+ self.filepath)\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n", "issue": "Improve datafuture representation\nHere is an example: `<DataFuture at 0x7f305c0b1da0 state=finished returned /home/annawoodard/pnpfit/results/best-fit-cuB.root_file>`\r\n\r\nI do not think we should append `_file` to the end of the filepath, it makes it confusing what the actual filepath is.\n", "before_files": [{"content": "\"\"\"This module implements DataFutures.\n\nWe have two basic types of futures:\n 1. DataFutures which represent data objects\n 2. AppFutures which represent the futures on App/Leaf tasks.\n\"\"\"\nimport os\nimport logging\nfrom concurrent.futures import Future\n\nfrom parsl.dataflow.futures import AppFuture\nfrom parsl.app.errors import *\nfrom parsl.data_provider.files import File\n\nlogger = logging.getLogger(__name__)\n\n# Possible future states (for internal use by the futures package).\nPENDING = 'PENDING'\nRUNNING = 'RUNNING'\n# The future was cancelled by the user...\nCANCELLED = 'CANCELLED'\n# ...and _Waiter.add_cancelled() was called by a worker.\nCANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'\nFINISHED = 'FINISHED'\n\n_STATE_TO_DESCRIPTION_MAP = {\n PENDING: \"pending\",\n RUNNING: \"running\",\n CANCELLED: \"cancelled\",\n CANCELLED_AND_NOTIFIED: \"cancelled\",\n FINISHED: \"finished\"\n}\n\n\nclass DataFuture(Future):\n \"\"\"A datafuture points at an AppFuture.\n\n We are simply wrapping a AppFuture, and adding the specific case where, if\n the future is resolved i.e file exists, then the DataFuture is assumed to be\n resolved.\n \"\"\"\n\n def parent_callback(self, parent_fu):\n \"\"\"Callback from executor future to update the parent.\n\n Args:\n - parent_fu (Future): Future returned by the executor along with callback\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()\n \"\"\"\n if parent_fu.done() is True:\n e = parent_fu._exception\n if e:\n super().set_exception(e)\n else:\n super().set_result(parent_fu.result())\n return\n\n def __init__(self, fut, file_obj, parent=None, tid=None):\n \"\"\"Construct the DataFuture object.\n\n If the file_obj is a string convert to a File.\n\n Args:\n - fut (AppFuture) : AppFuture that this DataFuture will track\n - file_obj (string/File obj) : Something representing file(s)\n\n Kwargs:\n - parent ()\n - tid (task_id) : Task id that this DataFuture tracks\n \"\"\"\n super().__init__()\n self._tid = tid\n if isinstance(file_obj, str) and not isinstance(file_obj, File):\n self.file_obj = File(file_obj)\n else:\n self.file_obj = file_obj\n self.parent = parent\n self._exception = None\n\n if fut is None:\n logger.debug(\"Setting result to filepath since no future was passed\")\n self.set_result = self.file_obj\n\n else:\n if isinstance(fut, Future):\n self.parent = fut\n self.parent.add_done_callback(self.parent_callback)\n else:\n raise NotFutureError(\"DataFuture can be created only with a FunctionFuture on None\")\n\n logger.debug(\"Creating DataFuture with parent: %s\", parent)\n logger.debug(\"Filepath: %s\", self.filepath)\n\n @property\n def tid(self):\n \"\"\"Returns the task_id of the task that will resolve this DataFuture.\"\"\"\n return self._tid\n\n @property\n def filepath(self):\n \"\"\"Filepath of the File 
object this datafuture represents.\"\"\"\n return self.file_obj.filepath\n\n @property\n def filename(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.filepath\n\n def result(self, timeout=None):\n \"\"\"A blocking call that returns either the result or raises an exception.\n\n Assumptions : A DataFuture always has a parent AppFuture. The AppFuture does callbacks when\n setup.\n\n Kwargs:\n - timeout (int): Timeout in seconds\n\n Returns:\n - If App completed successfully returns the filepath.\n\n Raises:\n - Exception raised by app if failed.\n\n \"\"\"\n if self.parent:\n if self.parent.done():\n # This explicit call to raise exceptions might be redundant.\n # the result() call *should* raise an exception if there's one\n e = self.parent._exception\n if e:\n raise e\n else:\n self.parent.result(timeout=timeout)\n else:\n self.parent.result(timeout=timeout)\n\n return self.file_obj\n\n def cancel(self):\n \"\"\"Cancel the task that this DataFuture is tracking.\n\n Note: This may not work\n \"\"\"\n if self.parent:\n return self.parent.cancel\n else:\n return False\n\n def cancelled(self):\n if self.parent:\n return self.parent.cancelled()\n else:\n return False\n\n def running(self):\n if self.parent:\n return self.parent.running()\n else:\n return False\n\n def done(self):\n if self.parent:\n return self.parent.done()\n else:\n return True\n\n def exception(self, timeout=None):\n if self.parent:\n return self.parent.exception(timeout=timeout)\n else:\n return True\n\n def add_done_callback(self, fn):\n if self.parent:\n return self.parent.add_done_callback(fn)\n else:\n return None\n\n def __repr__(self):\n\n # The DataFuture could be wrapping an AppFuture whose parent is a Future\n # check to find the top level parent\n if isinstance(self.parent, AppFuture):\n parent = self.parent.parent\n else:\n parent = self.parent\n\n if parent:\n with parent._condition:\n if parent._state == FINISHED:\n if parent._exception:\n return '<%s at %#x state=%s raised %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n parent._exception.__class__.__name__)\n else:\n return '<%s at %#x state=%s returned %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n self.filepath + '_file')\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state])\n\n else:\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[self._state])\n\n\ndef testing_nonfuture():\n fpath = '~/shuffled.txt'\n df = DataFuture(None, fpath)\n print(df)\n print(\"Result: \", df.filepath)\n assert df.filepath == os.path.abspath(os.path.expanduser(fpath))\n\n\nif __name__ == \"__main__\":\n # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)\n import sys\n import random\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logger.debug(\"Begin Testing\")\n\n with open('shuffled.txt', 'w') as testfile:\n nums = list(range(0, 10000))\n random.shuffle(nums)\n for item in nums:\n testfile.write(\"{0}\\n\".format(item))\n\n foo = Future()\n df = DataFuture(foo, './shuffled.txt')\n dx = DataFuture(foo, '~/shuffled.txt')\n\n print(foo.done())\n print(df.done())\n\n testing_nonfuture()\n", "path": "parsl/app/futures.py"}]}
| 2,841 | 115 |
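For a concrete sense of the one-line change in the entry above, the following stand-alone sketch reproduces only the string formatting of the patched `__repr__` branch; the class and file path are made up for illustration and nothing here comes from parsl itself:

    class _FakeDataFuture:
        """Toy stand-in; only the repr formatting mirrors the patched branch."""

        filepath = '/home/annawoodard/pnpfit/results/best-fit-cuB.root'

        def __repr__(self):
            # Patched form: the bare filepath, without the trailing '_file'.
            return '<%s at %#x state=%s returned %s>' % (
                self.__class__.__name__, id(self), 'finished', self.filepath)


    print(repr(_FakeDataFuture()))
    # <_FakeDataFuture at 0x... state=finished returned /home/annawoodard/pnpfit/results/best-fit-cuB.root>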
gh_patches_debug_14244 | rasdani/github-patches | git_diff | scrapy__scrapy-1188 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change the scrapy short description
The Scrapy short description says:
> Scrapy, a fast high-level screen scraping and web crawling framework.
I think would be better:
> Scrapy, a fast high-level web crawling and screen scraping framework.
Because it highlights first its difference with simple screen scraping tools (i.e. Nokogiri. Mechanize, etc).
Screen scraping can be done even with curl and grep, but I don't think you could do web crawling with such simple tools.
Perhaps this can be an alternative:
> Scrapy, a fast and scalable web crawling and screen scraping framework.
Also the term "web data mining" can be a good fit for Scrapy (along with Scrapely and similar tools) and help to shape its roadmap.
</issue>
<code>
[start of setup.py]
1 from os.path import dirname, join
2 from setuptools import setup, find_packages
3
4
5 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
6 version = f.read().decode('ascii').strip()
7
8
9 setup(
10 name='Scrapy',
11 version=version,
12 url='http://scrapy.org',
13 description='A high-level Web Crawling and Screen Scraping framework',
14 long_description=open('README.rst').read(),
15 author='Scrapy developers',
16 maintainer='Pablo Hoffman',
17 maintainer_email='[email protected]',
18 license='BSD',
19 packages=find_packages(exclude=('tests', 'tests.*')),
20 include_package_data=True,
21 zip_safe=False,
22 entry_points={
23 'console_scripts': ['scrapy = scrapy.cmdline:execute']
24 },
25 classifiers=[
26 'Framework :: Scrapy',
27 'Development Status :: 5 - Production/Stable',
28 'Environment :: Console',
29 'Intended Audience :: Developers',
30 'License :: OSI Approved :: BSD License',
31 'Operating System :: OS Independent',
32 'Programming Language :: Python',
33 'Programming Language :: Python :: 2',
34 'Programming Language :: Python :: 2.7',
35 'Topic :: Internet :: WWW/HTTP',
36 'Topic :: Software Development :: Libraries :: Application Frameworks',
37 'Topic :: Software Development :: Libraries :: Python Modules',
38 ],
39 install_requires=[
40 'Twisted>=10.0.0',
41 'w3lib>=1.8.0',
42 'queuelib',
43 'lxml',
44 'pyOpenSSL',
45 'cssselect>=0.9',
46 'six>=1.5.2',
47 ],
48 )
49
[end of setup.py]
[start of scrapy/__init__.py]
1 """
2 Scrapy - a web crawling and screen scraping framework written for Python
3 """
4
5 __all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version',
6 'Spider', 'Request', 'FormRequest', 'Selector', 'Item', 'Field']
7
8 # Scrapy version
9 import pkgutil
10 __version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip()
11 version_info = tuple(int(v) if v.isdigit() else v
12 for v in __version__.split('.'))
13 del pkgutil
14
15 # Check minimum required Python version
16 import sys
17 if sys.version_info < (2, 7):
18 print("Scrapy %s requires Python 2.7" % __version__)
19 sys.exit(1)
20
21 # Ignore noisy twisted deprecation warnings
22 import warnings
23 warnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')
24 del warnings
25
26 # Apply monkey patches to fix issues in external libraries
27 from . import _monkeypatches
28 del _monkeypatches
29
30 # WARNING: optional_features set is deprecated and will be removed soon. Do not use.
31 optional_features = set()
32 # TODO: backwards compatibility, remove for Scrapy 0.20
33 optional_features.add('ssl')
34 try:
35 import boto
36 del boto
37 except ImportError:
38 pass
39 else:
40 optional_features.add('boto')
41
42 from twisted import version as _txv
43 twisted_version = (_txv.major, _txv.minor, _txv.micro)
44 if twisted_version >= (11, 1, 0):
45 optional_features.add('http11')
46
47 # Declare top-level shortcuts
48 from scrapy.spider import Spider
49 from scrapy.http import Request, FormRequest
50 from scrapy.selector import Selector
51 from scrapy.item import Item, Field
52
53 del sys
54
[end of scrapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/__init__.py b/scrapy/__init__.py
--- a/scrapy/__init__.py
+++ b/scrapy/__init__.py
@@ -1,5 +1,5 @@
"""
-Scrapy - a web crawling and screen scraping framework written for Python
+Scrapy - a web crawling and web scraping framework written for Python
"""
__all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version',
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
name='Scrapy',
version=version,
url='http://scrapy.org',
- description='A high-level Web Crawling and Screen Scraping framework',
+ description='A high-level Web Crawling and Web Scraping framework',
long_description=open('README.rst').read(),
author='Scrapy developers',
maintainer='Pablo Hoffman',
|
{"golden_diff": "diff --git a/scrapy/__init__.py b/scrapy/__init__.py\n--- a/scrapy/__init__.py\n+++ b/scrapy/__init__.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Scrapy - a web crawling and screen scraping framework written for Python\n+Scrapy - a web crawling and web scraping framework written for Python\n \"\"\"\n \n __all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version',\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n name='Scrapy',\n version=version,\n url='http://scrapy.org',\n- description='A high-level Web Crawling and Screen Scraping framework',\n+ description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n", "issue": "Change the scrapy short description\nThe Scrapy short description says:\n\n> Scrapy, a fast high-level screen scraping and web crawling framework.\n\nI think would be better:\n\n> Scrapy, a fast high-level web crawling and screen scraping framework.\n\nBecause it highlights first its difference with simple screen scraping tools (i.e. Nokogiri. Mechanize, etc).\n\nScreen scraping can be done even with curl and grep, but I don't think you could do web crawling with such simple tools.\n\nPerhaps this can be an alternative:\n\n> Scrapy, a fast and scalable web crawling and screen scraping framework.\n\nAlso the term \"web data mining\" can be a good fit for Scrapy (along with Scrapely and similar tools) and help to shape its roadmap.\n\n", "before_files": [{"content": "from os.path import dirname, join\nfrom setuptools import setup, find_packages\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='http://scrapy.org',\n description='A high-level Web Crawling and Screen Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'Twisted>=10.0.0',\n 'w3lib>=1.8.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n ],\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nScrapy - a web crawling and screen scraping framework written for Python\n\"\"\"\n\n__all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version',\n 'Spider', 'Request', 'FormRequest', 'Selector', 'Item', 'Field']\n\n# Scrapy version\nimport pkgutil\n__version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip()\nversion_info = tuple(int(v) if v.isdigit() else v\n for v in __version__.split('.'))\ndel pkgutil\n\n# Check minimum required Python version\nimport sys\nif sys.version_info < (2, 7):\n print(\"Scrapy %s 
requires Python 2.7\" % __version__)\n sys.exit(1)\n\n# Ignore noisy twisted deprecation warnings\nimport warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')\ndel warnings\n\n# Apply monkey patches to fix issues in external libraries\nfrom . import _monkeypatches\ndel _monkeypatches\n\n# WARNING: optional_features set is deprecated and will be removed soon. Do not use.\noptional_features = set()\n# TODO: backwards compatibility, remove for Scrapy 0.20\noptional_features.add('ssl')\ntry:\n import boto\n del boto\nexcept ImportError:\n pass\nelse:\n optional_features.add('boto')\n\nfrom twisted import version as _txv\ntwisted_version = (_txv.major, _txv.minor, _txv.micro)\nif twisted_version >= (11, 1, 0):\n optional_features.add('http11')\n\n# Declare top-level shortcuts\nfrom scrapy.spider import Spider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom scrapy.item import Item, Field\n\ndel sys\n", "path": "scrapy/__init__.py"}]}
| 1,637 | 210 |
gh_patches_debug_59247 | rasdani/github-patches | git_diff | projectmesa__mesa-1860 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mesa.visualization.chartmodule doesn't work
As shown in the picture, I run the boltzmann_wealth_model in the mesa example, but the line chart is not displayed normally. Can anyone help me?
<img width="788" alt="屏幕截图 2023-11-04 183542" src="https://github.com/projectmesa/mesa/assets/75169342/89ba1b20-4011-471b-909e-5fea97da6b73">
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import re
3 from codecs import open
4
5 from setuptools import find_packages, setup
6
7 requires = [
8 "click",
9 "cookiecutter",
10 "matplotlib",
11 "mesa_viz_tornado",
12 "networkx",
13 "numpy",
14 "pandas",
15 "solara",
16 "tqdm",
17 ]
18
19 extras_require = {
20 "dev": [
21 "black",
22 "ruff~=0.1.1", # Update periodically
23 "coverage",
24 "pytest >= 4.6",
25 "pytest-cov",
26 "sphinx",
27 ],
28 # Explicitly install ipykernel for Python 3.8.
29 # See https://stackoverflow.com/questions/28831854/how-do-i-add-python3-kernel-to-jupyter-ipython
30 # Could be removed in the future
31 "docs": [
32 "sphinx",
33 "ipython",
34 "ipykernel",
35 "pydata_sphinx_theme",
36 "seaborn",
37 "myst-nb",
38 ],
39 }
40
41 version = ""
42 with open("mesa/__init__.py") as fd:
43 version = re.search(
44 r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
45 ).group(1)
46
47 with open("README.rst", "rb", encoding="utf-8") as f:
48 readme = f.read()
49
50
51 setup(
52 name="Mesa",
53 version=version,
54 description="Agent-based modeling (ABM) in Python 3+",
55 long_description=readme,
56 author="Project Mesa Team",
57 author_email="[email protected]",
58 url="https://github.com/projectmesa/mesa",
59 packages=find_packages(),
60 package_data={
61 "cookiecutter-mesa": ["cookiecutter-mesa/*"],
62 },
63 include_package_data=True,
64 install_requires=requires,
65 extras_require=extras_require,
66 keywords="agent based modeling model ABM simulation multi-agent",
67 license="Apache 2.0",
68 zip_safe=False,
69 classifiers=[
70 "Topic :: Scientific/Engineering",
71 "Topic :: Scientific/Engineering :: Artificial Life",
72 "Topic :: Scientific/Engineering :: Artificial Intelligence",
73 "Intended Audience :: Science/Research",
74 "Programming Language :: Python :: 3 :: Only",
75 "Programming Language :: Python :: 3.8",
76 "Programming Language :: Python :: 3.9",
77 "Programming Language :: Python :: 3.10",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: OS Independent",
80 "Development Status :: 3 - Alpha",
81 "Natural Language :: English",
82 ],
83 entry_points="""
84 [console_scripts]
85 mesa=mesa.main:cli
86 """,
87 python_requires=">=3.8",
88 )
89
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
"click",
"cookiecutter",
"matplotlib",
- "mesa_viz_tornado",
+ "mesa_viz_tornado~=0.1.0,>=0.1.2",
"networkx",
"numpy",
"pandas",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,7 +8,7 @@\n \"click\",\n \"cookiecutter\",\n \"matplotlib\",\n- \"mesa_viz_tornado\",\n+ \"mesa_viz_tornado~=0.1.0,>=0.1.2\",\n \"networkx\",\n \"numpy\",\n \"pandas\",\n", "issue": "mesa.visualization.chartmodule doesn't work\nAs shown in the picture, I run the boltzmann_wealth_model in the mesa example, but the line chart is not displayed normally. Can anyone help me?\r\n<img width=\"788\" alt=\"\u5c4f\u5e55\u622a\u56fe 2023-11-04 183542\" src=\"https://github.com/projectmesa/mesa/assets/75169342/89ba1b20-4011-471b-909e-5fea97da6b73\">\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\nrequires = [\n \"click\",\n \"cookiecutter\",\n \"matplotlib\",\n \"mesa_viz_tornado\",\n \"networkx\",\n \"numpy\",\n \"pandas\",\n \"solara\",\n \"tqdm\",\n]\n\nextras_require = {\n \"dev\": [\n \"black\",\n \"ruff~=0.1.1\", # Update periodically\n \"coverage\",\n \"pytest >= 4.6\",\n \"pytest-cov\",\n \"sphinx\",\n ],\n # Explicitly install ipykernel for Python 3.8.\n # See https://stackoverflow.com/questions/28831854/how-do-i-add-python3-kernel-to-jupyter-ipython\n # Could be removed in the future\n \"docs\": [\n \"sphinx\",\n \"ipython\",\n \"ipykernel\",\n \"pydata_sphinx_theme\",\n \"seaborn\",\n \"myst-nb\",\n ],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]}
| 1,450 | 91 |
gh_patches_debug_11847 | rasdani/github-patches | git_diff | CTFd__CTFd-2030 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SubmissionSchema needs more nested fields
I'm having trouble accessing a user's name from a SubmissionSchema dump. This is probably because we need more Nested Fields on the Schema in addition to just the nested challenge schema.
</issue>
<code>
[start of CTFd/schemas/submissions.py]
1 from marshmallow import fields
2
3 from CTFd.models import Submissions, ma
4 from CTFd.schemas.challenges import ChallengeSchema
5 from CTFd.utils import string_types
6
7
8 class SubmissionSchema(ma.ModelSchema):
9 challenge = fields.Nested(ChallengeSchema, only=["name", "category", "value"])
10
11 class Meta:
12 model = Submissions
13 include_fk = True
14 dump_only = ("id",)
15
16 views = {
17 "admin": [
18 "provided",
19 "ip",
20 "challenge_id",
21 "challenge",
22 "user",
23 "team",
24 "date",
25 "type",
26 "id",
27 ],
28 "user": ["challenge_id", "challenge", "user", "team", "date", "type", "id"],
29 }
30
31 def __init__(self, view=None, *args, **kwargs):
32 if view:
33 if isinstance(view, string_types):
34 kwargs["only"] = self.views[view]
35 elif isinstance(view, list):
36 kwargs["only"] = view
37
38 super(SubmissionSchema, self).__init__(*args, **kwargs)
39
[end of CTFd/schemas/submissions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/schemas/submissions.py b/CTFd/schemas/submissions.py
--- a/CTFd/schemas/submissions.py
+++ b/CTFd/schemas/submissions.py
@@ -2,11 +2,15 @@
from CTFd.models import Submissions, ma
from CTFd.schemas.challenges import ChallengeSchema
+from CTFd.schemas.teams import TeamSchema
+from CTFd.schemas.users import UserSchema
from CTFd.utils import string_types
class SubmissionSchema(ma.ModelSchema):
- challenge = fields.Nested(ChallengeSchema, only=["name", "category", "value"])
+ challenge = fields.Nested(ChallengeSchema, only=["id", "name", "category", "value"])
+ user = fields.Nested(UserSchema, only=["id", "name"])
+ team = fields.Nested(TeamSchema, only=["id", "name"])
class Meta:
model = Submissions
|
{"golden_diff": "diff --git a/CTFd/schemas/submissions.py b/CTFd/schemas/submissions.py\n--- a/CTFd/schemas/submissions.py\n+++ b/CTFd/schemas/submissions.py\n@@ -2,11 +2,15 @@\n \n from CTFd.models import Submissions, ma\n from CTFd.schemas.challenges import ChallengeSchema\n+from CTFd.schemas.teams import TeamSchema\n+from CTFd.schemas.users import UserSchema\n from CTFd.utils import string_types\n \n \n class SubmissionSchema(ma.ModelSchema):\n- challenge = fields.Nested(ChallengeSchema, only=[\"name\", \"category\", \"value\"])\n+ challenge = fields.Nested(ChallengeSchema, only=[\"id\", \"name\", \"category\", \"value\"])\n+ user = fields.Nested(UserSchema, only=[\"id\", \"name\"])\n+ team = fields.Nested(TeamSchema, only=[\"id\", \"name\"])\n \n class Meta:\n model = Submissions\n", "issue": "SubmissionSchema needs more nested fields\nI'm having trouble accessing a user's name from a SubmissionSchema dump. This is probably because we need more Nested Fields on the Schema in addition to just the nested challenge schema. \n", "before_files": [{"content": "from marshmallow import fields\n\nfrom CTFd.models import Submissions, ma\nfrom CTFd.schemas.challenges import ChallengeSchema\nfrom CTFd.utils import string_types\n\n\nclass SubmissionSchema(ma.ModelSchema):\n challenge = fields.Nested(ChallengeSchema, only=[\"name\", \"category\", \"value\"])\n\n class Meta:\n model = Submissions\n include_fk = True\n dump_only = (\"id\",)\n\n views = {\n \"admin\": [\n \"provided\",\n \"ip\",\n \"challenge_id\",\n \"challenge\",\n \"user\",\n \"team\",\n \"date\",\n \"type\",\n \"id\",\n ],\n \"user\": [\"challenge_id\", \"challenge\", \"user\", \"team\", \"date\", \"type\", \"id\"],\n }\n\n def __init__(self, view=None, *args, **kwargs):\n if view:\n if isinstance(view, string_types):\n kwargs[\"only\"] = self.views[view]\n elif isinstance(view, list):\n kwargs[\"only\"] = view\n\n super(SubmissionSchema, self).__init__(*args, **kwargs)\n", "path": "CTFd/schemas/submissions.py"}]}
| 896 | 214 |
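To show why the extra nested fields in the diff above matter, here is a plain-marshmallow sketch (no CTFd models involved, marshmallow 3 semantics assumed, names invented for illustration) of a dump in which the related user's name becomes reachable, mirroring the `fields.Nested(UserSchema, only=["id", "name"])` line the patch adds:

    from marshmallow import Schema, fields


    class UserSchema(Schema):
        id = fields.Int()
        name = fields.Str()


    class SubmissionSchema(Schema):
        id = fields.Int()
        provided = fields.Str()
        # The nested declaration is what makes user data appear in the dump.
        user = fields.Nested(UserSchema, only=("id", "name"))


    submission = {"id": 7, "provided": "flag{...}", "user": {"id": 3, "name": "alice"}}
    dump = SubmissionSchema().dump(submission)
    print(dump["user"]["name"])  # -> alice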
gh_patches_debug_24458 | rasdani/github-patches | git_diff | chainer__chainer-4108 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support optimization for rsqrt
CUDA provides `rsqrt` function, which is faster than doing `1.0 / sqrt(x)`.
It is better to provide custom kernel in Chainer or directly support in CuPy.
(Note that NumPy does not provide `rsqrt`)
</issue>
<code>
[start of chainer/functions/math/sqrt.py]
1 from chainer.backends import cuda
2 from chainer import function_node
3 from chainer import utils
4 from chainer.utils import type_check
5
6
7 class Sqrt(function_node.FunctionNode):
8
9 @property
10 def label(self):
11 return 'sqrt'
12
13 def check_type_forward(self, in_types):
14 type_check.expect(
15 in_types.size() == 1,
16 in_types[0].dtype.kind == 'f',
17 )
18
19 def forward(self, x):
20 self.retain_outputs((0,))
21 xp = cuda.get_array_module(*x)
22 return utils.force_array(xp.sqrt(x[0], dtype=x[0].dtype)),
23
24 def backward(self, indexes, grad_outputs):
25 gx = self.get_retained_outputs()[0]
26 gy = grad_outputs[0]
27 return gy / (gx * 2.0),
28
29
30 def sqrt(x):
31 """Elementwise square root function.
32
33 .. math::
34 y_i = \\sqrt x_i.
35
36 If the value of :math:`x_i` is negative, it returns ``Nan`` for :math:`y_i`
37 respect to underlying numpy and cupy specification.
38
39 Args:
40 x (~chainer.Variable): Input variable.
41
42 Returns:
43 ~chainer.Variable: Output variable.
44 """
45 return Sqrt().apply((x,))[0]
46
47
48 def rsqrt(x):
49 """Computes elementwise reciprocal of square root of input :math:`x_i`.
50
51 .. math::
52 y_i = {1 \\over \\sqrt x_i}.
53
54 Args:
55 x (~chainer.Variable): Input variable.
56
57 Returns:
58 ~chainer.Variable: Output variable.
59
60 .. seealso:: :func:`~chainer.functions.sqrt`
61 """
62 return 1.0 / sqrt(x)
63
[end of chainer/functions/math/sqrt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/chainer/functions/math/sqrt.py b/chainer/functions/math/sqrt.py
--- a/chainer/functions/math/sqrt.py
+++ b/chainer/functions/math/sqrt.py
@@ -1,3 +1,5 @@
+import numpy
+
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
@@ -27,6 +29,36 @@
return gy / (gx * 2.0),
+class Rsqrt(function_node.FunctionNode):
+
+ @property
+ def label(self):
+ return 'rsqrt'
+
+ def check_type_forward(self, in_types):
+ type_check.expect(
+ in_types.size() == 1,
+ in_types[0].dtype.kind == 'f',
+ )
+
+ def forward(self, inputs):
+ self.retain_inputs((0,))
+ x, = inputs
+ xp = cuda.get_array_module(x)
+ dtype = x.dtype
+ if xp is numpy:
+ out = xp.reciprocal(xp.sqrt(x, dtype=dtype), dtype=dtype)
+ else:
+ # CuPy provides `rsqrt` which is faster than `1.0 / sqrt(x)`.
+ out = cuda.cupyx.rsqrt(x, dtype=dtype)
+ return utils.force_array(out),
+
+ def backward(self, indexes, grad_outputs):
+ x, = self.get_retained_inputs()
+ gy, = grad_outputs
+ return gy * (x ** -1.5) * -0.5,
+
+
def sqrt(x):
"""Elementwise square root function.
@@ -59,4 +91,4 @@
.. seealso:: :func:`~chainer.functions.sqrt`
"""
- return 1.0 / sqrt(x)
+ return Rsqrt().apply((x,))[0]
|
{"golden_diff": "diff --git a/chainer/functions/math/sqrt.py b/chainer/functions/math/sqrt.py\n--- a/chainer/functions/math/sqrt.py\n+++ b/chainer/functions/math/sqrt.py\n@@ -1,3 +1,5 @@\n+import numpy\n+\n from chainer.backends import cuda\n from chainer import function_node\n from chainer import utils\n@@ -27,6 +29,36 @@\n return gy / (gx * 2.0),\n \n \n+class Rsqrt(function_node.FunctionNode):\n+\n+ @property\n+ def label(self):\n+ return 'rsqrt'\n+\n+ def check_type_forward(self, in_types):\n+ type_check.expect(\n+ in_types.size() == 1,\n+ in_types[0].dtype.kind == 'f',\n+ )\n+\n+ def forward(self, inputs):\n+ self.retain_inputs((0,))\n+ x, = inputs\n+ xp = cuda.get_array_module(x)\n+ dtype = x.dtype\n+ if xp is numpy:\n+ out = xp.reciprocal(xp.sqrt(x, dtype=dtype), dtype=dtype)\n+ else:\n+ # CuPy provides `rsqrt` which is faster than `1.0 / sqrt(x)`.\n+ out = cuda.cupyx.rsqrt(x, dtype=dtype)\n+ return utils.force_array(out),\n+\n+ def backward(self, indexes, grad_outputs):\n+ x, = self.get_retained_inputs()\n+ gy, = grad_outputs\n+ return gy * (x ** -1.5) * -0.5,\n+\n+\n def sqrt(x):\n \"\"\"Elementwise square root function.\n \n@@ -59,4 +91,4 @@\n \n .. seealso:: :func:`~chainer.functions.sqrt`\n \"\"\"\n- return 1.0 / sqrt(x)\n+ return Rsqrt().apply((x,))[0]\n", "issue": "Support optimization for rsqrt\nCUDA provides `rsqrt` function, which is faster than doing `1.0 / sqrt(x)`.\r\nIt is better to provide custom kernel in Chainer or directly support in CuPy.\r\n(Note that NumPy does not provide `rsqrt`)\n", "before_files": [{"content": "from chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\nclass Sqrt(function_node.FunctionNode):\n\n @property\n def label(self):\n return 'sqrt'\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n in_types[0].dtype.kind == 'f',\n )\n\n def forward(self, x):\n self.retain_outputs((0,))\n xp = cuda.get_array_module(*x)\n return utils.force_array(xp.sqrt(x[0], dtype=x[0].dtype)),\n\n def backward(self, indexes, grad_outputs):\n gx = self.get_retained_outputs()[0]\n gy = grad_outputs[0]\n return gy / (gx * 2.0),\n\n\ndef sqrt(x):\n \"\"\"Elementwise square root function.\n\n .. math::\n y_i = \\\\sqrt x_i.\n\n If the value of :math:`x_i` is negative, it returns ``Nan`` for :math:`y_i`\n respect to underlying numpy and cupy specification.\n\n Args:\n x (~chainer.Variable): Input variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n \"\"\"\n return Sqrt().apply((x,))[0]\n\n\ndef rsqrt(x):\n \"\"\"Computes elementwise reciprocal of square root of input :math:`x_i`.\n\n .. math::\n y_i = {1 \\\\over \\\\sqrt x_i}.\n\n Args:\n x (~chainer.Variable): Input variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :func:`~chainer.functions.sqrt`\n \"\"\"\n return 1.0 / sqrt(x)\n", "path": "chainer/functions/math/sqrt.py"}]}
| 1,095 | 411 |
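A quick check of the backward rule used in the Rsqrt patch above: the gradient is just the power rule applied to the reciprocal square root,

\[
y = x^{-1/2}, \qquad
\frac{\partial y}{\partial x} = -\frac{1}{2}\, x^{-3/2}, \qquad
\frac{\partial L}{\partial x} = g_y \cdot \left(-\frac{1}{2}\right) x^{-3/2},
\]

which is exactly the `gy * (x ** -1.5) * -0.5` expression returned by `backward`.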
gh_patches_debug_39388
|
rasdani/github-patches
|
git_diff
|
aimhubio__aim-1221
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add matplotlib integration
We need to let users track matplotlib figures.
We need to investigate the matplotlib library, understand the figure dump/load procedure, and integrate it into our system.
Motivation: make users more engaged and comfortable using aim for their projects.
</issue>
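One way to picture the dump path the issue asks for is to rasterize the figure through matplotlib's own `savefig` and reload it as a regular image object. The sketch below covers only that conversion step; the final wrapping into Aim's tracked types (for example an `aim.Image`) is an assumption rather than the actual API.

```python
import io

import matplotlib
matplotlib.use("Agg")  # headless backend, so the sketch runs without a display
import matplotlib.pyplot as plt
from PIL import Image as PILImage


def matplotlib_figure_to_pil(fig):
    """Rasterize a matplotlib figure into a PIL image via savefig (one possible dump path)."""
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    buffer.seek(0)
    return PILImage.open(buffer)


fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
pil_img = matplotlib_figure_to_pil(fig)
# The resulting PIL image could then be wrapped and tracked, for example as an aim.Image;
# that wrapping step is assumed here and is what the integration itself has to define.
```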
<code>
[start of aim/sdk/objects/image.py]
1 import logging
2 import os.path
3
4 from PIL import Image as PILImage
5
6 from io import BytesIO
7 from itertools import chain, repeat
8 from typing import List
9 import numpy as np
10
11 from aim.sdk.num_utils import inst_has_typename
12 from aim.storage.object import CustomObject
13 from aim.storage.types import BLOB
14
15 logger = logging.getLogger(__name__)
16
17
18 @CustomObject.alias('aim.image')
19 class Image(CustomObject):
20 """Image object used to store image objects in Aim repository...
21
22 Args:
23 image (:obj:): pillow `Image` object or `torch.Tensor` or `numpy.array` used to construct `aim.Image`.
24 caption (:obj:`str`, optional): Optional image caption. '' by default.
25 format (:obj: `str`, optional): Parameter for PIL's .save() method. 'png' by default.
26 quality (:obj: `int`, optional): Parameter for PIL's .save() method. 85 by default.
27 optimize (:obj: `bool`, optional): Parameter for PIL's .save() method. False by default.
28
29 For more information on the format, quality and optimize parameters, refer to PIL documentation.
30
31 Example of params to reduce quality of the image:
32 format='jpeg',
33 optimize=True,
34 quality=85
35 """
36
37 FLAG_WARN_RGBA_RGB = False
38 AIM_NAME = 'aim.image'
39
40 def __init__(self, image, caption: str = '', format='png', quality=90, optimize=False):
41 super().__init__()
42
43 # normalize jpg
44 if format.lower() == 'jpg':
45 # PIL doesn't support 'jpg' key
46 format = 'jpeg'
47
48 params = {
49 'format': format.lower(),
50 'quality': quality,
51 'optimize': optimize
52 }
53
54 if isinstance(image, str):
55 self._from_file_path(image, params)
56 elif inst_has_typename(image, ['PIL', 'Image']):
57 self._from_pil_image(image, params)
58 elif inst_has_typename(image, ['torch', 'Tensor']):
59 self._from_torch_tensor(image, params)
60 elif inst_has_typename(image, ['tensorflow', 'Tensor']):
61 self._from_tf_tensor(image, params)
62 elif inst_has_typename(image, ['numpy', 'array']):
63 self._from_numpy_array(image, params)
64 else:
65 raise TypeError(f'Cannot convert to aim.Image. Unsupported type {type(image)}.')
66 self.caption = caption
67
68 @property
69 def caption(self) -> str:
70 """Image caption, set by user.
71
72 :getter: Returns image caption.
73 :setter: Sets image caption.
74 :type: string
75 """
76 return self.storage['caption']
77
78 @caption.setter
79 def caption(self, value: str):
80 self.storage['caption'] = value
81
82 @property
83 def format(self) -> str:
84 """Stored image format.
85
86 :getter: Returns image format.
87 :type: string
88 """
89 return self.storage['format']
90
91 @property
92 def width(self):
93 """Stored image width.
94
95 :getter: Returns image width.
96 :type: string
97 """
98 return self.storage['width']
99
100 @property
101 def height(self):
102 """Stored image height.
103
104 :getter: Returns image height.
105 :type: string
106 """
107 return self.storage['height']
108
109 @property
110 def size(self):
111 """Stored image size.
112
113 :getter: Returns image (width, height) pair.
114 :type: string
115 """
116 return self.storage['width'], self.storage['height']
117
118 def to_pil_image(self) -> PILImage.Image:
119 """Method to convert aim.Image to pillow Image"""
120 pil_img = PILImage.open(BytesIO(bytes(self.storage['data'])))
121 assert pil_img.size == self.size
122 return pil_img
123
124 def json(self):
125 """Dump image metadata to a dict"""
126 return {
127 'caption': self.caption,
128 'format': self.format,
129 'width': self.width,
130 'height': self.height,
131 }
132
133 def _from_pil_image(self, pil_image: PILImage.Image, params):
134 assert isinstance(pil_image, PILImage.Image)
135 img_container = BytesIO()
136
137 try:
138 pil_image.save(img_container, **params)
139 except OSError as exc:
140 # The best way to approach this problem is to prepare PIL Image object before hitting this method.
141 # This block only handles case where RGBA/P/LA/PA mode is mandated to save in RGB
142 # PIL won't do that automatically, so we have to convert image to RGB before saving it.
143 # In addition - make transparency "white" before conversion otherwise it will be black.
144 if pil_image.mode not in ('RGBA', 'LA', 'PA', 'P'):
145 raise
146 elif not Image.FLAG_WARN_RGBA_RGB:
147 logger.warning(f'Failed to save the image due to the following error: {exc}')
148 logger.warning(f'Attempting to convert mode "{pil_image.mode}" to "RGB"')
149 Image.FLAG_WARN_RGBA_RGB = True
150
151 alpha = pil_image.convert('RGBA').split()[-1] # Get only alpha
152 background = PILImage.new('RGBA', pil_image.size, (255, 255, 255, 255))
153 background.paste(pil_image, mask=alpha)
154 pil_image = background.convert('RGB')
155
156 # Retry
157 pil_image.save(img_container, **params)
158
159 self.storage['data'] = BLOB(data=img_container.getvalue())
160 self.storage['source'] = 'PIL.Image'
161 self.storage['mode'] = pil_image.mode
162 self.storage['format'] = params['format']
163 self.storage['width'], self.storage['height'] = pil_image.size
164
165 def _from_file_path(self, file_path, params):
166 if not os.path.isfile(file_path):
167 raise ValueError('Invalid image file path.')
168
169 return self._from_pil_image(PILImage.open(file_path), params)
170
171 def _from_numpy_array(self, array: np.ndarray, params):
172 if array.ndim not in {2, 3}:
173 raise ValueError('Cannot convert to aim.Image. array must have 2/3-D shape.')
174
175 if array.ndim == 3 and array.shape[2] == 1: # greyscale
176 pil_image = PILImage.fromarray(array[:, :, 0])
177 else:
178 pil_image = PILImage.fromarray(array)
179 self._from_pil_image(pil_image, params)
180
181 def _from_torch_tensor(self, tensor, params):
182 try:
183 import torch
184 assert isinstance(tensor, torch.Tensor)
185 except (ImportError, AssertionError):
186 raise ValueError('Cannot convert from torch.Tensor')
187
188 if tensor.ndim not in {2, 3}:
189 raise ValueError('Cannot convert to aim.Image. Tensor must have 2/3-D shape.')
190 if tensor.is_floating_point():
191 tensor = tensor.mul(255).byte()
192 array: np.ndarray = tensor.cpu().numpy()
193
194 if array.ndim == 3:
195 channels = array.shape[0]
196 if channels == 1:
197 pil_image = PILImage.fromarray(array[0, :, :])
198 else:
199 # reverse order of channels: c,h,w => h,w,c
200 pil_image = PILImage.fromarray(np.transpose(array, (1, 2, 0)))
201 else:
202 pil_image = PILImage.fromarray(array)
203 self._from_pil_image(pil_image, params)
204
205 def _from_tf_tensor(self, tensor, params):
206 try:
207 import tensorflow as tf
208 assert isinstance(tensor, tf.Tensor)
209 except (ImportError, AssertionError):
210 raise ValueError('Cannot convert from tf.Tensor')
211
212 if tensor.ndim not in {2, 3}:
213 raise ValueError('Cannot convert to aim.Image. Tensor must have 2/3-D shape.')
214
215 if tensor.dtype.is_floating:
216 tensor = tf.cast(tf.math.scalar_mul(255.0, tensor), tf.uint8)
217 array: np.ndarray = tensor.numpy()
218
219 if array.ndim == 3 and array.shape[2] == 1: # greyscale
220 pil_image = PILImage.fromarray(array[:, :, 0])
221 else:
222 pil_image = PILImage.fromarray(array)
223 self._from_pil_image(pil_image, params)
224
225 def __eq__(self, other):
226 if not isinstance(other, Image):
227 return False
228
229 props = ['mode', 'format', 'width', 'height']
230 for p in props:
231 if self.storage[p] != other.storage[p]:
232 return False
233
234 return (self.storage['data'].data == other.storage['data'].data)
235
236
237 def convert_to_aim_image_list(images, labels=None) -> List[Image]:
238 aim_images = []
239 if labels is not None:
240 labels_it = chain(labels, repeat(''))
241 else:
242 labels_it = repeat('')
243 for img, lbl in zip(images, labels_it):
244 aim_img = Image(img, lbl if isinstance(lbl, str) else str(lbl.item()))
245 aim_images.append(aim_img)
246 return aim_images
247
[end of aim/sdk/objects/image.py]
[start of aim/sdk/objects/figure.py]
1 from aim.sdk.num_utils import inst_has_typename
2 from aim.storage.object import CustomObject
3 from aim.storage.types import BLOB
4
5
6 @CustomObject.alias('aim.figure')
7 class Figure(CustomObject):
8 """Figure object used to store plotly figures in Aim repository.
9
10 Args:
11 obj (:obj:): plotly figure object.
12 """
13
14 AIM_NAME = 'aim.figure'
15
16 def __init__(self, obj):
17 super().__init__()
18
19 if inst_has_typename(obj, ['Figure', 'BaseFigure']):
20 self._prepare(obj)
21 else:
22 raise TypeError('Object is not a Plotly Figure instance')
23
24 def _prepare(self, obj):
25 try:
26 from plotly.version import __version__ as plotly_version
27 except ModuleNotFoundError:
28 plotly_version = 'unknown'
29
30 assert hasattr(obj, 'to_json')
31
32 self.storage['source'] = 'plotly'
33 self.storage['version'] = plotly_version
34 self.storage['format'] = 'raw_json'
35 self.storage['data'] = BLOB(data=obj.to_json())
36
37 def json(self):
38 blob_data = self.storage['data']
39 return blob_data.data
40
41 def to_plotly_figure(self):
42 try:
43 from plotly.io import from_json
44 except ModuleNotFoundError:
45 raise ModuleNotFoundError('Could not find plotly in the installed modules.')
46
47 return from_json(self.json())
48
[end of aim/sdk/objects/figure.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/aim/sdk/objects/figure.py b/aim/sdk/objects/figure.py
--- a/aim/sdk/objects/figure.py
+++ b/aim/sdk/objects/figure.py
@@ -5,10 +5,12 @@
@CustomObject.alias('aim.figure')
class Figure(CustomObject):
- """Figure object used to store plotly figures in Aim repository.
+ """
+ Figure object can be used for storing Plotly or Matplotlib figures into Aim repository.
+ Core functionality is based on Plotly.
Args:
- obj (:obj:): plotly figure object.
+ obj (:obj:): plotly or matplotlib figure object.
"""
AIM_NAME = 'aim.figure'
@@ -16,7 +18,9 @@
def __init__(self, obj):
super().__init__()
- if inst_has_typename(obj, ['Figure', 'BaseFigure']):
+ if inst_has_typename(obj, ['matplotlib', 'Figure']):
+ self._from_matplotlib_figure(obj)
+ elif inst_has_typename(obj, ['plotly', 'Figure', 'BaseFigure']):
self._prepare(obj)
else:
raise TypeError('Object is not a Plotly Figure instance')
@@ -34,6 +38,19 @@
self.storage['format'] = 'raw_json'
self.storage['data'] = BLOB(data=obj.to_json())
+ def _from_matplotlib_figure(self, obj):
+ try:
+ from plotly.tools import mpl_to_plotly
+ except ModuleNotFoundError:
+ raise ModuleNotFoundError('Plotly is required to track matplotlib figure.')
+
+ try:
+ plotly_obj = mpl_to_plotly(obj)
+ except ValueError as err:
+ raise ValueError(f'Failed to convert matplotlib figure to plotly figure: {err}')
+
+ return self._prepare(plotly_obj)
+
def json(self):
blob_data = self.storage['data']
return blob_data.data
diff --git a/aim/sdk/objects/image.py b/aim/sdk/objects/image.py
--- a/aim/sdk/objects/image.py
+++ b/aim/sdk/objects/image.py
@@ -61,6 +61,8 @@
self._from_tf_tensor(image, params)
elif inst_has_typename(image, ['numpy', 'array']):
self._from_numpy_array(image, params)
+ elif inst_has_typename(image, ['Figure', 'matplotlib', 'figure']):
+ self._from_matplotlib_figure(image, params)
else:
raise TypeError(f'Cannot convert to aim.Image. Unsupported type {type(image)}.')
self.caption = caption
@@ -222,6 +224,18 @@
pil_image = PILImage.fromarray(array)
self._from_pil_image(pil_image, params)
+ def _from_matplotlib_figure(self, figure, params):
+ try:
+ assert hasattr(figure, 'savefig')
+ except AssertionError:
+ raise ValueError('Cannot convert from matplotlib figure.')
+
+ buffer = BytesIO()
+ figure.savefig(buffer)
+ buffer.seek(0)
+
+ return self._from_pil_image(PILImage.open(buffer), params)
+
def __eq__(self, other):
if not isinstance(other, Image):
return False
|
{"golden_diff": "diff --git a/aim/sdk/objects/figure.py b/aim/sdk/objects/figure.py\n--- a/aim/sdk/objects/figure.py\n+++ b/aim/sdk/objects/figure.py\n@@ -5,10 +5,12 @@\n \n @CustomObject.alias('aim.figure')\n class Figure(CustomObject):\n- \"\"\"Figure object used to store plotly figures in Aim repository.\n+ \"\"\"\n+ Figure object can be used for storing Plotly or Matplotlib figures into Aim repository.\n+ Core functionality is based on Plotly.\n \n Args:\n- obj (:obj:): plotly figure object.\n+ obj (:obj:): plotly or matplotlib figure object.\n \"\"\"\n \n AIM_NAME = 'aim.figure'\n@@ -16,7 +18,9 @@\n def __init__(self, obj):\n super().__init__()\n \n- if inst_has_typename(obj, ['Figure', 'BaseFigure']):\n+ if inst_has_typename(obj, ['matplotlib', 'Figure']):\n+ self._from_matplotlib_figure(obj)\n+ elif inst_has_typename(obj, ['plotly', 'Figure', 'BaseFigure']):\n self._prepare(obj)\n else:\n raise TypeError('Object is not a Plotly Figure instance')\n@@ -34,6 +38,19 @@\n self.storage['format'] = 'raw_json'\n self.storage['data'] = BLOB(data=obj.to_json())\n \n+ def _from_matplotlib_figure(self, obj):\n+ try:\n+ from plotly.tools import mpl_to_plotly\n+ except ModuleNotFoundError:\n+ raise ModuleNotFoundError('Plotly is required to track matplotlib figure.')\n+\n+ try:\n+ plotly_obj = mpl_to_plotly(obj)\n+ except ValueError as err:\n+ raise ValueError(f'Failed to convert matplotlib figure to plotly figure: {err}')\n+\n+ return self._prepare(plotly_obj)\n+\n def json(self):\n blob_data = self.storage['data']\n return blob_data.data\ndiff --git a/aim/sdk/objects/image.py b/aim/sdk/objects/image.py\n--- a/aim/sdk/objects/image.py\n+++ b/aim/sdk/objects/image.py\n@@ -61,6 +61,8 @@\n self._from_tf_tensor(image, params)\n elif inst_has_typename(image, ['numpy', 'array']):\n self._from_numpy_array(image, params)\n+ elif inst_has_typename(image, ['Figure', 'matplotlib', 'figure']):\n+ self._from_matplotlib_figure(image, params)\n else:\n raise TypeError(f'Cannot convert to aim.Image. 
Unsupported type {type(image)}.')\n self.caption = caption\n@@ -222,6 +224,18 @@\n pil_image = PILImage.fromarray(array)\n self._from_pil_image(pil_image, params)\n \n+ def _from_matplotlib_figure(self, figure, params):\n+ try:\n+ assert hasattr(figure, 'savefig')\n+ except AssertionError:\n+ raise ValueError('Cannot convert from matplotlib figure.')\n+\n+ buffer = BytesIO()\n+ figure.savefig(buffer)\n+ buffer.seek(0)\n+\n+ return self._from_pil_image(PILImage.open(buffer), params)\n+\n def __eq__(self, other):\n if not isinstance(other, Image):\n return False\n", "issue": "Add matplotlib integration\nWe need let users to track matplotlib figures.\r\nNeed to investigate matplotlib library and understand figure's dump/load procedure and integrate it into our system.\r\n\r\nMotivation: Make users more engaged and feel comfortable using aim for their projects\n", "before_files": [{"content": "import logging\nimport os.path\n\nfrom PIL import Image as PILImage\n\nfrom io import BytesIO\nfrom itertools import chain, repeat\nfrom typing import List\nimport numpy as np\n\nfrom aim.sdk.num_utils import inst_has_typename\nfrom aim.storage.object import CustomObject\nfrom aim.storage.types import BLOB\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]('aim.image')\nclass Image(CustomObject):\n \"\"\"Image object used to store image objects in Aim repository...\n\n Args:\n image (:obj:): pillow `Image` object or `torch.Tensor` or `numpy.array` used to construct `aim.Image`.\n caption (:obj:`str`, optional): Optional image caption. '' by default.\n format (:obj: `str`, optional): Parameter for PIL's .save() method. 'png' by default.\n quality (:obj: `int`, optional): Parameter for PIL's .save() method. 85 by default.\n optimize (:obj: `bool`, optional): Parameter for PIL's .save() method. False by default.\n\n For more information on the format, quality and optimize parameters, refer to PIL documentation.\n\n Example of params to reduce quality of the image:\n format='jpeg',\n optimize=True,\n quality=85\n \"\"\"\n\n FLAG_WARN_RGBA_RGB = False\n AIM_NAME = 'aim.image'\n\n def __init__(self, image, caption: str = '', format='png', quality=90, optimize=False):\n super().__init__()\n\n # normalize jpg\n if format.lower() == 'jpg':\n # PIL doesn't support 'jpg' key\n format = 'jpeg'\n\n params = {\n 'format': format.lower(),\n 'quality': quality,\n 'optimize': optimize\n }\n\n if isinstance(image, str):\n self._from_file_path(image, params)\n elif inst_has_typename(image, ['PIL', 'Image']):\n self._from_pil_image(image, params)\n elif inst_has_typename(image, ['torch', 'Tensor']):\n self._from_torch_tensor(image, params)\n elif inst_has_typename(image, ['tensorflow', 'Tensor']):\n self._from_tf_tensor(image, params)\n elif inst_has_typename(image, ['numpy', 'array']):\n self._from_numpy_array(image, params)\n else:\n raise TypeError(f'Cannot convert to aim.Image. 
Unsupported type {type(image)}.')\n self.caption = caption\n\n @property\n def caption(self) -> str:\n \"\"\"Image caption, set by user.\n\n :getter: Returns image caption.\n :setter: Sets image caption.\n :type: string\n \"\"\"\n return self.storage['caption']\n\n @caption.setter\n def caption(self, value: str):\n self.storage['caption'] = value\n\n @property\n def format(self) -> str:\n \"\"\"Stored image format.\n\n :getter: Returns image format.\n :type: string\n \"\"\"\n return self.storage['format']\n\n @property\n def width(self):\n \"\"\"Stored image width.\n\n :getter: Returns image width.\n :type: string\n \"\"\"\n return self.storage['width']\n\n @property\n def height(self):\n \"\"\"Stored image height.\n\n :getter: Returns image height.\n :type: string\n \"\"\"\n return self.storage['height']\n\n @property\n def size(self):\n \"\"\"Stored image size.\n\n :getter: Returns image (width, height) pair.\n :type: string\n \"\"\"\n return self.storage['width'], self.storage['height']\n\n def to_pil_image(self) -> PILImage.Image:\n \"\"\"Method to convert aim.Image to pillow Image\"\"\"\n pil_img = PILImage.open(BytesIO(bytes(self.storage['data'])))\n assert pil_img.size == self.size\n return pil_img\n\n def json(self):\n \"\"\"Dump image metadata to a dict\"\"\"\n return {\n 'caption': self.caption,\n 'format': self.format,\n 'width': self.width,\n 'height': self.height,\n }\n\n def _from_pil_image(self, pil_image: PILImage.Image, params):\n assert isinstance(pil_image, PILImage.Image)\n img_container = BytesIO()\n\n try:\n pil_image.save(img_container, **params)\n except OSError as exc:\n # The best way to approach this problem is to prepare PIL Image object before hitting this method.\n # This block only handles case where RGBA/P/LA/PA mode is mandated to save in RGB\n # PIL won't do that automatically, so we have to convert image to RGB before saving it.\n # In addition - make transparency \"white\" before conversion otherwise it will be black.\n if pil_image.mode not in ('RGBA', 'LA', 'PA', 'P'):\n raise\n elif not Image.FLAG_WARN_RGBA_RGB:\n logger.warning(f'Failed to save the image due to the following error: {exc}')\n logger.warning(f'Attempting to convert mode \"{pil_image.mode}\" to \"RGB\"')\n Image.FLAG_WARN_RGBA_RGB = True\n\n alpha = pil_image.convert('RGBA').split()[-1] # Get only alpha\n background = PILImage.new('RGBA', pil_image.size, (255, 255, 255, 255))\n background.paste(pil_image, mask=alpha)\n pil_image = background.convert('RGB')\n\n # Retry\n pil_image.save(img_container, **params)\n\n self.storage['data'] = BLOB(data=img_container.getvalue())\n self.storage['source'] = 'PIL.Image'\n self.storage['mode'] = pil_image.mode\n self.storage['format'] = params['format']\n self.storage['width'], self.storage['height'] = pil_image.size\n\n def _from_file_path(self, file_path, params):\n if not os.path.isfile(file_path):\n raise ValueError('Invalid image file path.')\n\n return self._from_pil_image(PILImage.open(file_path), params)\n\n def _from_numpy_array(self, array: np.ndarray, params):\n if array.ndim not in {2, 3}:\n raise ValueError('Cannot convert to aim.Image. 
array must have 2/3-D shape.')\n\n if array.ndim == 3 and array.shape[2] == 1: # greyscale\n pil_image = PILImage.fromarray(array[:, :, 0])\n else:\n pil_image = PILImage.fromarray(array)\n self._from_pil_image(pil_image, params)\n\n def _from_torch_tensor(self, tensor, params):\n try:\n import torch\n assert isinstance(tensor, torch.Tensor)\n except (ImportError, AssertionError):\n raise ValueError('Cannot convert from torch.Tensor')\n\n if tensor.ndim not in {2, 3}:\n raise ValueError('Cannot convert to aim.Image. Tensor must have 2/3-D shape.')\n if tensor.is_floating_point():\n tensor = tensor.mul(255).byte()\n array: np.ndarray = tensor.cpu().numpy()\n\n if array.ndim == 3:\n channels = array.shape[0]\n if channels == 1:\n pil_image = PILImage.fromarray(array[0, :, :])\n else:\n # reverse order of channels: c,h,w => h,w,c\n pil_image = PILImage.fromarray(np.transpose(array, (1, 2, 0)))\n else:\n pil_image = PILImage.fromarray(array)\n self._from_pil_image(pil_image, params)\n\n def _from_tf_tensor(self, tensor, params):\n try:\n import tensorflow as tf\n assert isinstance(tensor, tf.Tensor)\n except (ImportError, AssertionError):\n raise ValueError('Cannot convert from tf.Tensor')\n\n if tensor.ndim not in {2, 3}:\n raise ValueError('Cannot convert to aim.Image. Tensor must have 2/3-D shape.')\n\n if tensor.dtype.is_floating:\n tensor = tf.cast(tf.math.scalar_mul(255.0, tensor), tf.uint8)\n array: np.ndarray = tensor.numpy()\n\n if array.ndim == 3 and array.shape[2] == 1: # greyscale\n pil_image = PILImage.fromarray(array[:, :, 0])\n else:\n pil_image = PILImage.fromarray(array)\n self._from_pil_image(pil_image, params)\n\n def __eq__(self, other):\n if not isinstance(other, Image):\n return False\n\n props = ['mode', 'format', 'width', 'height']\n for p in props:\n if self.storage[p] != other.storage[p]:\n return False\n\n return (self.storage['data'].data == other.storage['data'].data)\n\n\ndef convert_to_aim_image_list(images, labels=None) -> List[Image]:\n aim_images = []\n if labels is not None:\n labels_it = chain(labels, repeat(''))\n else:\n labels_it = repeat('')\n for img, lbl in zip(images, labels_it):\n aim_img = Image(img, lbl if isinstance(lbl, str) else str(lbl.item()))\n aim_images.append(aim_img)\n return aim_images\n", "path": "aim/sdk/objects/image.py"}, {"content": "from aim.sdk.num_utils import inst_has_typename\nfrom aim.storage.object import CustomObject\nfrom aim.storage.types import BLOB\n\n\[email protected]('aim.figure')\nclass Figure(CustomObject):\n \"\"\"Figure object used to store plotly figures in Aim repository.\n\n Args:\n obj (:obj:): plotly figure object.\n \"\"\"\n\n AIM_NAME = 'aim.figure'\n\n def __init__(self, obj):\n super().__init__()\n\n if inst_has_typename(obj, ['Figure', 'BaseFigure']):\n self._prepare(obj)\n else:\n raise TypeError('Object is not a Plotly Figure instance')\n\n def _prepare(self, obj):\n try:\n from plotly.version import __version__ as plotly_version\n except ModuleNotFoundError:\n plotly_version = 'unknown'\n\n assert hasattr(obj, 'to_json')\n\n self.storage['source'] = 'plotly'\n self.storage['version'] = plotly_version\n self.storage['format'] = 'raw_json'\n self.storage['data'] = BLOB(data=obj.to_json())\n\n def json(self):\n blob_data = self.storage['data']\n return blob_data.data\n\n def to_plotly_figure(self):\n try:\n from plotly.io import from_json\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Could not find plotly in the installed modules.')\n\n return from_json(self.json())\n", "path": 
"aim/sdk/objects/figure.py"}]}
| 3,615 | 720 |
gh_patches_debug_1891
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-1535
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NonCachingFileStore doesn't have the jobID attribute
This makes NonCachingFileStore incompatible with dockerCall.
</issue>
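A minimal reproduction of where this bites, using stand-in classes rather than the real Toil objects; the attribute lookup mirrors what `_getContainerName` in the module below does.

```python
class NonCachingFileStoreStandIn:
    """Stand-in for NonCachingFileStore: deliberately has no ``jobID`` attribute."""


class JobStandIn:
    def __init__(self):
        self.fileStore = NonCachingFileStoreStandIn()


job = JobStandIn()
try:
    # dockerCall eventually builds a container name from job.fileStore.jobID,
    # which is the lookup that fails for the non-caching file store.
    container_name_part = job.fileStore.jobID
except AttributeError as exc:
    print("dockerCall would fail here:", exc)
```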
<code>
[start of src/toil/lib/docker.py]
1 """
2 Module for calling Docker. Assumes `docker` is on the PATH.
3
4 Contains two user-facing functions: dockerCall and dockerCheckOutput
5
6 Uses Toil's defer functionality to ensure containers are shutdown even in case of job or pipeline failure
7
8 Example of using dockerCall in a Toil pipeline to index a FASTA file with SAMtools:
9 def toil_job(job):
10 work_dir = job.fileStore.getLocalTempDir()
11 path = job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta')
12 parameters = ['faidx', path]
13 dockerCall(job, tool='quay.io/ucgc_cgl/samtools:latest', work_dir=work_dir, parameters=parameters)
14 """
15 import base64
16 import logging
17 import subprocess
18 import pipes
19 import os
20 from bd2k.util.exceptions import require
21
22 _logger = logging.getLogger(__name__)
23
24
25 def dockerCall(job,
26 tool,
27 parameters=None,
28 workDir=None,
29 dockerParameters=None,
30 outfile=None,
31 defer=None):
32 """
33 Throws CalledProcessorError if the Docker invocation returns a non-zero exit code
34 This function blocks until the subprocess call to Docker returns
35
36 :param toil.Job.job job: The Job instance for the calling function.
37 :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).
38 :param list[str] parameters: Command line arguments to be passed to the tool.
39 If list of lists: list[list[str]], then treat as successive commands chained with pipe.
40 :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data
41 :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,
42 `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.
43 These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.
44 :param file outfile: Pipe output of Docker call to file handle
45 :param int defer: What action should be taken on the container upon job completion?
46 FORGO (0) will leave the container untouched.
47 STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).
48 RM (2) will stop the container and then forcefully remove it from the system
49 using `docker rm -f`. This is the default behavior if defer is set to None.
50 """
51 _docker(job, tool=tool, parameters=parameters, workDir=workDir, dockerParameters=dockerParameters,
52 outfile=outfile, checkOutput=False, defer=defer)
53
54
55 def dockerCheckOutput(job,
56 tool,
57 parameters=None,
58 workDir=None,
59 dockerParameters=None,
60 defer=None):
61 """
62 Returns the stdout from the Docker invocation (via subprocess.check_output)
63 Throws CalledProcessorError if the Docker invocation returns a non-zero exit code
64 This function blocks until the subprocess call to Docker returns
65
66 :param toil.Job.job job: The Job instance for the calling function.
67 :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).
68 :param list[str] parameters: Command line arguments to be passed to the tool.
69 If list of lists: list[list[str]], then treat as successive commands chained with pipe.
70 :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data
71 :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,
72 `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.
73 These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.
74 :param int defer: What action should be taken on the container upon job completion?
75 FORGO (0) will leave the container untouched.
76 STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).
77 RM (2) will stop the container and then forcefully remove it from the system
78 using `docker rm -f`. This is the default behavior if defer is set to None.
79 :returns: Stdout from the docker call
80 :rtype: str
81 """
82 return _docker(job, tool=tool, parameters=parameters, workDir=workDir,
83 dockerParameters=dockerParameters, checkOutput=True, defer=defer)
84
85
86 def _docker(job,
87 tool,
88 parameters=None,
89 workDir=None,
90 dockerParameters=None,
91 outfile=None,
92 checkOutput=False,
93 defer=None):
94 """
95 :param toil.Job.job job: The Job instance for the calling function.
96 :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools).
97 :param list[str] parameters: Command line arguments to be passed to the tool.
98 If list of lists: list[list[str]], then treat as successive commands chained with pipe.
99 :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data
100 :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,
101 `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.
102 These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.
103 :param file outfile: Pipe output of Docker call to file handle
104 :param bool checkOutput: When True, this function returns docker's output.
105 :param int defer: What action should be taken on the container upon job completion?
106 FORGO (0) will leave the container untouched.
107 STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).
108 RM (2) will stop the container and then forcefully remove it from the system
109 using `docker rm -f`. This is the default behavior if defer is set to None.
110 """
111 if parameters is None:
112 parameters = []
113 if workDir is None:
114 workDir = os.getcwd()
115
116 # Setup the outgoing subprocess call for docker
117 baseDockerCall = ['docker', 'run']
118 if dockerParameters:
119 baseDockerCall += dockerParameters
120 else:
121 baseDockerCall += ['--rm', '--log-driver', 'none', '-v',
122 os.path.abspath(workDir) + ':/data']
123
124 # Ensure the user has passed a valid value for defer
125 require(defer in (None, FORGO, STOP, RM),
126 'Please provide a valid value for defer.')
127
128 # Get container name which is needed for _dockerKill
129 try:
130 if any('--name' in x for x in baseDockerCall):
131 if any('--name=' in x for x in baseDockerCall):
132 containerName = [x.split('=')[1] for x in baseDockerCall if '--name' in x][0]
133 else:
134 containerName = baseDockerCall[baseDockerCall.index('--name') + 1]
135 else:
136 containerName = _getContainerName(job)
137 except ValueError:
138 containerName = _getContainerName(job)
139 baseDockerCall.extend(['--name', containerName])
140 except IndexError:
141 raise RuntimeError("Couldn't parse Docker's `--name=` option, check parameters: " + str(dockerParameters))
142
143 # Defer the container on-exit action
144 if '--rm' in baseDockerCall and defer is None:
145 defer = RM
146 if '--rm' in baseDockerCall and defer is not RM:
147 _logger.warn('--rm being passed to docker call but defer not set to dockerCall.RM, defer set to: ' + str(defer))
148 job.defer(_dockerKill, containerName, action=defer)
149 # Defer the permission fixing function which will run after this job concludes.
150 # We call this explicitly later on in this function, but we defer it as well to handle unexpected job failure.
151 job.defer(_fixPermissions, tool, workDir)
152
153 # Make subprocess call
154
155 # If parameters is list of lists, treat each list as separate command and chain with pipes
156 if len(parameters) > 0 and type(parameters[0]) is list:
157 # When piping, all arguments now get merged into a single string to bash -c.
158 # We try to support spaces in paths by wrapping them all in quotes first.
159 chain_params = [' '.join(p) for p in [map(pipes.quote, q) for q in parameters]]
160 call = baseDockerCall + ['--entrypoint', '/bin/bash', tool, '-c', ' | '.join(chain_params)]
161 else:
162 call = baseDockerCall + [tool] + parameters
163 _logger.info("Calling docker with " + repr(call))
164
165 if outfile:
166 subprocess.check_call(call, stdout=outfile)
167 else:
168 if checkOutput:
169 return subprocess.check_output(call)
170 else:
171 subprocess.check_call(call)
172
173
174 FORGO = 0
175 STOP = 1
176 RM = 2
177
178
179 def _dockerKill(containerName, action):
180 """
181 Kills the specified container.
182 :param str containerName: The name of the container created by docker_call
183 :param int action: What action should be taken on the container? See `defer=` in
184 :func:`docker_call`
185 """
186 running = _containerIsRunning(containerName)
187 if running is None:
188 # This means that the container doesn't exist. We will see this if the container was run
189 # with --rm and has already exited before this call.
190 _logger.info('The container with name "%s" appears to have already been removed. Nothing to '
191 'do.', containerName)
192 else:
193 if action in (None, FORGO):
194 _logger.info('The container with name %s continues to exist as we were asked to forgo a '
195 'post-job action on it.', containerName)
196 else:
197 _logger.info('The container with name %s exists. Running user-specified defer functions.',
198 containerName)
199 if running and action >= STOP:
200 _logger.info('Stopping container "%s".', containerName)
201 subprocess.check_call(['docker', 'stop', containerName])
202 else:
203 _logger.info('The container "%s" was not found to be running.', containerName)
204 if action >= RM:
205 # If the container was run with --rm, then stop will most likely remove the
206 # container. We first check if it is running then remove it.
207 running = _containerIsRunning(containerName)
208 if running is not None:
209 _logger.info('Removing container "%s".', containerName)
210 subprocess.check_call(['docker', 'rm', '-f', containerName])
211 else:
212 _logger.info('The container "%s" was not found on the system. Nothing to remove.',
213 containerName)
214
215
216 def _fixPermissions(tool, workDir):
217 """
218 Fix permission of a mounted Docker directory by reusing the tool to change ownership.
219 Docker natively runs as a root inside the container, and files written to the
220 mounted directory are implicitly owned by root.
221
222 :param list baseDockerCall: Docker run parameters
223 :param str tool: Name of tool
224 :param str workDir: Path of work directory to recursively chown
225 """
226 baseDockerCall = ['docker', 'run', '--log-driver=none',
227 '-v', os.path.abspath(workDir) + ':/data', '--rm', '--entrypoint=chown']
228 stat = os.stat(workDir)
229 command = baseDockerCall + [tool] + ['-R', '{}:{}'.format(stat.st_uid, stat.st_gid), '/data']
230 subprocess.check_call(command)
231
232
233 def _getContainerName(job):
234 return '--'.join([str(job),
235 job.fileStore.jobID,
236 base64.b64encode(os.urandom(9), '-_')]).replace("'", '').replace('_', '')
237
238
239 def _containerIsRunning(container_name):
240 """
241 Checks whether the container is running or not.
242 :param container_name: Name of the container being checked.
243 :returns: True if running, False if not running, None if the container doesn't exist.
244 :rtype: bool
245 """
246 try:
247 output = subprocess.check_output(['docker', 'inspect', '--format', '{{.State.Running}}',
248 container_name]).strip()
249 except subprocess.CalledProcessError:
250 # This will be raised if the container didn't exist.
251 _logger.debug("'docker inspect' failed. Assuming container %s doesn't exist.", container_name,
252 exc_info=True)
253 return None
254 if output == 'true':
255 return True
256 elif output == 'false':
257 return False
258 else:
259 raise RuntimeError("Got unexpected value for State.Running (%s)" % output)
260
[end of src/toil/lib/docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/toil/lib/docker.py b/src/toil/lib/docker.py
--- a/src/toil/lib/docker.py
+++ b/src/toil/lib/docker.py
@@ -232,7 +232,6 @@
def _getContainerName(job):
return '--'.join([str(job),
- job.fileStore.jobID,
base64.b64encode(os.urandom(9), '-_')]).replace("'", '').replace('_', '')
|
{"golden_diff": "diff --git a/src/toil/lib/docker.py b/src/toil/lib/docker.py\n--- a/src/toil/lib/docker.py\n+++ b/src/toil/lib/docker.py\n@@ -232,7 +232,6 @@\n \n def _getContainerName(job):\n return '--'.join([str(job),\n- job.fileStore.jobID,\n base64.b64encode(os.urandom(9), '-_')]).replace(\"'\", '').replace('_', '')\n", "issue": "NonCachingFileStore doesn't have the jobID attribute\nThis makes NonCachingFileStore incompatible with dockerCall.\nNonCachingFileStore doesn't have the jobID attribute\nThis makes NonCachingFileStore incompatible with dockerCall.\n", "before_files": [{"content": "\"\"\"\n Module for calling Docker. Assumes `docker` is on the PATH.\n\n Contains two user-facing functions: dockerCall and dockerCheckOutput\n\n Uses Toil's defer functionality to ensure containers are shutdown even in case of job or pipeline failure\n\n Example of using dockerCall in a Toil pipeline to index a FASTA file with SAMtools:\n def toil_job(job):\n work_dir = job.fileStore.getLocalTempDir()\n path = job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta')\n parameters = ['faidx', path]\n dockerCall(job, tool='quay.io/ucgc_cgl/samtools:latest', work_dir=work_dir, parameters=parameters)\n\"\"\"\nimport base64\nimport logging\nimport subprocess\nimport pipes\nimport os\nfrom bd2k.util.exceptions import require\n\n_logger = logging.getLogger(__name__)\n\n\ndef dockerCall(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n outfile=None,\n defer=None):\n \"\"\"\n Throws CalledProcessorError if the Docker invocation returns a non-zero exit code\n This function blocks until the subprocess call to Docker returns\n\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param file outfile: Pipe output of Docker call to file handle\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. This is the default behavior if defer is set to None.\n \"\"\"\n _docker(job, tool=tool, parameters=parameters, workDir=workDir, dockerParameters=dockerParameters,\n outfile=outfile, checkOutput=False, defer=defer)\n\n\ndef dockerCheckOutput(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n defer=None):\n \"\"\"\n Returns the stdout from the Docker invocation (via subprocess.check_output)\n Throws CalledProcessorError if the Docker invocation returns a non-zero exit code\n This function blocks until the subprocess call to Docker returns\n\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. 
quay.io/ucsc_cgl/samtools:latest).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. This is the default behavior if defer is set to None.\n :returns: Stdout from the docker call\n :rtype: str\n \"\"\"\n return _docker(job, tool=tool, parameters=parameters, workDir=workDir,\n dockerParameters=dockerParameters, checkOutput=True, defer=defer)\n\n\ndef _docker(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n outfile=None,\n checkOutput=False,\n defer=None):\n \"\"\"\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param file outfile: Pipe output of Docker call to file handle\n :param bool checkOutput: When True, this function returns docker's output.\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. 
This is the default behavior if defer is set to None.\n \"\"\"\n if parameters is None:\n parameters = []\n if workDir is None:\n workDir = os.getcwd()\n\n # Setup the outgoing subprocess call for docker\n baseDockerCall = ['docker', 'run']\n if dockerParameters:\n baseDockerCall += dockerParameters\n else:\n baseDockerCall += ['--rm', '--log-driver', 'none', '-v',\n os.path.abspath(workDir) + ':/data']\n\n # Ensure the user has passed a valid value for defer\n require(defer in (None, FORGO, STOP, RM),\n 'Please provide a valid value for defer.')\n\n # Get container name which is needed for _dockerKill\n try:\n if any('--name' in x for x in baseDockerCall):\n if any('--name=' in x for x in baseDockerCall):\n containerName = [x.split('=')[1] for x in baseDockerCall if '--name' in x][0]\n else:\n containerName = baseDockerCall[baseDockerCall.index('--name') + 1]\n else:\n containerName = _getContainerName(job)\n except ValueError:\n containerName = _getContainerName(job)\n baseDockerCall.extend(['--name', containerName])\n except IndexError:\n raise RuntimeError(\"Couldn't parse Docker's `--name=` option, check parameters: \" + str(dockerParameters))\n\n # Defer the container on-exit action\n if '--rm' in baseDockerCall and defer is None:\n defer = RM\n if '--rm' in baseDockerCall and defer is not RM:\n _logger.warn('--rm being passed to docker call but defer not set to dockerCall.RM, defer set to: ' + str(defer))\n job.defer(_dockerKill, containerName, action=defer)\n # Defer the permission fixing function which will run after this job concludes.\n # We call this explicitly later on in this function, but we defer it as well to handle unexpected job failure.\n job.defer(_fixPermissions, tool, workDir)\n\n # Make subprocess call\n\n # If parameters is list of lists, treat each list as separate command and chain with pipes\n if len(parameters) > 0 and type(parameters[0]) is list:\n # When piping, all arguments now get merged into a single string to bash -c.\n # We try to support spaces in paths by wrapping them all in quotes first.\n chain_params = [' '.join(p) for p in [map(pipes.quote, q) for q in parameters]]\n call = baseDockerCall + ['--entrypoint', '/bin/bash', tool, '-c', ' | '.join(chain_params)]\n else:\n call = baseDockerCall + [tool] + parameters\n _logger.info(\"Calling docker with \" + repr(call))\n\n if outfile:\n subprocess.check_call(call, stdout=outfile)\n else:\n if checkOutput:\n return subprocess.check_output(call)\n else:\n subprocess.check_call(call)\n\n\nFORGO = 0\nSTOP = 1\nRM = 2\n\n\ndef _dockerKill(containerName, action):\n \"\"\"\n Kills the specified container.\n :param str containerName: The name of the container created by docker_call\n :param int action: What action should be taken on the container? See `defer=` in\n :func:`docker_call`\n \"\"\"\n running = _containerIsRunning(containerName)\n if running is None:\n # This means that the container doesn't exist. We will see this if the container was run\n # with --rm and has already exited before this call.\n _logger.info('The container with name \"%s\" appears to have already been removed. Nothing to '\n 'do.', containerName)\n else:\n if action in (None, FORGO):\n _logger.info('The container with name %s continues to exist as we were asked to forgo a '\n 'post-job action on it.', containerName)\n else:\n _logger.info('The container with name %s exists. 
Running user-specified defer functions.',\n containerName)\n if running and action >= STOP:\n _logger.info('Stopping container \"%s\".', containerName)\n subprocess.check_call(['docker', 'stop', containerName])\n else:\n _logger.info('The container \"%s\" was not found to be running.', containerName)\n if action >= RM:\n # If the container was run with --rm, then stop will most likely remove the\n # container. We first check if it is running then remove it.\n running = _containerIsRunning(containerName)\n if running is not None:\n _logger.info('Removing container \"%s\".', containerName)\n subprocess.check_call(['docker', 'rm', '-f', containerName])\n else:\n _logger.info('The container \"%s\" was not found on the system. Nothing to remove.',\n containerName)\n\n\ndef _fixPermissions(tool, workDir):\n \"\"\"\n Fix permission of a mounted Docker directory by reusing the tool to change ownership.\n Docker natively runs as a root inside the container, and files written to the\n mounted directory are implicitly owned by root.\n\n :param list baseDockerCall: Docker run parameters\n :param str tool: Name of tool\n :param str workDir: Path of work directory to recursively chown\n \"\"\"\n baseDockerCall = ['docker', 'run', '--log-driver=none',\n '-v', os.path.abspath(workDir) + ':/data', '--rm', '--entrypoint=chown']\n stat = os.stat(workDir)\n command = baseDockerCall + [tool] + ['-R', '{}:{}'.format(stat.st_uid, stat.st_gid), '/data']\n subprocess.check_call(command)\n\n\ndef _getContainerName(job):\n return '--'.join([str(job),\n job.fileStore.jobID,\n base64.b64encode(os.urandom(9), '-_')]).replace(\"'\", '').replace('_', '')\n\n\ndef _containerIsRunning(container_name):\n \"\"\"\n Checks whether the container is running or not.\n :param container_name: Name of the container being checked.\n :returns: True if running, False if not running, None if the container doesn't exist.\n :rtype: bool\n \"\"\"\n try:\n output = subprocess.check_output(['docker', 'inspect', '--format', '{{.State.Running}}',\n container_name]).strip()\n except subprocess.CalledProcessError:\n # This will be raised if the container didn't exist.\n _logger.debug(\"'docker inspect' failed. Assuming container %s doesn't exist.\", container_name,\n exc_info=True)\n return None\n if output == 'true':\n return True\n elif output == 'false':\n return False\n else:\n raise RuntimeError(\"Got unexpected value for State.Running (%s)\" % output)\n", "path": "src/toil/lib/docker.py"}]}
| 4,069 | 99 |
gh_patches_debug_34864
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-1741
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SMDA* is not business SAP instances
~~~
# cat insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance
*********************************************************
CreationClassName , String , SAPInstance
SID , String , SMA
SystemNumber , String , 98
InstanceName , String , SMDA98
Hostname , String , li-ld-1846
FullQualifiedHostname , String , li-ld-1846.hag.hilti.com
SapVersionInfo , String , 749, patch 200, changelist 1746260
~~~
From Rolf:
> the reported instance SMDA98 is the Solution Manager agent, which the customer wants to use to monitor that system [1]. With newer systems, that can alternatively also be the diagnostic agent (instance name DAA*98).
* this is a typo: DAA should be the SID name
</issue>
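The distinction the issue asks for can be sketched as a simple prefix check on the instance names. Only the SMDA prefix is taken from the report above; any further prefixes would be assumptions, and the non-SMDA names in the example are just typical business instances.

```python
FUNCTION_INSTANCE_PREFIXES = ("SMDA",)  # Solution Manager Diagnostics Agent instances


def split_instances(instance_names):
    """Split instance names into function (monitoring) instances and business instances."""
    function_instances = []
    business_instances = []
    for name in instance_names:
        if name.startswith(FUNCTION_INSTANCE_PREFIXES):
            function_instances.append(name)
        else:
            business_instances.append(name)
    return function_instances, business_instances


# SMDA98 comes from the saphostctrl output above; the others are typical
# NetWeaver / ASCS / HANA instance names used only as an example.
print(split_instances(["SMDA98", "D16", "ASCS52", "HDB16"]))
# (['SMDA98'], ['D16', 'ASCS52', 'HDB16'])
```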
<code>
[start of insights/combiners/sap.py]
1 """
2 Sap - Combiner
3 ==============
4
5 This combiner gets the running SAP instances on the system based on below
6 logic::
7
8 if (SAPLOCALHOST = 'hostname') && InstanceType = D## ) then
9 on this system runs SAP Netweaver Application Server version
10
11 if (SAPLOCALHOST = 'hostname') && InstanceType = ASCS## ) then
12 on this system runs SAP Netweaver Application Server Central Instance
13 version
14
15 if (SAPLOCALHOST = 'hostname') && InstanceType = HDB## ) then
16 on this system runs SAP HANA database version
17
18 Check settings according SAP Notes compiled here:
19 https://wiki.scn.sap.com/wiki/x/rDK7Gg
20
21 """
22
23 from collections import namedtuple
24 from insights import LegacyItemAccess
25 from insights.parsers import SkipException
26 from insights.core.plugins import combiner
27 from insights.combiners.hostname import hostname
28 from insights.parsers.lssap import Lssap
29 from insights.parsers.saphostctrl import SAPHostCtrlInstances
30
31
32 SAPInstances = namedtuple("SAPInstances",
33 field_names=["name", "hostname", "sid", "type", "number", "version"])
34 """namedtuple: Type for storing the SAP instance."""
35
36
37 @combiner(hostname, optional=[SAPHostCtrlInstances, Lssap])
38 class Sap(LegacyItemAccess):
39 """
40 Combiner for analyzing the SAP instances running on the system.
41
42 Prefer SAPHostCtrlInstances to Lssap.
43
44 Examples:
45 >>> type(saps)
46 <class 'insights.combiners.sap.Sap'>
47 >>> saps['D16'].number
48 '16'
49 >>> saps.sid('HDB16')
50 'HA2'
51 >>> saps.hostname('HDB16')
52 'lu0417'
53 >>> 'D22' in saps.local_instances
54 False
55 >>> saps.is_hana
56 True
57 >>> saps.is_netweaver
58 True
59 >>> saps.is_ascs
60 False
61
62 Attributes:
63 all_instances (list): List all the SAP instances listed by the command.
64 local_instances (list): List SAP instances which are running on the system.
65 """
66
67 def __init__(self, hostname, insts, lssap):
68 hn = hostname.hostname
69 self.data = {}
70 self.local_instances = []
71 self.all_instances = []
72 self._types = set()
73 if insts:
74 for inst in insts.data:
75 k = inst['InstanceName']
76 self.all_instances.append(k)
77 if hn == inst['Hostname']:
78 self.local_instances.append(k)
79 self._types.add(inst['InstanceType'])
80 self.data[k] = SAPInstances(k,
81 inst['Hostname'],
82 inst['SID'],
83 inst['InstanceType'],
84 inst['SystemNumber'],
85 inst['SapVersionInfo'])
86 elif lssap:
87 for inst in lssap.data:
88 k = inst['Instance']
89 t = k.rstrip('1234567890')
90 self.all_instances.append(k)
91 if hn == inst['SAPLOCALHOST']:
92 self.local_instances.append(k)
93 self._types.add(t)
94 self.data[k] = SAPInstances(k,
95 inst['SAPLOCALHOST'],
96 inst['SID'],
97 t,
98 inst['Nr'],
99 inst['Version'])
100 else:
101 raise SkipException('No SAP instance.')
102
103 def version(self, instance):
104 """str: Returns the version of the ``instance``."""
105 return self.data[instance].version if instance in self.data else None
106
107 def sid(self, instance):
108 """str: Returns the sid of the ``instance``."""
109 return self.data[instance].sid if instance in self.data else None
110
111 def type(self, instance):
112 """str: Returns the type code of the ``instance``."""
113 return self.data[instance].type if instance in self.data else None
114
115 def hostname(self, instance):
116 """str: Returns the hostname of the ``instance``."""
117 return self.data[instance].hostname if instance in self.data else None
118
119 def number(self, instance):
120 """str: Returns the systeme number of the ``instance``."""
121 return self.data[instance].number if instance in self.data else None
122
123 @property
124 def is_netweaver(self):
125 """bool: SAP Netweaver is running on the system."""
126 return 'D' in self._types
127
128 @property
129 def is_hana(self):
130 """bool: SAP Hana is running on the system."""
131 return 'HDB' in self._types
132
133 @property
134 def is_ascs(self):
135 """bool: SAP System Central Services is running on the system."""
136 return 'ASCS' in self._types
137
[end of insights/combiners/sap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/insights/combiners/sap.py b/insights/combiners/sap.py
--- a/insights/combiners/sap.py
+++ b/insights/combiners/sap.py
@@ -52,6 +52,8 @@
'lu0417'
>>> 'D22' in saps.local_instances
False
+ >>> len(saps.business_instances)
+ 2
>>> saps.is_hana
True
>>> saps.is_netweaver
@@ -60,14 +62,19 @@
False
Attributes:
- all_instances (list): List all the SAP instances listed by the command.
- local_instances (list): List SAP instances which are running on the system.
+ all_instances (list): List of all the SAP instances listed by the command.
+ local_instances (list): List of SAP instances which are running on the system.
+ function_instances (list): List of function SAP instances running on the system.
+ E.g. Diagnostics Agents SMDA97/SMDA98
+ business_instances (list): List of business SAP instances running on the system.
+ E.g. HANA, NetWeaver, ASCS, or others
"""
-
def __init__(self, hostname, insts, lssap):
hn = hostname.hostname
self.data = {}
self.local_instances = []
+ self.business_instances = []
+ self.function_instances = []
self.all_instances = []
self._types = set()
if insts:
@@ -100,6 +107,10 @@
else:
raise SkipException('No SAP instance.')
+ FUNC_INSTS = ('SMDA')
+ for i in self.local_instances:
+ (self.function_instances if i.startswith(FUNC_INSTS) else self.business_instances).append(i)
+
def version(self, instance):
"""str: Returns the version of the ``instance``."""
return self.data[instance].version if instance in self.data else None
@@ -122,7 +133,7 @@
@property
def is_netweaver(self):
- """bool: SAP Netweaver is running on the system."""
+ """bool: SAP NetWeaver is running on the system."""
return 'D' in self._types
@property
|
{"golden_diff": "diff --git a/insights/combiners/sap.py b/insights/combiners/sap.py\n--- a/insights/combiners/sap.py\n+++ b/insights/combiners/sap.py\n@@ -52,6 +52,8 @@\n 'lu0417'\n >>> 'D22' in saps.local_instances\n False\n+ >>> len(saps.business_instances)\n+ 2\n >>> saps.is_hana\n True\n >>> saps.is_netweaver\n@@ -60,14 +62,19 @@\n False\n \n Attributes:\n- all_instances (list): List all the SAP instances listed by the command.\n- local_instances (list): List SAP instances which are running on the system.\n+ all_instances (list): List of all the SAP instances listed by the command.\n+ local_instances (list): List of SAP instances which are running on the system.\n+ function_instances (list): List of function SAP instances running on the system.\n+ E.g. Diagnostics Agents SMDA97/SMDA98\n+ business_instances (list): List of business SAP instances running on the system.\n+ E.g. HANA, NetWeaver, ASCS, or others\n \"\"\"\n-\n def __init__(self, hostname, insts, lssap):\n hn = hostname.hostname\n self.data = {}\n self.local_instances = []\n+ self.business_instances = []\n+ self.function_instances = []\n self.all_instances = []\n self._types = set()\n if insts:\n@@ -100,6 +107,10 @@\n else:\n raise SkipException('No SAP instance.')\n \n+ FUNC_INSTS = ('SMDA')\n+ for i in self.local_instances:\n+ (self.function_instances if i.startswith(FUNC_INSTS) else self.business_instances).append(i)\n+\n def version(self, instance):\n \"\"\"str: Returns the version of the ``instance``.\"\"\"\n return self.data[instance].version if instance in self.data else None\n@@ -122,7 +133,7 @@\n \n @property\n def is_netweaver(self):\n- \"\"\"bool: SAP Netweaver is running on the system.\"\"\"\n+ \"\"\"bool: SAP NetWeaver is running on the system.\"\"\"\n return 'D' in self._types\n \n @property\n", "issue": "SMDA* is not business SAP instances\n~~~\r\n# cat insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance\r\n*********************************************************\r\n CreationClassName , String , SAPInstance\r\n SID , String , SMA\r\n SystemNumber , String , 98\r\n InstanceName , String , SMDA98\r\n Hostname , String , li-ld-1846\r\n FullQualifiedHostname , String , li-ld-1846.hag.hilti.com\r\n SapVersionInfo , String , 749, patch 200, changelist 1746260\r\n~~~\r\n\r\nFrom Rolf:\r\n> the reported instance SMDA98 is the Solution Manager agent, which the customer wants use to monitor that system [1]. 
With newer systems, that can alternatively also be the diagnostic agent (instance name DAA*98).\r\n* here is a typo, DAA should be SID name\n", "before_files": [{"content": "\"\"\"\nSap - Combiner\n==============\n\nThis combiner gets the running SAP instances on the system based on below\nlogic::\n\n if (SAPLOCALHOST = 'hostname') && InstanceType = D## ) then\n on this system runs SAP Netweaver Application Server version\n\n if (SAPLOCALHOST = 'hostname') && InstanceType = ASCS## ) then\n on this system runs SAP Netweaver Application Server Central Instance\n version\n\n if (SAPLOCALHOST = 'hostname') && InstanceType = HDB## ) then\n on this system runs SAP HANA database version\n\nCheck settings according SAP Notes compiled here:\nhttps://wiki.scn.sap.com/wiki/x/rDK7Gg\n\n\"\"\"\n\nfrom collections import namedtuple\nfrom insights import LegacyItemAccess\nfrom insights.parsers import SkipException\nfrom insights.core.plugins import combiner\nfrom insights.combiners.hostname import hostname\nfrom insights.parsers.lssap import Lssap\nfrom insights.parsers.saphostctrl import SAPHostCtrlInstances\n\n\nSAPInstances = namedtuple(\"SAPInstances\",\n field_names=[\"name\", \"hostname\", \"sid\", \"type\", \"number\", \"version\"])\n\"\"\"namedtuple: Type for storing the SAP instance.\"\"\"\n\n\n@combiner(hostname, optional=[SAPHostCtrlInstances, Lssap])\nclass Sap(LegacyItemAccess):\n \"\"\"\n Combiner for analyzing the SAP instances running on the system.\n\n Prefer SAPHostCtrlInstances to Lssap.\n\n Examples:\n >>> type(saps)\n <class 'insights.combiners.sap.Sap'>\n >>> saps['D16'].number\n '16'\n >>> saps.sid('HDB16')\n 'HA2'\n >>> saps.hostname('HDB16')\n 'lu0417'\n >>> 'D22' in saps.local_instances\n False\n >>> saps.is_hana\n True\n >>> saps.is_netweaver\n True\n >>> saps.is_ascs\n False\n\n Attributes:\n all_instances (list): List all the SAP instances listed by the command.\n local_instances (list): List SAP instances which are running on the system.\n \"\"\"\n\n def __init__(self, hostname, insts, lssap):\n hn = hostname.hostname\n self.data = {}\n self.local_instances = []\n self.all_instances = []\n self._types = set()\n if insts:\n for inst in insts.data:\n k = inst['InstanceName']\n self.all_instances.append(k)\n if hn == inst['Hostname']:\n self.local_instances.append(k)\n self._types.add(inst['InstanceType'])\n self.data[k] = SAPInstances(k,\n inst['Hostname'],\n inst['SID'],\n inst['InstanceType'],\n inst['SystemNumber'],\n inst['SapVersionInfo'])\n elif lssap:\n for inst in lssap.data:\n k = inst['Instance']\n t = k.rstrip('1234567890')\n self.all_instances.append(k)\n if hn == inst['SAPLOCALHOST']:\n self.local_instances.append(k)\n self._types.add(t)\n self.data[k] = SAPInstances(k,\n inst['SAPLOCALHOST'],\n inst['SID'],\n t,\n inst['Nr'],\n inst['Version'])\n else:\n raise SkipException('No SAP instance.')\n\n def version(self, instance):\n \"\"\"str: Returns the version of the ``instance``.\"\"\"\n return self.data[instance].version if instance in self.data else None\n\n def sid(self, instance):\n \"\"\"str: Returns the sid of the ``instance``.\"\"\"\n return self.data[instance].sid if instance in self.data else None\n\n def type(self, instance):\n \"\"\"str: Returns the type code of the ``instance``.\"\"\"\n return self.data[instance].type if instance in self.data else None\n\n def hostname(self, instance):\n \"\"\"str: Returns the hostname of the ``instance``.\"\"\"\n return self.data[instance].hostname if instance in self.data else None\n\n def number(self, instance):\n 
\"\"\"str: Returns the systeme number of the ``instance``.\"\"\"\n return self.data[instance].number if instance in self.data else None\n\n @property\n def is_netweaver(self):\n \"\"\"bool: SAP Netweaver is running on the system.\"\"\"\n return 'D' in self._types\n\n @property\n def is_hana(self):\n \"\"\"bool: SAP Hana is running on the system.\"\"\"\n return 'HDB' in self._types\n\n @property\n def is_ascs(self):\n \"\"\"bool: SAP System Central Services is running on the system.\"\"\"\n return 'ASCS' in self._types\n", "path": "insights/combiners/sap.py"}]}
| 2,086 | 525 |
gh_patches_debug_16856
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-3348
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error reporting system tests needed
Follow up to #3263.
</issue>
<code>
[start of error_reporting/nox.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import os
18
19 import nox
20
21
22 LOCAL_DEPS = ('../core/', '../logging/')
23
24
25 @nox.session
26 @nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6'])
27 def unit_tests(session, python_version):
28 """Run the unit test suite."""
29
30 # Run unit tests against all supported versions of Python.
31 session.interpreter = 'python{}'.format(python_version)
32
33 # Install all test dependencies, then install this package in-place.
34 session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
35 session.install('-e', '.')
36
37 # Run py.test against the unit tests.
38 session.run(
39 'py.test', '--quiet', '--cov=google.cloud.error_reporting',
40 '--cov=tests.unit', '--cov-append', '--cov-config=.coveragerc',
41 '--cov-report=', '--cov-fail-under=97', 'tests/unit',
42 )
43
44
45 @nox.session
46 def lint(session):
47 """Run flake8.
48
49 Returns a failure if flake8 finds linting errors or sufficiently
50 serious code quality issues.
51 """
52 session.interpreter = 'python3.6'
53 session.install('flake8', *LOCAL_DEPS)
54 session.install('.')
55 session.run('flake8', 'google/cloud/error_reporting')
56
57
58 @nox.session
59 def lint_setup_py(session):
60 """Verify that setup.py is valid (including RST check)."""
61 session.interpreter = 'python3.6'
62 session.install('docutils', 'Pygments')
63 session.run(
64 'python', 'setup.py', 'check', '--restructuredtext', '--strict')
65
66
67 @nox.session
68 def cover(session):
69 """Run the final coverage report.
70
71 This outputs the coverage report aggregating coverage from the unit
72 test runs (not system test runs), and then erases coverage data.
73 """
74 session.interpreter = 'python3.6'
75 session.install('coverage', 'pytest-cov')
76 session.run('coverage', 'report', '--show-missing', '--fail-under=100')
77 session.run('coverage', 'erase')
78
[end of error_reporting/nox.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/error_reporting/nox.py b/error_reporting/nox.py
--- a/error_reporting/nox.py
+++ b/error_reporting/nox.py
@@ -64,6 +64,28 @@
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
[email protected]
[email protected]('python_version', ['2.7', '3.6'])
+def system_tests(session, python_version):
+ """Run the system test suite."""
+
+ # Sanity check: Only run system tests if the environment variable is set.
+ if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
+ return
+
+ # Run the system tests against latest Python 2 and Python 3 only.
+ session.interpreter = 'python{}'.format(python_version)
+
+ # Install all test dependencies, then install this package into the
+ # virtualenv's dist-packages.
+ session.install('mock', 'pytest', *LOCAL_DEPS)
+ session.install('../test_utils/')
+ session.install('.')
+
+ # Run py.test against the system tests.
+ session.run('py.test', '-vvv', 'tests/system.py')
+
+
@nox.session
def cover(session):
"""Run the final coverage report.
|
{"golden_diff": "diff --git a/error_reporting/nox.py b/error_reporting/nox.py\n--- a/error_reporting/nox.py\n+++ b/error_reporting/nox.py\n@@ -64,6 +64,28 @@\n 'python', 'setup.py', 'check', '--restructuredtext', '--strict')\n \n \[email protected]\[email protected]('python_version', ['2.7', '3.6'])\n+def system_tests(session, python_version):\n+ \"\"\"Run the system test suite.\"\"\"\n+\n+ # Sanity check: Only run system tests if the environment variable is set.\n+ if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n+ return\n+\n+ # Run the system tests against latest Python 2 and Python 3 only.\n+ session.interpreter = 'python{}'.format(python_version)\n+\n+ # Install all test dependencies, then install this package into the\n+ # virtualenv's dist-packages.\n+ session.install('mock', 'pytest', *LOCAL_DEPS)\n+ session.install('../test_utils/')\n+ session.install('.')\n+\n+ # Run py.test against the system tests.\n+ session.run('py.test', '-vvv', 'tests/system.py')\n+\n+\n @nox.session\n def cover(session):\n \"\"\"Run the final coverage report.\n", "issue": "Error reporting system tests needed\nFollow up to #3263.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\n\nimport nox\n\n\nLOCAL_DEPS = ('../core/', '../logging/')\n\n\[email protected]\[email protected]('python_version', ['2.7', '3.4', '3.5', '3.6'])\ndef unit_tests(session, python_version):\n \"\"\"Run the unit test suite.\"\"\"\n\n # Run unit tests against all supported versions of Python.\n session.interpreter = 'python{}'.format(python_version)\n\n # Install all test dependencies, then install this package in-place.\n session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)\n session.install('-e', '.')\n\n # Run py.test against the unit tests.\n session.run(\n 'py.test', '--quiet', '--cov=google.cloud.error_reporting',\n '--cov=tests.unit', '--cov-append', '--cov-config=.coveragerc',\n '--cov-report=', '--cov-fail-under=97', 'tests/unit',\n )\n\n\[email protected]\ndef lint(session):\n \"\"\"Run flake8.\n\n Returns a failure if flake8 finds linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n session.interpreter = 'python3.6'\n session.install('flake8', *LOCAL_DEPS)\n session.install('.')\n session.run('flake8', 'google/cloud/error_reporting')\n\n\[email protected]\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n session.interpreter = 'python3.6'\n session.install('docutils', 'Pygments')\n session.run(\n 'python', 'setup.py', 'check', '--restructuredtext', '--strict')\n\n\[email protected]\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.interpreter = 'python3.6'\n session.install('coverage', 'pytest-cov')\n session.run('coverage', 'report', '--show-missing', 
'--fail-under=100')\n session.run('coverage', 'erase')\n", "path": "error_reporting/nox.py"}]}
| 1,301 | 278 |
gh_patches_debug_12060
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-21310
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
adaptive_avg_pool2d
Close #20804
</issue>
<code>
[start of ivy/functional/frontends/mindspore/ops/function/nn_func.py]
1 """Includes Mindspore Frontend functions listed in the TODO list
2 https://github.com/unifyai/ivy/issues/14951."""
3
4 # local
5 import ivy
6 from ivy.func_wrapper import with_supported_dtypes
7 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
8
9
10 @with_supported_dtypes(
11 {
12 "2.0.0 and below": (
13 "int8",
14 "int16",
15 "int32",
16 "int64",
17 "float16",
18 "float32",
19 "float64",
20 )
21 },
22 "mindspore",
23 )
24 @to_ivy_arrays_and_back
25 def dropout2d(input, p=0.5, training=True):
26 return ivy.dropout2d(input, p, training=training, data_format="NCHW")
27
28
29 @with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
30 @to_ivy_arrays_and_back
31 def selu(input_x):
32 return ivy.selu(input_x)
33
34
35 @with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
36 @to_ivy_arrays_and_back
37 def softsign(x):
38 return ivy.divide(x, ivy.add(1, ivy.abs(x)))
39
40 @with_supported_dtypes({"2.0 and below": ("int8", "int16", "int32", "int64", "float16", "float32", "float64")}, "mindspore")
41 @to_ivy_arrays_and_back
42 def pad(input, pad_width, mode='constant', constant_values=0):
43 return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)
[end of ivy/functional/frontends/mindspore/ops/function/nn_func.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py
+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
@@ -40,4 +40,10 @@
@with_supported_dtypes({"2.0 and below": ("int8", "int16", "int32", "int64", "float16", "float32", "float64")}, "mindspore")
@to_ivy_arrays_and_back
def pad(input, pad_width, mode='constant', constant_values=0):
- return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)
\ No newline at end of file
+ return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)
+
+
+@with_supported_dtypes({"2.0.0 and below": ("float16", "float32", "float64")}, "mindspore")
+@to_ivy_arrays_and_back
+def adaptive_avg_pool2d(input, output_size):
+ return ivy.adaptive_avg_pool2d(input, output_size)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n@@ -40,4 +40,10 @@\n @with_supported_dtypes({\"2.0 and below\": (\"int8\", \"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")}, \"mindspore\")\n @to_ivy_arrays_and_back\n def pad(input, pad_width, mode='constant', constant_values=0):\n- return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)\n\\ No newline at end of file\n+ return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)\n+\n+\n+@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\", \"float64\")}, \"mindspore\")\n+@to_ivy_arrays_and_back\n+def adaptive_avg_pool2d(input, output_size):\n+ return ivy.adaptive_avg_pool2d(input, output_size)\n", "issue": "adaptive_avg_pool2d\nClose #20804\n", "before_files": [{"content": "\"\"\"Includes Mindspore Frontend functions listed in the TODO list\nhttps://github.com/unifyai/ivy/issues/14951.\"\"\"\n\n# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_supported_dtypes(\n {\n \"2.0.0 and below\": (\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n )\n },\n \"mindspore\",\n)\n@to_ivy_arrays_and_back\ndef dropout2d(input, p=0.5, training=True):\n return ivy.dropout2d(input, p, training=training, data_format=\"NCHW\")\n\n\n@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef selu(input_x):\n return ivy.selu(input_x)\n\n\n@with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef softsign(x):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n\n@with_supported_dtypes({\"2.0 and below\": (\"int8\", \"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef pad(input, pad_width, mode='constant', constant_values=0):\n return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)", "path": "ivy/functional/frontends/mindspore/ops/function/nn_func.py"}]}
| 1,048 | 294 |
gh_patches_debug_3752
|
rasdani/github-patches
|
git_diff
|
ranaroussi__yfinance-941
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ticker.history: history's index is not of type DateTimeIndex when dividend and split tables are empty
**Issue**:
Ticker.history (line 295):
> df.index = df.index.tz_localize("UTC").tz_convert(data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
E AttributeError: 'Index' object has no attribute 'tz_localize'
**Reason**:
During the concatenation of a proper price table with empty dividend and split tables, the price table's index has changed from _pandas.DatetimeIndex_ to _pandas.Index_ because the empty tables had this index type.
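A minimal sketch of this demotion (hypothetical frames, not taken from yfinance itself; behaviour as described with the pandas version listed below):

```python
import pandas as pd  # pandas 1.4.0, per the versions below

prices = pd.DataFrame(
    {"Close": [1.0, 2.0]},
    index=pd.to_datetime(["2022-01-03", "2022-01-04"]),  # DatetimeIndex
)
dividends = pd.DataFrame(columns=["Dividends"])  # empty frame with a plain default Index

merged = pd.concat([prices, dividends], axis=1)
print(type(prices.index))  # DatetimeIndex
print(type(merged.index))  # plain Index here, so .tz_localize() is no longer available
```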
**Proposed solution**:
Modify _parse_actions_ (in utils.py) to initialise the dividend and split tables with :
`dividends = _pd.DataFrame(columns=["Dividends"], index=_pd.DatetimeIndex([]))`
`splits = _pd.DataFrame(columns=["Stock Splits"], index=_pd.DatetimeIndex([]))`
Local modules' versions:
finance : 0.1.69
pandas : 1.4.0
python : 3.10
</issue>
<code>
[start of yfinance/utils.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # yfinance - market data downloader
5 # https://github.com/ranaroussi/yfinance
6 #
7 # Copyright 2017-2019 Ran Aroussi
8 #
9 # Licensed under the Apache License, Version 2.0 (the "License");
10 # you may not use this file except in compliance with the License.
11 # You may obtain a copy of the License at
12 #
13 # http://www.apache.org/licenses/LICENSE-2.0
14 #
15 # Unless required by applicable law or agreed to in writing, software
16 # distributed under the License is distributed on an "AS IS" BASIS,
17 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 # See the License for the specific language governing permissions and
19 # limitations under the License.
20 #
21
22 from __future__ import print_function
23
24 import requests as _requests
25 import re as _re
26 import pandas as _pd
27 import numpy as _np
28 import sys as _sys
29
30 try:
31 import ujson as _json
32 except ImportError:
33 import json as _json
34
35
36 user_agent_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
37
38
39 def is_isin(string):
40 return bool(_re.match("^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$", string))
41
42
43 def get_all_by_isin(isin, proxy=None, session=None):
44 if not(is_isin(isin)):
45 raise ValueError("Invalid ISIN number")
46
47 from .base import _BASE_URL_
48 session = session or _requests
49 url = "{}/v1/finance/search?q={}".format(_BASE_URL_, isin)
50 data = session.get(url=url, proxies=proxy, headers=user_agent_headers)
51 try:
52 data = data.json()
53 ticker = data.get('quotes', [{}])[0]
54 return {
55 'ticker': {
56 'symbol': ticker['symbol'],
57 'shortname': ticker['shortname'],
58 'longname': ticker['longname'],
59 'type': ticker['quoteType'],
60 'exchange': ticker['exchDisp'],
61 },
62 'news': data.get('news', [])
63 }
64 except Exception:
65 return {}
66
67
68 def get_ticker_by_isin(isin, proxy=None, session=None):
69 data = get_all_by_isin(isin, proxy, session)
70 return data.get('ticker', {}).get('symbol', '')
71
72
73 def get_info_by_isin(isin, proxy=None, session=None):
74 data = get_all_by_isin(isin, proxy, session)
75 return data.get('ticker', {})
76
77
78 def get_news_by_isin(isin, proxy=None, session=None):
79 data = get_all_by_isin(isin, proxy, session)
80 return data.get('news', {})
81
82
83 def empty_df(index=[]):
84 empty = _pd.DataFrame(index=index, data={
85 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,
86 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})
87 empty.index.name = 'Date'
88 return empty
89
90
91 def get_html(url, proxy=None, session=None):
92 session = session or _requests
93 html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text
94 return html
95
96
97 def get_json(url, proxy=None, session=None):
98 session = session or _requests
99 html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text
100
101 if "QuoteSummaryStore" not in html:
102 html = session.get(url=url, proxies=proxy).text
103 if "QuoteSummaryStore" not in html:
104 return {}
105
106 json_str = html.split('root.App.main =')[1].split(
107 '(this)')[0].split(';\n}')[0].strip()
108 data = _json.loads(json_str)[
109 'context']['dispatcher']['stores']['QuoteSummaryStore']
110 # add data about Shares Outstanding for companies' tickers if they are available
111 try:
112 data['annualBasicAverageShares'] = _json.loads(json_str)[
113 'context']['dispatcher']['stores']['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']
114 except Exception:
115 pass
116
117 # return data
118 new_data = _json.dumps(data).replace('{}', 'null')
119 new_data = _re.sub(
120 r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', new_data)
121
122 return _json.loads(new_data)
123
124
125 def camel2title(o):
126 return [_re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", i).title() for i in o]
127
128
129 def auto_adjust(data):
130 df = data.copy()
131 ratio = df["Close"] / df["Adj Close"]
132 df["Adj Open"] = df["Open"] / ratio
133 df["Adj High"] = df["High"] / ratio
134 df["Adj Low"] = df["Low"] / ratio
135
136 df.drop(
137 ["Open", "High", "Low", "Close"],
138 axis=1, inplace=True)
139
140 df.rename(columns={
141 "Adj Open": "Open", "Adj High": "High",
142 "Adj Low": "Low", "Adj Close": "Close"
143 }, inplace=True)
144
145 df = df[["Open", "High", "Low", "Close", "Volume"]]
146 return df[["Open", "High", "Low", "Close", "Volume"]]
147
148
149 def back_adjust(data):
150 """ back-adjusted data to mimic true historical prices """
151
152 df = data.copy()
153 ratio = df["Adj Close"] / df["Close"]
154 df["Adj Open"] = df["Open"] * ratio
155 df["Adj High"] = df["High"] * ratio
156 df["Adj Low"] = df["Low"] * ratio
157
158 df.drop(
159 ["Open", "High", "Low", "Adj Close"],
160 axis=1, inplace=True)
161
162 df.rename(columns={
163 "Adj Open": "Open", "Adj High": "High",
164 "Adj Low": "Low"
165 }, inplace=True)
166
167 return df[["Open", "High", "Low", "Close", "Volume"]]
168
169
170 def parse_quotes(data, tz=None):
171 timestamps = data["timestamp"]
172 ohlc = data["indicators"]["quote"][0]
173 volumes = ohlc["volume"]
174 opens = ohlc["open"]
175 closes = ohlc["close"]
176 lows = ohlc["low"]
177 highs = ohlc["high"]
178
179 adjclose = closes
180 if "adjclose" in data["indicators"]:
181 adjclose = data["indicators"]["adjclose"][0]["adjclose"]
182
183 quotes = _pd.DataFrame({"Open": opens,
184 "High": highs,
185 "Low": lows,
186 "Close": closes,
187 "Adj Close": adjclose,
188 "Volume": volumes})
189
190 quotes.index = _pd.to_datetime(timestamps, unit="s")
191 quotes.sort_index(inplace=True)
192
193 if tz is not None:
194 quotes.index = quotes.index.tz_localize(tz)
195
196 return quotes
197
198
199 def parse_actions(data, tz=None):
200 dividends = _pd.DataFrame(columns=["Dividends"])
201 splits = _pd.DataFrame(columns=["Stock Splits"])
202
203 if "events" in data:
204 if "dividends" in data["events"]:
205 dividends = _pd.DataFrame(
206 data=list(data["events"]["dividends"].values()))
207 dividends.set_index("date", inplace=True)
208 dividends.index = _pd.to_datetime(dividends.index, unit="s")
209 dividends.sort_index(inplace=True)
210 if tz is not None:
211 dividends.index = dividends.index.tz_localize(tz)
212
213 dividends.columns = ["Dividends"]
214
215 if "splits" in data["events"]:
216 splits = _pd.DataFrame(
217 data=list(data["events"]["splits"].values()))
218 splits.set_index("date", inplace=True)
219 splits.index = _pd.to_datetime(splits.index, unit="s")
220 splits.sort_index(inplace=True)
221 if tz is not None:
222 splits.index = splits.index.tz_localize(tz)
223 splits["Stock Splits"] = splits["numerator"] / \
224 splits["denominator"]
225 splits = splits["Stock Splits"]
226
227 return dividends, splits
228
229
230 class ProgressBar:
231 def __init__(self, iterations, text='completed'):
232 self.text = text
233 self.iterations = iterations
234 self.prog_bar = '[]'
235 self.fill_char = '*'
236 self.width = 50
237 self.__update_amount(0)
238 self.elapsed = 1
239
240 def completed(self):
241 if self.elapsed > self.iterations:
242 self.elapsed = self.iterations
243 self.update_iteration(1)
244 print('\r' + str(self), end='')
245 _sys.stdout.flush()
246 print()
247
248 def animate(self, iteration=None):
249 if iteration is None:
250 self.elapsed += 1
251 iteration = self.elapsed
252 else:
253 self.elapsed += iteration
254
255 print('\r' + str(self), end='')
256 _sys.stdout.flush()
257 self.update_iteration()
258
259 def update_iteration(self, val=None):
260 val = val if val is not None else self.elapsed / float(self.iterations)
261 self.__update_amount(val * 100.0)
262 self.prog_bar += ' %s of %s %s' % (
263 self.elapsed, self.iterations, self.text)
264
265 def __update_amount(self, new_amount):
266 percent_done = int(round((new_amount / 100.0) * 100.0))
267 all_full = self.width - 2
268 num_hashes = int(round((percent_done / 100.0) * all_full))
269 self.prog_bar = '[' + self.fill_char * \
270 num_hashes + ' ' * (all_full - num_hashes) + ']'
271 pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
272 pct_string = '%d%%' % percent_done
273 self.prog_bar = self.prog_bar[0:pct_place] + \
274 (pct_string + self.prog_bar[pct_place + len(pct_string):])
275
276 def __str__(self):
277 return str(self.prog_bar)
278
[end of yfinance/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yfinance/utils.py b/yfinance/utils.py
--- a/yfinance/utils.py
+++ b/yfinance/utils.py
@@ -197,8 +197,8 @@
def parse_actions(data, tz=None):
- dividends = _pd.DataFrame(columns=["Dividends"])
- splits = _pd.DataFrame(columns=["Stock Splits"])
+ dividends = _pd.DataFrame(columns=["Dividends"], index=_pd.DatetimeIndex([]))
+ splits = _pd.DataFrame(columns=["Stock Splits"], index=_pd.DatetimeIndex([]))
if "events" in data:
if "dividends" in data["events"]:
|
{"golden_diff": "diff --git a/yfinance/utils.py b/yfinance/utils.py\n--- a/yfinance/utils.py\n+++ b/yfinance/utils.py\n@@ -197,8 +197,8 @@\n \n \n def parse_actions(data, tz=None):\n- dividends = _pd.DataFrame(columns=[\"Dividends\"])\n- splits = _pd.DataFrame(columns=[\"Stock Splits\"])\n+ dividends = _pd.DataFrame(columns=[\"Dividends\"], index=_pd.DatetimeIndex([]))\n+ splits = _pd.DataFrame(columns=[\"Stock Splits\"], index=_pd.DatetimeIndex([]))\n \n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n", "issue": "Ticker.history: history's index is not of type DateTimeIndex when dividend and split tables are empty\n**Issue**:\r\n\r\nTicker.history (line 295):\r\n> df.index = df.index.tz_localize(\"UTC\").tz_convert(data[\"chart\"][\"result\"][0][\"meta\"][\"exchangeTimezoneName\"])\r\nE AttributeError: 'Index' object has no attribute 'tz_localize'\r\n\r\n**Reason**: \r\n\r\nDuring the concatenation of a proper price table with empty dividend and split tables, the price table's index has changed from _pandas.DatetimeIndex_ to _pandas.Index_ because the empty tables had this index type.\r\n\r\n**Proposed solution**: \r\n\r\nModify _parse_actions_ (in utils.py) to initialise the dividend and split tables with : \r\n`dividends = _pd.DataFrame(columns=[\"Dividends\"], index=_pd.DatetimeIndex([]))`\r\n `splits = _pd.DataFrame(columns=[\"Stock Splits\"], index=_pd.DatetimeIndex([]))`\r\n\r\n\r\nLocal modules' versions:\r\n\r\nfinance : 0.1.69\r\npandas : 1.4.0\r\npython : 3.10\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\nuser_agent_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\ndef is_isin(string):\n return bool(_re.match(\"^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$\", string))\n\n\ndef get_all_by_isin(isin, proxy=None, session=None):\n if not(is_isin(isin)):\n raise ValueError(\"Invalid ISIN number\")\n\n from .base import _BASE_URL_\n session = session or _requests\n url = \"{}/v1/finance/search?q={}\".format(_BASE_URL_, isin)\n data = session.get(url=url, proxies=proxy, headers=user_agent_headers)\n try:\n data = data.json()\n ticker = data.get('quotes', [{}])[0]\n return {\n 'ticker': {\n 'symbol': ticker['symbol'],\n 'shortname': ticker['shortname'],\n 'longname': ticker['longname'],\n 'type': ticker['quoteType'],\n 'exchange': ticker['exchDisp'],\n },\n 'news': data.get('news', [])\n }\n except Exception:\n return {}\n\n\ndef get_ticker_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return 
data.get('ticker', {}).get('symbol', '')\n\n\ndef get_info_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {})\n\n\ndef get_news_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('news', {})\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef get_html(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n return html\n\n\ndef get_json(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n\n if \"QuoteSummaryStore\" not in html:\n html = session.get(url=url, proxies=proxy).text\n if \"QuoteSummaryStore\" not in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)[\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\n # add data about Shares Outstanding for companies' tickers if they are available\n try:\n data['annualBasicAverageShares'] = _json.loads(json_str)[\n 'context']['dispatcher']['stores']['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']\n except Exception:\n pass\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data, tz=None):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n if tz is not 
None:\n quotes.index = quotes.index.tz_localize(tz)\n\n return quotes\n\n\ndef parse_actions(data, tz=None):\n dividends = _pd.DataFrame(columns=[\"Dividends\"])\n splits = _pd.DataFrame(columns=[\"Stock Splits\"])\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n if tz is not None:\n dividends.index = dividends.index.tz_localize(tz)\n\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n if tz is not None:\n splits.index = splits.index.tz_localize(tz)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n", "path": "yfinance/utils.py"}]}
| 3,816 | 140 |
gh_patches_debug_38882
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-2851
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix defensive assertions for Ray Client users
Related to https://github.com/ray-project/ray/issues/14042
For now, we will check for both types.
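A rough sketch of what that check could look like — the import path below is the assumption (Ray Client's `ClientObjectRef` only exists on newer Ray releases), not a confirmed Modin API:

```python
import ray

try:
    # Ray Client sessions hand back ClientObjectRef instead of ObjectRef.
    from ray.util.client.common import ClientObjectRef  # assumed available on Ray >= 1.2
    OBJECT_REF_TYPES = (ray.ObjectRef, ClientObjectRef)
except ImportError:
    OBJECT_REF_TYPES = (ray.ObjectRef,)


def check_ref(obj):
    # Defensive assertion that holds for both local workers and Ray Client users.
    assert isinstance(obj, OBJECT_REF_TYPES)
```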
</issue>
<code>
[start of modin/engines/ray/pandas_on_ray/frame/partition.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 import pandas
15
16 from modin.data_management.utils import length_fn_pandas, width_fn_pandas
17 from modin.engines.base.frame.partition import BaseFramePartition
18 from modin.engines.ray.utils import handle_ray_task_error
19
20 import ray
21 from ray.worker import RayTaskError
22 from ray.services import get_node_ip_address
23
24
25 class PandasOnRayFramePartition(BaseFramePartition):
26 def __init__(self, object_id, length=None, width=None, ip=None, call_queue=None):
27 assert type(object_id) is ray.ObjectRef
28
29 self.oid = object_id
30 if call_queue is None:
31 call_queue = []
32 self.call_queue = call_queue
33 self._length_cache = length
34 self._width_cache = width
35 self._ip_cache = ip
36
37 def get(self):
38 """Gets the object out of the plasma store.
39
40 Returns:
41 The object from the plasma store.
42 """
43 if len(self.call_queue):
44 self.drain_call_queue()
45 try:
46 return ray.get(self.oid)
47 except RayTaskError as e:
48 handle_ray_task_error(e)
49
50 def apply(self, func, **kwargs):
51 """Apply a function to the object stored in this partition.
52
53 Note: It does not matter if func is callable or an ObjectRef. Ray will
54 handle it correctly either way. The keyword arguments are sent as a
55 dictionary.
56
57 Args:
58 func: The function to apply.
59
60 Returns:
61 A RayRemotePartition object.
62 """
63 oid = self.oid
64 call_queue = self.call_queue + [(func, kwargs)]
65 result, length, width, ip = deploy_ray_func.remote(call_queue, oid)
66 return PandasOnRayFramePartition(result, length, width, ip)
67
68 def add_to_apply_calls(self, func, **kwargs):
69 return PandasOnRayFramePartition(
70 self.oid, call_queue=self.call_queue + [(func, kwargs)]
71 )
72
73 def drain_call_queue(self):
74 if len(self.call_queue) == 0:
75 return
76 oid = self.oid
77 call_queue = self.call_queue
78 (
79 self.oid,
80 self._length_cache,
81 self._width_cache,
82 self._ip_cache,
83 ) = deploy_ray_func.remote(call_queue, oid)
84 self.call_queue = []
85
86 def wait(self):
87 self.drain_call_queue()
88 try:
89 ray.wait([self.oid])
90 except RayTaskError as e:
91 handle_ray_task_error(e)
92
93 def __copy__(self):
94 return PandasOnRayFramePartition(
95 self.oid,
96 length=self._length_cache,
97 width=self._width_cache,
98 ip=self._ip_cache,
99 call_queue=self.call_queue,
100 )
101
102 def to_pandas(self):
103 """Convert the object stored in this partition to a Pandas DataFrame.
104
105 Returns:
106 A Pandas DataFrame.
107 """
108 dataframe = self.get()
109 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
110 return dataframe
111
112 def to_numpy(self, **kwargs):
113 """
114 Convert the object stored in this partition to a NumPy array.
115
116 Returns
117 -------
118 A NumPy array.
119 """
120 return self.apply(lambda df, **kwargs: df.to_numpy(**kwargs)).get()
121
122 def mask(self, row_indices, col_indices):
123 if (
124 (isinstance(row_indices, slice) and row_indices == slice(None))
125 or (
126 not isinstance(row_indices, slice)
127 and self._length_cache is not None
128 and len(row_indices) == self._length_cache
129 )
130 ) and (
131 (isinstance(col_indices, slice) and col_indices == slice(None))
132 or (
133 not isinstance(col_indices, slice)
134 and self._width_cache is not None
135 and len(col_indices) == self._width_cache
136 )
137 ):
138 return self.__copy__()
139
140 new_obj = self.add_to_apply_calls(
141 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
142 )
143 if not isinstance(row_indices, slice):
144 new_obj._length_cache = len(row_indices)
145 if not isinstance(col_indices, slice):
146 new_obj._width_cache = len(col_indices)
147 return new_obj
148
149 @classmethod
150 def put(cls, obj):
151 """Put an object in the Plasma store and wrap it in this object.
152
153 Args:
154 obj: The object to be put.
155
156 Returns:
157 A `RayRemotePartition` object.
158 """
159 return PandasOnRayFramePartition(ray.put(obj), len(obj.index), len(obj.columns))
160
161 @classmethod
162 def preprocess_func(cls, func):
163 """Put a callable function into the plasma store for use in `apply`.
164
165 Args:
166 func: The function to preprocess.
167
168 Returns:
169 A ray.ObjectRef.
170 """
171 return ray.put(func)
172
173 def length(self):
174 if self._length_cache is None:
175 if len(self.call_queue):
176 self.drain_call_queue()
177 else:
178 self._length_cache, self._width_cache = get_index_and_columns.remote(
179 self.oid
180 )
181 if isinstance(self._length_cache, ray.ObjectRef):
182 try:
183 self._length_cache = ray.get(self._length_cache)
184 except RayTaskError as e:
185 handle_ray_task_error(e)
186 return self._length_cache
187
188 def width(self):
189 if self._width_cache is None:
190 if len(self.call_queue):
191 self.drain_call_queue()
192 else:
193 self._length_cache, self._width_cache = get_index_and_columns.remote(
194 self.oid
195 )
196 if isinstance(self._width_cache, ray.ObjectRef):
197 try:
198 self._width_cache = ray.get(self._width_cache)
199 except RayTaskError as e:
200 handle_ray_task_error(e)
201 return self._width_cache
202
203 def ip(self):
204 if self._ip_cache is None:
205 if len(self.call_queue):
206 self.drain_call_queue()
207 else:
208 self._ip_cache = self.apply(lambda df: df)._ip_cache
209 if isinstance(self._ip_cache, ray.ObjectID):
210 try:
211 self._ip_cache = ray.get(self._ip_cache)
212 except RayTaskError as e:
213 handle_ray_task_error(e)
214 return self._ip_cache
215
216 @classmethod
217 def length_extraction_fn(cls):
218 return length_fn_pandas
219
220 @classmethod
221 def width_extraction_fn(cls):
222 return width_fn_pandas
223
224 @classmethod
225 def empty(cls):
226 return cls.put(pandas.DataFrame())
227
228
229 @ray.remote(num_returns=2)
230 def get_index_and_columns(df):
231 return len(df.index), len(df.columns)
232
233
234 @ray.remote(num_returns=4)
235 def deploy_ray_func(call_queue, partition): # pragma: no cover
236 def deserialize(obj):
237 if isinstance(obj, ray.ObjectRef):
238 return ray.get(obj)
239 return obj
240
241 if len(call_queue) > 1:
242 for func, kwargs in call_queue[:-1]:
243 func = deserialize(func)
244 kwargs = deserialize(kwargs)
245 try:
246 partition = func(partition, **kwargs)
247 except ValueError:
248 partition = func(partition.copy(), **kwargs)
249 func, kwargs = call_queue[-1]
250 func = deserialize(func)
251 kwargs = deserialize(kwargs)
252 try:
253 result = func(partition, **kwargs)
254 # Sometimes Arrow forces us to make a copy of an object before we operate on it. We
255 # don't want the error to propagate to the user, and we want to avoid copying unless
256 # we absolutely have to.
257 except ValueError:
258 result = func(partition.copy(), **kwargs)
259 return (
260 result,
261 len(result) if hasattr(result, "__len__") else 0,
262 len(result.columns) if hasattr(result, "columns") else 0,
263 get_node_ip_address(),
264 )
265
[end of modin/engines/ray/pandas_on_ray/frame/partition.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/modin/engines/ray/pandas_on_ray/frame/partition.py b/modin/engines/ray/pandas_on_ray/frame/partition.py
--- a/modin/engines/ray/pandas_on_ray/frame/partition.py
+++ b/modin/engines/ray/pandas_on_ray/frame/partition.py
@@ -20,11 +20,18 @@
import ray
from ray.worker import RayTaskError
from ray.services import get_node_ip_address
+from packaging import version
+
+ObjectIDType = ray.ObjectRef
+if version.parse(ray.__version__) >= version.parse("1.2.0"):
+ from ray.util.client.common import ClientObjectRef
+
+ ObjectIDType = (ray.ObjectRef, ClientObjectRef)
class PandasOnRayFramePartition(BaseFramePartition):
def __init__(self, object_id, length=None, width=None, ip=None, call_queue=None):
- assert type(object_id) is ray.ObjectRef
+ assert isinstance(object_id, ObjectIDType)
self.oid = object_id
if call_queue is None:
@@ -178,7 +185,7 @@
self._length_cache, self._width_cache = get_index_and_columns.remote(
self.oid
)
- if isinstance(self._length_cache, ray.ObjectRef):
+ if isinstance(self._length_cache, ObjectIDType):
try:
self._length_cache = ray.get(self._length_cache)
except RayTaskError as e:
@@ -193,7 +200,7 @@
self._length_cache, self._width_cache = get_index_and_columns.remote(
self.oid
)
- if isinstance(self._width_cache, ray.ObjectRef):
+ if isinstance(self._width_cache, ObjectIDType):
try:
self._width_cache = ray.get(self._width_cache)
except RayTaskError as e:
@@ -206,7 +213,7 @@
self.drain_call_queue()
else:
self._ip_cache = self.apply(lambda df: df)._ip_cache
- if isinstance(self._ip_cache, ray.ObjectID):
+ if isinstance(self._ip_cache, ObjectIDType):
try:
self._ip_cache = ray.get(self._ip_cache)
except RayTaskError as e:
@@ -234,7 +241,7 @@
@ray.remote(num_returns=4)
def deploy_ray_func(call_queue, partition): # pragma: no cover
def deserialize(obj):
- if isinstance(obj, ray.ObjectRef):
+ if isinstance(obj, ObjectIDType):
return ray.get(obj)
return obj
|
{"golden_diff": "diff --git a/modin/engines/ray/pandas_on_ray/frame/partition.py b/modin/engines/ray/pandas_on_ray/frame/partition.py\n--- a/modin/engines/ray/pandas_on_ray/frame/partition.py\n+++ b/modin/engines/ray/pandas_on_ray/frame/partition.py\n@@ -20,11 +20,18 @@\n import ray\n from ray.worker import RayTaskError\n from ray.services import get_node_ip_address\n+from packaging import version\n+\n+ObjectIDType = ray.ObjectRef\n+if version.parse(ray.__version__) >= version.parse(\"1.2.0\"):\n+ from ray.util.client.common import ClientObjectRef\n+\n+ ObjectIDType = (ray.ObjectRef, ClientObjectRef)\n \n \n class PandasOnRayFramePartition(BaseFramePartition):\n def __init__(self, object_id, length=None, width=None, ip=None, call_queue=None):\n- assert type(object_id) is ray.ObjectRef\n+ assert isinstance(object_id, ObjectIDType)\n \n self.oid = object_id\n if call_queue is None:\n@@ -178,7 +185,7 @@\n self._length_cache, self._width_cache = get_index_and_columns.remote(\n self.oid\n )\n- if isinstance(self._length_cache, ray.ObjectRef):\n+ if isinstance(self._length_cache, ObjectIDType):\n try:\n self._length_cache = ray.get(self._length_cache)\n except RayTaskError as e:\n@@ -193,7 +200,7 @@\n self._length_cache, self._width_cache = get_index_and_columns.remote(\n self.oid\n )\n- if isinstance(self._width_cache, ray.ObjectRef):\n+ if isinstance(self._width_cache, ObjectIDType):\n try:\n self._width_cache = ray.get(self._width_cache)\n except RayTaskError as e:\n@@ -206,7 +213,7 @@\n self.drain_call_queue()\n else:\n self._ip_cache = self.apply(lambda df: df)._ip_cache\n- if isinstance(self._ip_cache, ray.ObjectID):\n+ if isinstance(self._ip_cache, ObjectIDType):\n try:\n self._ip_cache = ray.get(self._ip_cache)\n except RayTaskError as e:\n@@ -234,7 +241,7 @@\n @ray.remote(num_returns=4)\n def deploy_ray_func(call_queue, partition): # pragma: no cover\n def deserialize(obj):\n- if isinstance(obj, ray.ObjectRef):\n+ if isinstance(obj, ObjectIDType):\n return ray.get(obj)\n return obj\n", "issue": "Fix defensive assertions for Ray Client users\nRelated to https://github.com/ray-project/ray/issues/14042\r\n\r\nFor now, we will check for both types.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pandas\n\nfrom modin.data_management.utils import length_fn_pandas, width_fn_pandas\nfrom modin.engines.base.frame.partition import BaseFramePartition\nfrom modin.engines.ray.utils import handle_ray_task_error\n\nimport ray\nfrom ray.worker import RayTaskError\nfrom ray.services import get_node_ip_address\n\n\nclass PandasOnRayFramePartition(BaseFramePartition):\n def __init__(self, object_id, length=None, width=None, ip=None, call_queue=None):\n assert type(object_id) is ray.ObjectRef\n\n self.oid = object_id\n if call_queue is None:\n call_queue = []\n self.call_queue = call_queue\n self._length_cache = length\n self._width_cache = width\n self._ip_cache = ip\n\n def get(self):\n \"\"\"Gets the object out of the plasma store.\n\n Returns:\n The object from the plasma store.\n \"\"\"\n if len(self.call_queue):\n self.drain_call_queue()\n try:\n return ray.get(self.oid)\n except RayTaskError as e:\n handle_ray_task_error(e)\n\n def apply(self, func, **kwargs):\n \"\"\"Apply a function to the object stored in this partition.\n\n Note: It does not matter if func is callable or an ObjectRef. Ray will\n handle it correctly either way. The keyword arguments are sent as a\n dictionary.\n\n Args:\n func: The function to apply.\n\n Returns:\n A RayRemotePartition object.\n \"\"\"\n oid = self.oid\n call_queue = self.call_queue + [(func, kwargs)]\n result, length, width, ip = deploy_ray_func.remote(call_queue, oid)\n return PandasOnRayFramePartition(result, length, width, ip)\n\n def add_to_apply_calls(self, func, **kwargs):\n return PandasOnRayFramePartition(\n self.oid, call_queue=self.call_queue + [(func, kwargs)]\n )\n\n def drain_call_queue(self):\n if len(self.call_queue) == 0:\n return\n oid = self.oid\n call_queue = self.call_queue\n (\n self.oid,\n self._length_cache,\n self._width_cache,\n self._ip_cache,\n ) = deploy_ray_func.remote(call_queue, oid)\n self.call_queue = []\n\n def wait(self):\n self.drain_call_queue()\n try:\n ray.wait([self.oid])\n except RayTaskError as e:\n handle_ray_task_error(e)\n\n def __copy__(self):\n return PandasOnRayFramePartition(\n self.oid,\n length=self._length_cache,\n width=self._width_cache,\n ip=self._ip_cache,\n call_queue=self.call_queue,\n )\n\n def to_pandas(self):\n \"\"\"Convert the object stored in this partition to a Pandas DataFrame.\n\n Returns:\n A Pandas DataFrame.\n \"\"\"\n dataframe = self.get()\n assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series\n return dataframe\n\n def to_numpy(self, **kwargs):\n \"\"\"\n Convert the object stored in this partition to a NumPy array.\n\n Returns\n -------\n A NumPy array.\n \"\"\"\n return self.apply(lambda df, **kwargs: df.to_numpy(**kwargs)).get()\n\n def mask(self, row_indices, col_indices):\n if (\n (isinstance(row_indices, slice) and row_indices == slice(None))\n or (\n not isinstance(row_indices, slice)\n and self._length_cache is not None\n and len(row_indices) == self._length_cache\n )\n ) and (\n (isinstance(col_indices, slice) and col_indices == slice(None))\n or (\n not isinstance(col_indices, slice)\n and self._width_cache is not None\n and len(col_indices) == self._width_cache\n )\n ):\n return self.__copy__()\n\n new_obj = self.add_to_apply_calls(\n lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])\n )\n if not isinstance(row_indices, slice):\n new_obj._length_cache = len(row_indices)\n if not isinstance(col_indices, slice):\n 
new_obj._width_cache = len(col_indices)\n return new_obj\n\n @classmethod\n def put(cls, obj):\n \"\"\"Put an object in the Plasma store and wrap it in this object.\n\n Args:\n obj: The object to be put.\n\n Returns:\n A `RayRemotePartition` object.\n \"\"\"\n return PandasOnRayFramePartition(ray.put(obj), len(obj.index), len(obj.columns))\n\n @classmethod\n def preprocess_func(cls, func):\n \"\"\"Put a callable function into the plasma store for use in `apply`.\n\n Args:\n func: The function to preprocess.\n\n Returns:\n A ray.ObjectRef.\n \"\"\"\n return ray.put(func)\n\n def length(self):\n if self._length_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._length_cache, self._width_cache = get_index_and_columns.remote(\n self.oid\n )\n if isinstance(self._length_cache, ray.ObjectRef):\n try:\n self._length_cache = ray.get(self._length_cache)\n except RayTaskError as e:\n handle_ray_task_error(e)\n return self._length_cache\n\n def width(self):\n if self._width_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._length_cache, self._width_cache = get_index_and_columns.remote(\n self.oid\n )\n if isinstance(self._width_cache, ray.ObjectRef):\n try:\n self._width_cache = ray.get(self._width_cache)\n except RayTaskError as e:\n handle_ray_task_error(e)\n return self._width_cache\n\n def ip(self):\n if self._ip_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._ip_cache = self.apply(lambda df: df)._ip_cache\n if isinstance(self._ip_cache, ray.ObjectID):\n try:\n self._ip_cache = ray.get(self._ip_cache)\n except RayTaskError as e:\n handle_ray_task_error(e)\n return self._ip_cache\n\n @classmethod\n def length_extraction_fn(cls):\n return length_fn_pandas\n\n @classmethod\n def width_extraction_fn(cls):\n return width_fn_pandas\n\n @classmethod\n def empty(cls):\n return cls.put(pandas.DataFrame())\n\n\[email protected](num_returns=2)\ndef get_index_and_columns(df):\n return len(df.index), len(df.columns)\n\n\[email protected](num_returns=4)\ndef deploy_ray_func(call_queue, partition): # pragma: no cover\n def deserialize(obj):\n if isinstance(obj, ray.ObjectRef):\n return ray.get(obj)\n return obj\n\n if len(call_queue) > 1:\n for func, kwargs in call_queue[:-1]:\n func = deserialize(func)\n kwargs = deserialize(kwargs)\n try:\n partition = func(partition, **kwargs)\n except ValueError:\n partition = func(partition.copy(), **kwargs)\n func, kwargs = call_queue[-1]\n func = deserialize(func)\n kwargs = deserialize(kwargs)\n try:\n result = func(partition, **kwargs)\n # Sometimes Arrow forces us to make a copy of an object before we operate on it. We\n # don't want the error to propagate to the user, and we want to avoid copying unless\n # we absolutely have to.\n except ValueError:\n result = func(partition.copy(), **kwargs)\n return (\n result,\n len(result) if hasattr(result, \"__len__\") else 0,\n len(result.columns) if hasattr(result, \"columns\") else 0,\n get_node_ip_address(),\n )\n", "path": "modin/engines/ray/pandas_on_ray/frame/partition.py"}]}
| 3,144 | 581 |
gh_patches_debug_3192
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-2359
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Lunr dependency to 0.5.9
Please consider updating Lunr to the latest version, [0.5.9](https://github.com/yeraydiazdiaz/lunr.py/releases/tag/0.5.9). This avoids the following error without the need to downgrade Lunr:
```python
Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 568, in _build_master
ws.require(__requires__)
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 886, in require
needed = self.resolve(parse_requirements(requirements))
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 777, in resolve
raise VersionConflict(dist, req).with_context(dependent_req)
pkg_resources.ContextualVersionConflict: (lunr 0.5.9 (/usr/lib/python3.9/site-packages), Requirement.parse('lunr[languages]==0.5.8'), {'mkdocs'})
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/bin/mkdocs", line 33, in <module>
sys.exit(load_entry_point('mkdocs==1.1.2', 'console_scripts', 'mkdocs')())
File "/bin/mkdocs", line 25, in importlib_load_entry_point
return next(matches).load()
File "/usr/lib/python3.9/importlib/metadata.py", line 77, in load
module = import_module(match.group('module'))
File "/usr/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 790, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/usr/lib/python3.9/site-packages/mkdocs/__main__.py", line 14, in <module>
from mkdocs import utils # noqa: E402
File "/usr/lib/python3.9/site-packages/mkdocs/utils/__init__.py", line 11, in <module>
import pkg_resources
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 3243, in <module>
def _initialize_master_working_set():
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 3226, in _call_aside
f(*args, **kwargs)
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 3255, in _initialize_master_working_set
working_set = WorkingSet._build_master()
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 570, in _build_master
return cls._build_from_requirements(__requires__)
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 583, in _build_from_requirements
dists = ws.resolve(reqs, Environment())
File "/usr/lib/python3.9/site-packages/pkg_resources/__init__.py", line 772, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'lunr[languages]==0.5.8' distribution was not found and is required by mkdocs
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup
4 import re
5 import os
6 import sys
7
8
9 with open('README.md') as f:
10 long_description = f.read()
11
12
13 def get_version(package):
14 """Return package version as listed in `__version__` in `init.py`."""
15 init_py = open(os.path.join(package, '__init__.py')).read()
16 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
17
18
19 def get_packages(package):
20 """Return root package and all sub-packages."""
21 return [dirpath
22 for dirpath, dirnames, filenames in os.walk(package)
23 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
24
25
26 if sys.argv[-1] == 'publish':
27 if os.system("pip freeze | grep wheel"):
28 print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
29 sys.exit()
30 if os.system("pip freeze | grep twine"):
31 print("twine not installed.\nUse `pip install twine`.\nExiting.")
32 sys.exit()
33 os.system("python setup.py sdist bdist_wheel")
34 os.system("twine upload dist/*")
35 print("You probably want to also tag the version now:")
36 print(" git tag -a {0} -m 'version {0}'".format(get_version("mkdocs")))
37 print(" git push --tags")
38 sys.exit()
39
40
41 setup(
42 name="mkdocs",
43 version=get_version("mkdocs"),
44 url='https://www.mkdocs.org',
45 license='BSD',
46 description='Project documentation with Markdown.',
47 long_description=long_description,
48 long_description_content_type='text/markdown',
49 author='Tom Christie',
50 author_email='[email protected]', # SEE NOTE BELOW (*)
51 packages=get_packages("mkdocs"),
52 include_package_data=True,
53 install_requires=[
54 'click>=3.3',
55 'Jinja2>=2.10.1',
56 'livereload>=2.5.1',
57 'lunr[languages]==0.5.8', # must support lunr.js version included in search
58 'Markdown>=3.2.1',
59 'PyYAML>=3.10',
60 'tornado>=5.0',
61 'ghp-import>=1.0',
62 'pyyaml_env_tag>=0.1',
63 'importlib_metadata>=3.10',
64 'packaging>=20.5'
65 ],
66 python_requires='>=3.6',
67 entry_points={
68 'console_scripts': [
69 'mkdocs = mkdocs.__main__:cli',
70 ],
71 'mkdocs.themes': [
72 'mkdocs = mkdocs.themes.mkdocs',
73 'readthedocs = mkdocs.themes.readthedocs',
74 ],
75 'mkdocs.plugins': [
76 'search = mkdocs.contrib.search:SearchPlugin',
77 ],
78 },
79 classifiers=[
80 'Development Status :: 5 - Production/Stable',
81 'Environment :: Console',
82 'Environment :: Web Environment',
83 'Intended Audience :: Developers',
84 'License :: OSI Approved :: BSD License',
85 'Operating System :: OS Independent',
86 'Programming Language :: Python',
87 'Programming Language :: Python :: 3',
88 'Programming Language :: Python :: 3.6',
89 'Programming Language :: Python :: 3.7',
90 'Programming Language :: Python :: 3.8',
91 'Programming Language :: Python :: 3.9',
92 'Programming Language :: Python :: 3 :: Only',
93 "Programming Language :: Python :: Implementation :: CPython",
94 "Programming Language :: Python :: Implementation :: PyPy",
95 'Topic :: Documentation',
96 'Topic :: Text Processing',
97 ],
98 zip_safe=False,
99 )
100
101 # (*) Please direct queries to the discussion group:
102 # https://groups.google.com/forum/#!forum/mkdocs
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,7 +54,7 @@
'click>=3.3',
'Jinja2>=2.10.1',
'livereload>=2.5.1',
- 'lunr[languages]==0.5.8', # must support lunr.js version included in search
+ 'lunr[languages]==0.5.9', # must support lunr.js version included in search
'Markdown>=3.2.1',
'PyYAML>=3.10',
'tornado>=5.0',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,7 +54,7 @@\n 'click>=3.3',\n 'Jinja2>=2.10.1',\n 'livereload>=2.5.1',\n- 'lunr[languages]==0.5.8', # must support lunr.js version included in search\n+ 'lunr[languages]==0.5.9', # must support lunr.js version included in search\n 'Markdown>=3.2.1',\n 'PyYAML>=3.10',\n 'tornado>=5.0',\n", "issue": "Update Lunr dependency to 0.5.9\nPlease consider updating Lunr to the latest version, [0.5.9](https://github.com/yeraydiazdiaz/lunr.py/releases/tag/0.5.9). This avoids the following error without he need to downgrade Lunr:\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 568, in _build_master\r\n ws.require(__requires__)\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 886, in require\r\n needed = self.resolve(parse_requirements(requirements))\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 777, in resolve\r\n raise VersionConflict(dist, req).with_context(dependent_req)\r\npkg_resources.ContextualVersionConflict: (lunr 0.5.9 (/usr/lib/python3.9/site-packages), Requirement.parse('lunr[languages]==0.5.8'), {'mkdocs'})\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/bin/mkdocs\", line 33, in <module>\r\n sys.exit(load_entry_point('mkdocs==1.1.2', 'console_scripts', 'mkdocs')())\r\n File \"/bin/mkdocs\", line 25, in importlib_load_entry_point\r\n return next(matches).load()\r\n File \"/usr/lib/python3.9/importlib/metadata.py\", line 77, in load\r\n module = import_module(match.group('module'))\r\n File \"/usr/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 790, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\n File \"/usr/lib/python3.9/site-packages/mkdocs/__main__.py\", line 14, in <module>\r\n from mkdocs import utils # noqa: E402\r\n File \"/usr/lib/python3.9/site-packages/mkdocs/utils/__init__.py\", line 11, in <module>\r\n import pkg_resources\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 3243, in <module>\r\n def _initialize_master_working_set():\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 3226, in _call_aside\r\n f(*args, **kwargs)\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 3255, in _initialize_master_working_set\r\n working_set = WorkingSet._build_master()\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 570, in _build_master\r\n return cls._build_from_requirements(__requires__)\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 583, in _build_from_requirements\r\n dists = ws.resolve(reqs, Environment())\r\n File \"/usr/lib/python3.9/site-packages/pkg_resources/__init__.py\", line 772, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'lunr[languages]==0.5.8' distribution was not found and is required 
by mkdocs\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nwith open('README.md') as f:\n long_description = f.read()\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='https://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.10.1',\n 'livereload>=2.5.1',\n 'lunr[languages]==0.5.8', # must support lunr.js version included in search\n 'Markdown>=3.2.1',\n 'PyYAML>=3.10',\n 'tornado>=5.0',\n 'ghp-import>=1.0',\n 'pyyaml_env_tag>=0.1',\n 'importlib_metadata>=3.10',\n 'packaging>=20.5'\n ],\n python_requires='>=3.6',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3 :: Only',\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n", "path": "setup.py"}]}
| 2,455 | 149 |
gh_patches_debug_62454
|
rasdani/github-patches
|
git_diff
|
apache__tvm-6881
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not show meta data when printing TIR
Recently I found that TVM will print the metadata when I do
```python
print(tvm.lower(s, [A, B, C], simple_mode=True))
```
Here is an example output from the tutorial website (https://tvm.apache.org/docs/tutorials/language/schedule_primitives.html):
```
#[version = "0.0.5"]
primfn(A_1: handle, B_1: handle, C_1: handle) -> ()
attr = {"global_symbol": "main", "tir.noalias": True}
buffers = {C: Buffer(C_2: Pointer(float32), float32, [m: int32, n: int32], [stride: int32, stride_1: int32], type="auto"),
B: Buffer(B_2: Pointer(float32), float32, [m, n], [stride_2: int32, stride_3: int32], type="auto"),
A: Buffer(A_2: Pointer(float32), float32, [m, n], [stride_4: int32, stride_5: int32], type="auto")}
buffer_map = {A_1: A, B_1: B, C_1: C} {
for (i: int32, 0, m) {
for (j: int32, 0, n) {
C_2[((i*stride) + (j*stride_1))] = ((float32*)A_2[((i*stride_4) + (j*stride_5))]*(float32*)B_2[((i*stride_2) + (j*stride_3))])
}
}
}
#[metadata]
{
"root": 1,
"nodes": [
{
"type_key": ""
},
{
"type_key": "Map",
"keys": [
"IntImm"
],
"data": [2]
},
{
"type_key": "Array",
"data": [3]
},
{
"type_key": "IntImm",
"attrs": {
"dtype": "bool",
"value": "1"
}
}
],
"b64ndarrays": [],
"attrs": {"tvm_version": "0.8.dev0"}
}
```
I found this very annoying. Why do we print it? Can we disable it for this use case by default?
cc @tqchen @spectrometerHBH @jroesch @junrushao1994
</issue>
<code>
[start of python/tvm/ir/module.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 """IRModule that holds the functions and type definitions."""
18 from tvm._ffi.base import string_types
19 import tvm._ffi
20
21 from .base import Node
22 from . import expr as _expr
23 from . import type as _ty
24 from . import _ffi_api
25
26
27 @tvm._ffi.register_object("IRModule")
28 class IRModule(Node):
29 """IRModule that holds functions and type definitions.
30
31 IRModule is the basic unit for all IR transformations across the stack.
32
33 Parameters
34 ----------
35 functions: Optional[dict].
36 Map of global var to BaseFunc
37 """
38
39 def __init__(self, functions=None, type_definitions=None):
40 if functions is None:
41 functions = {}
42 elif isinstance(functions, dict):
43 mapped_funcs = {}
44 for k, v in functions.items():
45 if isinstance(k, string_types):
46 k = _expr.GlobalVar(k)
47 if not isinstance(k, _expr.GlobalVar):
48 raise TypeError("Expect functions to be Dict[GlobalVar, Function]")
49 mapped_funcs[k] = v
50 functions = mapped_funcs
51 if type_definitions is None:
52 type_definitions = {}
53 elif isinstance(type_definitions, dict):
54 mapped_type_defs = {}
55 for k, v in type_definitions.items():
56 if isinstance(k, string_types):
57 k = _ty.GlobalTypeVar(k)
58 if not isinstance(k, _ty.GlobalTypeVar):
59 raise TypeError("Expect type_definitions to be Dict[GlobalTypeVar, Type]")
60 mapped_type_defs[k] = v
61 type_definitions = mapped_type_defs
62 self.__init_handle_by_constructor__(_ffi_api.IRModule, functions, type_definitions)
63
64 def __setitem__(self, var, val):
65 """Add a mapping to the module.
66
67 Parameters
68 ---------
69 var: GlobalVar
70 The global variable.
71
72 val: Union[Function, Type]
73 The value.
74 """
75 return self._add(var, val, True)
76
77 def _add(self, var, val, update=True):
78 if isinstance(val, _expr.RelayExpr):
79 if isinstance(var, string_types):
80 if _ffi_api.Module_ContainGlobalVar(self, var):
81 var = _ffi_api.Module_GetGlobalVar(self, var)
82 else:
83 var = _expr.GlobalVar(var)
84 _ffi_api.Module_Add(self, var, val, update)
85 else:
86 assert isinstance(val, _ty.Type)
87 if isinstance(var, string_types):
88 var = _ty.GlobalTypeVar(var)
89 _ffi_api.Module_AddDef(self, var, val, update)
90
91 def __getitem__(self, var):
92 """Lookup a global definition by name or by variable.
93
94 Parameters
95 ----------
96 var: Union[String, GlobalVar, GlobalTypeVar]
97 The name or global variable.
98
99 Returns
100 -------
101 val: Union[Function, Type]
102 The definition referenced by :code:`var` (either a function or type).
103 """
104 if isinstance(var, string_types):
105 return _ffi_api.Module_Lookup_str(self, var)
106 if isinstance(var, _expr.GlobalVar):
107 return _ffi_api.Module_Lookup(self, var)
108 return _ffi_api.Module_LookupDef(self, var)
109
110 def update(self, other):
111 """Insert functions in another Module to current one.
112
113 Parameters
114 ----------
115 other: IRModule
116 The module to merge into the current Module.
117 """
118 if isinstance(other, dict):
119 other = IRModule(other)
120
121 return _ffi_api.Module_Update(self, other)
122
123 def update_func(self, var, func):
124 """Update the function corresponding to a global variable in the
125 module.
126
127 Parameters
128 ----------
129 var: GlobalVar
130 The global variable.
131
132 func: tvm.relay.Function
133 The function to be inserted.
134 """
135 return _ffi_api.Module_UpdateFunction(self, var, func)
136
137 def get_global_var(self, name):
138 """Get a global variable in the function by name.
139
140 Parameters
141 ----------
142 name: str
143 The name of the global variable.
144
145 Returns
146 -------
147 global_var: GlobalVar
148 The global variable mapped to :code:`name`.
149
150 Raises
151 ------
152 tvm.error.TVMError if we cannot find corresponding global var.
153 """
154 return _ffi_api.Module_GetGlobalVar(self, name)
155
156 def get_global_vars(self):
157 """Collect all global vars defined in this module.
158
159 Returns
160 -------
161 global_vars: Array[GlobalVar]
162 An array of global vars.
163 """
164 return _ffi_api.Module_GetGlobalVars(self)
165
166 def get_global_type_vars(self):
167 """Collect all global type vars defined in this module.
168
169 Returns
170 -------
171 global_type_vars: Array[GlobalTypeVar]
172 An array of global type vars.
173 """
174 return _ffi_api.Module_GetGlobalTypeVars(self)
175
176 def get_global_type_var(self, name):
177 """Get a global type variable in the function by name.
178
179 Parameters
180 ----------
181 name: str
182 The name of the global type variable.
183
184 Returns
185 -------
186 global_type_var: GlobalTypeVar
187 The global variable mapped to :code:`name`.
188
189 Raises
190 ------
191 tvm.error.TVMError if we cannot find corresponding global type var.
192 """
193 return _ffi_api.Module_GetGlobalTypeVar(self, name)
194
195 def get_constructor(self, tag):
196 """Look up an ADT constructor by tag.
197
198 Parameters
199 ----------
200 tag: int
201 The tag for a constructor.
202
203 Returns
204 -------
205 constructor: Constructor
206 The constructor associated with the given tag,
207
208 Raises
209 ------
210 tvm.error.TVMError if the corresponding constructor cannot be found.
211 """
212 return _ffi_api.Module_LookupTag(self, tag)
213
214 def get_type(self, name):
215 ty_var = self.get_global_type_var(name)
216 ty_data = self.type_definitions[ty_var]
217 return tuple([ty_var] + list(ty_data.constructors))
218
219 @staticmethod
220 def from_expr(expr, functions=None, type_defs=None):
221 """Construct a module from a standalone expression.
222
223 Parameters
224 ----------
225 expr: RelayExpr
226 The starting expression
227
228 global_funcs: Optional[dict]
229 Map of global vars to function definitions
230
231 type_defs: Optional[dict]
232 Map of global type vars to type definitions
233
234 Returns
235 -------
236 mod: Module
237 A module containing the passed definitions,
238 where expr is set as the entry point
239 (wrapped in a function if necessary)
240 """
241 funcs = functions if functions is not None else {}
242 defs = type_defs if type_defs is not None else {}
243 return _ffi_api.Module_FromExpr(expr, funcs, defs)
244
245 def _import(self, file_to_import):
246 return _ffi_api.Module_Import(self, file_to_import)
247
248 def import_from_std(self, file_to_import):
249 # TODO(@jroesch): clean up prelude
250 _ffi_api.Module_ImportFromStd(self, file_to_import)
251 return tvm.relay.transform.InferType()(self)
252
253 def __str__(self):
254 # TODO(jroesch): why does this hang sometimes?
255 return self.astext()
256
257 def __repr__(self):
258 return self.astext()
259
[end of python/tvm/ir/module.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/tvm/ir/module.py b/python/tvm/ir/module.py
--- a/python/tvm/ir/module.py
+++ b/python/tvm/ir/module.py
@@ -251,8 +251,7 @@
return tvm.relay.transform.InferType()(self)
def __str__(self):
- # TODO(jroesch): why does this hang sometimes?
- return self.astext()
+ return _ffi_api.PrettyPrint(self)
def __repr__(self):
return self.astext()
|
{"golden_diff": "diff --git a/python/tvm/ir/module.py b/python/tvm/ir/module.py\n--- a/python/tvm/ir/module.py\n+++ b/python/tvm/ir/module.py\n@@ -251,8 +251,7 @@\n return tvm.relay.transform.InferType()(self)\n \n def __str__(self):\n- # TODO(jroesch): why does this hang sometimes?\n- return self.astext()\n+ return _ffi_api.PrettyPrint(self)\n \n def __repr__(self):\n return self.astext()\n", "issue": "Do not show meta data when printing TIR\nRecently I found TVM will print meta when I do \r\n```python\r\nprint(tvm.lower(s, [A, B, C], simple_mode=True))\r\n```\r\n\r\nHere is an example output in the tutorial website (https://tvm.apache.org/docs/tutorials/language/schedule_primitives.html)\r\n```\r\n#[version = \"0.0.5\"]\r\nprimfn(A_1: handle, B_1: handle, C_1: handle) -> ()\r\n attr = {\"global_symbol\": \"main\", \"tir.noalias\": True}\r\n buffers = {C: Buffer(C_2: Pointer(float32), float32, [m: int32, n: int32], [stride: int32, stride_1: int32], type=\"auto\"),\r\n B: Buffer(B_2: Pointer(float32), float32, [m, n], [stride_2: int32, stride_3: int32], type=\"auto\"),\r\n A: Buffer(A_2: Pointer(float32), float32, [m, n], [stride_4: int32, stride_5: int32], type=\"auto\")}\r\n buffer_map = {A_1: A, B_1: B, C_1: C} {\r\n for (i: int32, 0, m) {\r\n for (j: int32, 0, n) {\r\n C_2[((i*stride) + (j*stride_1))] = ((float32*)A_2[((i*stride_4) + (j*stride_5))]*(float32*)B_2[((i*stride_2) + (j*stride_3))])\r\n }\r\n }\r\n}\r\n\r\n#[metadata]\r\n{\r\n \"root\": 1,\r\n \"nodes\": [\r\n {\r\n \"type_key\": \"\"\r\n },\r\n {\r\n \"type_key\": \"Map\",\r\n \"keys\": [\r\n \"IntImm\"\r\n ],\r\n \"data\": [2]\r\n },\r\n {\r\n \"type_key\": \"Array\",\r\n \"data\": [3]\r\n },\r\n {\r\n \"type_key\": \"IntImm\",\r\n \"attrs\": {\r\n \"dtype\": \"bool\",\r\n \"value\": \"1\"\r\n }\r\n }\r\n ],\r\n \"b64ndarrays\": [],\r\n \"attrs\": {\"tvm_version\": \"0.8.dev0\"}\r\n}\r\n```\r\n\r\nI found this very annoying. Why do we print it? Can we disable it for this use case by default?\r\ncc @tqchen @spectrometerHBH @jroesch @junrushao1994 \n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"IRModule that holds the functions and type definitions.\"\"\"\nfrom tvm._ffi.base import string_types\nimport tvm._ffi\n\nfrom .base import Node\nfrom . import expr as _expr\nfrom . import type as _ty\nfrom . 
import _ffi_api\n\n\n@tvm._ffi.register_object(\"IRModule\")\nclass IRModule(Node):\n \"\"\"IRModule that holds functions and type definitions.\n\n IRModule is the basic unit for all IR transformations across the stack.\n\n Parameters\n ----------\n functions: Optional[dict].\n Map of global var to BaseFunc\n \"\"\"\n\n def __init__(self, functions=None, type_definitions=None):\n if functions is None:\n functions = {}\n elif isinstance(functions, dict):\n mapped_funcs = {}\n for k, v in functions.items():\n if isinstance(k, string_types):\n k = _expr.GlobalVar(k)\n if not isinstance(k, _expr.GlobalVar):\n raise TypeError(\"Expect functions to be Dict[GlobalVar, Function]\")\n mapped_funcs[k] = v\n functions = mapped_funcs\n if type_definitions is None:\n type_definitions = {}\n elif isinstance(type_definitions, dict):\n mapped_type_defs = {}\n for k, v in type_definitions.items():\n if isinstance(k, string_types):\n k = _ty.GlobalTypeVar(k)\n if not isinstance(k, _ty.GlobalTypeVar):\n raise TypeError(\"Expect type_definitions to be Dict[GlobalTypeVar, Type]\")\n mapped_type_defs[k] = v\n type_definitions = mapped_type_defs\n self.__init_handle_by_constructor__(_ffi_api.IRModule, functions, type_definitions)\n\n def __setitem__(self, var, val):\n \"\"\"Add a mapping to the module.\n\n Parameters\n ---------\n var: GlobalVar\n The global variable.\n\n val: Union[Function, Type]\n The value.\n \"\"\"\n return self._add(var, val, True)\n\n def _add(self, var, val, update=True):\n if isinstance(val, _expr.RelayExpr):\n if isinstance(var, string_types):\n if _ffi_api.Module_ContainGlobalVar(self, var):\n var = _ffi_api.Module_GetGlobalVar(self, var)\n else:\n var = _expr.GlobalVar(var)\n _ffi_api.Module_Add(self, var, val, update)\n else:\n assert isinstance(val, _ty.Type)\n if isinstance(var, string_types):\n var = _ty.GlobalTypeVar(var)\n _ffi_api.Module_AddDef(self, var, val, update)\n\n def __getitem__(self, var):\n \"\"\"Lookup a global definition by name or by variable.\n\n Parameters\n ----------\n var: Union[String, GlobalVar, GlobalTypeVar]\n The name or global variable.\n\n Returns\n -------\n val: Union[Function, Type]\n The definition referenced by :code:`var` (either a function or type).\n \"\"\"\n if isinstance(var, string_types):\n return _ffi_api.Module_Lookup_str(self, var)\n if isinstance(var, _expr.GlobalVar):\n return _ffi_api.Module_Lookup(self, var)\n return _ffi_api.Module_LookupDef(self, var)\n\n def update(self, other):\n \"\"\"Insert functions in another Module to current one.\n\n Parameters\n ----------\n other: IRModule\n The module to merge into the current Module.\n \"\"\"\n if isinstance(other, dict):\n other = IRModule(other)\n\n return _ffi_api.Module_Update(self, other)\n\n def update_func(self, var, func):\n \"\"\"Update the function corresponding to a global variable in the\n module.\n\n Parameters\n ----------\n var: GlobalVar\n The global variable.\n\n func: tvm.relay.Function\n The function to be inserted.\n \"\"\"\n return _ffi_api.Module_UpdateFunction(self, var, func)\n\n def get_global_var(self, name):\n \"\"\"Get a global variable in the function by name.\n\n Parameters\n ----------\n name: str\n The name of the global variable.\n\n Returns\n -------\n global_var: GlobalVar\n The global variable mapped to :code:`name`.\n\n Raises\n ------\n tvm.error.TVMError if we cannot find corresponding global var.\n \"\"\"\n return _ffi_api.Module_GetGlobalVar(self, name)\n\n def get_global_vars(self):\n \"\"\"Collect all global vars defined in this module.\n\n 
Returns\n -------\n global_vars: Array[GlobalVar]\n An array of global vars.\n \"\"\"\n return _ffi_api.Module_GetGlobalVars(self)\n\n def get_global_type_vars(self):\n \"\"\"Collect all global type vars defined in this module.\n\n Returns\n -------\n global_type_vars: Array[GlobalTypeVar]\n An array of global type vars.\n \"\"\"\n return _ffi_api.Module_GetGlobalTypeVars(self)\n\n def get_global_type_var(self, name):\n \"\"\"Get a global type variable in the function by name.\n\n Parameters\n ----------\n name: str\n The name of the global type variable.\n\n Returns\n -------\n global_type_var: GlobalTypeVar\n The global variable mapped to :code:`name`.\n\n Raises\n ------\n tvm.error.TVMError if we cannot find corresponding global type var.\n \"\"\"\n return _ffi_api.Module_GetGlobalTypeVar(self, name)\n\n def get_constructor(self, tag):\n \"\"\"Look up an ADT constructor by tag.\n\n Parameters\n ----------\n tag: int\n The tag for a constructor.\n\n Returns\n -------\n constructor: Constructor\n The constructor associated with the given tag,\n\n Raises\n ------\n tvm.error.TVMError if the corresponding constructor cannot be found.\n \"\"\"\n return _ffi_api.Module_LookupTag(self, tag)\n\n def get_type(self, name):\n ty_var = self.get_global_type_var(name)\n ty_data = self.type_definitions[ty_var]\n return tuple([ty_var] + list(ty_data.constructors))\n\n @staticmethod\n def from_expr(expr, functions=None, type_defs=None):\n \"\"\"Construct a module from a standalone expression.\n\n Parameters\n ----------\n expr: RelayExpr\n The starting expression\n\n global_funcs: Optional[dict]\n Map of global vars to function definitions\n\n type_defs: Optional[dict]\n Map of global type vars to type definitions\n\n Returns\n -------\n mod: Module\n A module containing the passed definitions,\n where expr is set as the entry point\n (wrapped in a function if necessary)\n \"\"\"\n funcs = functions if functions is not None else {}\n defs = type_defs if type_defs is not None else {}\n return _ffi_api.Module_FromExpr(expr, funcs, defs)\n\n def _import(self, file_to_import):\n return _ffi_api.Module_Import(self, file_to_import)\n\n def import_from_std(self, file_to_import):\n # TODO(@jroesch): clean up prelude\n _ffi_api.Module_ImportFromStd(self, file_to_import)\n return tvm.relay.transform.InferType()(self)\n\n def __str__(self):\n # TODO(jroesch): why does this hang sometimes?\n return self.astext()\n\n def __repr__(self):\n return self.astext()\n", "path": "python/tvm/ir/module.py"}]}
| 3,537 | 121 |
gh_patches_debug_10346
|
rasdani/github-patches
|
git_diff
|
liberapay__liberapay.com-180
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix connecting a GitHub org
I tried connecting the GitHub liberapay org to the [LiberapayOrg](https://liberapay.com/LiberapayOrg/) account but I ended up on the take-over confirmation page asking me to transfer my personal account.
</issue>
<code>
[start of liberapay/elsewhere/github.py]
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from liberapay.elsewhere import PlatformOAuth2
4 from liberapay.elsewhere._extractors import key
5 from liberapay.elsewhere._paginators import header_links_paginator
6
7
8 class GitHub(PlatformOAuth2):
9
10 # Platform attributes
11 name = 'github'
12 display_name = 'GitHub'
13 account_url = 'https://github.com/{user_name}'
14 allows_team_connect = True
15
16 # Auth attributes
17 auth_url = 'https://github.com/login/oauth/authorize'
18 access_token_url = 'https://github.com/login/oauth/access_token'
19 oauth_email_scope = 'user:email'
20 oauth_default_scope = ['read:org']
21
22 # API attributes
23 api_format = 'json'
24 api_paginator = header_links_paginator()
25 api_url = 'https://api.github.com'
26 api_user_info_path = '/user/{user_id}'
27 api_user_name_info_path = '/users/{user_name}'
28 api_user_self_info_path = '/user'
29 api_team_members_path = '/orgs/{user_name}/public_members'
30 api_friends_path = '/users/{user_name}/following'
31 ratelimit_headers_prefix = 'x-ratelimit-'
32
33 # User info extractors
34 x_user_id = key('id')
35 x_user_name = key('login')
36 x_display_name = key('name')
37 x_email = key('email')
38 x_gravatar_id = key('gravatar_id')
39 x_avatar_url = key('avatar_url')
40 x_is_team = key('type', clean=lambda t: t.lower() == 'organization')
41
42 def is_team_admin(self, team_name, sess):
43 user_teams = self.api_parser(self.api_get('/user/teams', sess=sess))
44 return any(team.get('organization', {}).get('login') == team_name and
45 team.get('permission') == 'admin'
46 for team in user_teams)
47
[end of liberapay/elsewhere/github.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py
--- a/liberapay/elsewhere/github.py
+++ b/liberapay/elsewhere/github.py
@@ -39,8 +39,7 @@
x_avatar_url = key('avatar_url')
x_is_team = key('type', clean=lambda t: t.lower() == 'organization')
- def is_team_admin(self, team_name, sess):
- user_teams = self.api_parser(self.api_get('/user/teams', sess=sess))
- return any(team.get('organization', {}).get('login') == team_name and
- team.get('permission') == 'admin'
- for team in user_teams)
+ def is_team_member(self, org_name, sess):
+ org_name = org_name.lower()
+ user_orgs = self.api_parser(self.api_get('/user/orgs', sess=sess))
+ return any(org.get('login') == org_name for org in user_orgs)
|
{"golden_diff": "diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py\n--- a/liberapay/elsewhere/github.py\n+++ b/liberapay/elsewhere/github.py\n@@ -39,8 +39,7 @@\n x_avatar_url = key('avatar_url')\n x_is_team = key('type', clean=lambda t: t.lower() == 'organization')\n \n- def is_team_admin(self, team_name, sess):\n- user_teams = self.api_parser(self.api_get('/user/teams', sess=sess))\n- return any(team.get('organization', {}).get('login') == team_name and\n- team.get('permission') == 'admin'\n- for team in user_teams)\n+ def is_team_member(self, org_name, sess):\n+ org_name = org_name.lower()\n+ user_orgs = self.api_parser(self.api_get('/user/orgs', sess=sess))\n+ return any(org.get('login') == org_name for org in user_orgs)\n", "issue": "Fix connecting a GitHub org\nI tried connecting the GitHub liberapay org to the [LiberapayOrg](https://liberapay.com/LiberapayOrg/) account but I ended up on the take-over confirmation page asking me to transfer my personal account.\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom liberapay.elsewhere import PlatformOAuth2\nfrom liberapay.elsewhere._extractors import key\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitHub(PlatformOAuth2):\n\n # Platform attributes\n name = 'github'\n display_name = 'GitHub'\n account_url = 'https://github.com/{user_name}'\n allows_team_connect = True\n\n # Auth attributes\n auth_url = 'https://github.com/login/oauth/authorize'\n access_token_url = 'https://github.com/login/oauth/access_token'\n oauth_email_scope = 'user:email'\n oauth_default_scope = ['read:org']\n\n # API attributes\n api_format = 'json'\n api_paginator = header_links_paginator()\n api_url = 'https://api.github.com'\n api_user_info_path = '/user/{user_id}'\n api_user_name_info_path = '/users/{user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/orgs/{user_name}/public_members'\n api_friends_path = '/users/{user_name}/following'\n ratelimit_headers_prefix = 'x-ratelimit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('login')\n x_display_name = key('name')\n x_email = key('email')\n x_gravatar_id = key('gravatar_id')\n x_avatar_url = key('avatar_url')\n x_is_team = key('type', clean=lambda t: t.lower() == 'organization')\n\n def is_team_admin(self, team_name, sess):\n user_teams = self.api_parser(self.api_get('/user/teams', sess=sess))\n return any(team.get('organization', {}).get('login') == team_name and\n team.get('permission') == 'admin'\n for team in user_teams)\n", "path": "liberapay/elsewhere/github.py"}]}
| 1,108 | 224 |
gh_patches_debug_42052
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-836
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
datadog: honor DD environment variables
There are a few environment variables that are not currently honored by the exporter. These are:
* DD_AGENT_HOST, for the agent hostname
* DATADOG_SERVICE_NAME, DD_SERVICE_NAME, DD_SERVICE for the datadog service name.
There is a more comprehensive list, as outlined here:
https://docs.datadoghq.com/tracing/setup/python/#configuration
</issue>
<code>
[start of ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import os
17 from urllib.parse import urlparse
18
19 from ddtrace.ext import SpanTypes as DatadogSpanTypes
20 from ddtrace.internal.writer import AgentWriter
21 from ddtrace.span import Span as DatadogSpan
22
23 import opentelemetry.trace as trace_api
24 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
25 from opentelemetry.trace.status import StatusCanonicalCode
26
27 # pylint:disable=relative-beyond-top-level
28 from .constants import DD_ORIGIN, SAMPLE_RATE_METRIC_KEY
29
30 logger = logging.getLogger(__name__)
31
32
33 DEFAULT_AGENT_URL = "http://localhost:8126"
34 _INSTRUMENTATION_SPAN_TYPES = {
35 "opentelemetry.ext.aiohttp-client": DatadogSpanTypes.HTTP,
36 "opentelemetry.ext.asgi": DatadogSpanTypes.WEB,
37 "opentelemetry.ext.dbapi": DatadogSpanTypes.SQL,
38 "opentelemetry.ext.django": DatadogSpanTypes.WEB,
39 "opentelemetry.ext.flask": DatadogSpanTypes.WEB,
40 "opentelemetry.ext.grpc": DatadogSpanTypes.GRPC,
41 "opentelemetry.ext.jinja2": DatadogSpanTypes.TEMPLATE,
42 "opentelemetry.ext.mysql": DatadogSpanTypes.SQL,
43 "opentelemetry.ext.psycopg2": DatadogSpanTypes.SQL,
44 "opentelemetry.ext.pymemcache": DatadogSpanTypes.CACHE,
45 "opentelemetry.ext.pymongo": DatadogSpanTypes.MONGODB,
46 "opentelemetry.ext.pymysql": DatadogSpanTypes.SQL,
47 "opentelemetry.ext.redis": DatadogSpanTypes.REDIS,
48 "opentelemetry.ext.requests": DatadogSpanTypes.HTTP,
49 "opentelemetry.ext.sqlalchemy": DatadogSpanTypes.SQL,
50 "opentelemetry.ext.wsgi": DatadogSpanTypes.WEB,
51 }
52
53
54 class DatadogSpanExporter(SpanExporter):
55 """Datadog span exporter for OpenTelemetry.
56
57 Args:
58 agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable
59 service: The service to be used for the application or use ``DD_SERVICE`` environment variable
60 """
61
62 def __init__(self, agent_url=None, service=None):
63 self.agent_url = (
64 agent_url
65 if agent_url
66 else os.environ.get("DD_TRACE_AGENT_URL", DEFAULT_AGENT_URL)
67 )
68 self.service = service if service else os.environ.get("DD_SERVICE")
69 self._agent_writer = None
70
71 @property
72 def agent_writer(self):
73 if self._agent_writer is None:
74 url_parsed = urlparse(self.agent_url)
75 if url_parsed.scheme in ("http", "https"):
76 self._agent_writer = AgentWriter(
77 hostname=url_parsed.hostname,
78 port=url_parsed.port,
79 https=url_parsed.scheme == "https",
80 )
81 elif url_parsed.scheme == "unix":
82 self._agent_writer = AgentWriter(uds_path=url_parsed.path)
83 else:
84 raise ValueError(
85 "Unknown scheme `%s` for agent URL" % url_parsed.scheme
86 )
87 return self._agent_writer
88
89 def export(self, spans):
90 datadog_spans = self._translate_to_datadog(spans)
91
92 self.agent_writer.write(spans=datadog_spans)
93
94 return SpanExportResult.SUCCESS
95
96 def shutdown(self):
97 if self.agent_writer.started:
98 self.agent_writer.stop()
99 self.agent_writer.join(self.agent_writer.exit_timeout)
100
101 def _translate_to_datadog(self, spans):
102 datadog_spans = []
103
104 for span in spans:
105 trace_id, parent_id, span_id = _get_trace_ids(span)
106
107 # datadog Span is initialized with a reference to the tracer which is
108 # used to record the span when it is finished. We can skip ignore this
109 # because we are not calling the finish method and explictly set the
110 # duration.
111 tracer = None
112
113 datadog_span = DatadogSpan(
114 tracer,
115 _get_span_name(span),
116 service=self.service,
117 resource=_get_resource(span),
118 span_type=_get_span_type(span),
119 trace_id=trace_id,
120 span_id=span_id,
121 parent_id=parent_id,
122 )
123 datadog_span.start_ns = span.start_time
124 datadog_span.duration_ns = span.end_time - span.start_time
125
126 if span.status.canonical_code is not StatusCanonicalCode.OK:
127 datadog_span.error = 1
128 if span.status.description:
129 exc_type, exc_val = _get_exc_info(span)
130 # no mapping for error.stack since traceback not recorded
131 datadog_span.set_tag("error.msg", exc_val)
132 datadog_span.set_tag("error.type", exc_type)
133
134 datadog_span.set_tags(span.attributes)
135
136 # add origin to root span
137 origin = _get_origin(span)
138 if origin and parent_id == 0:
139 datadog_span.set_tag(DD_ORIGIN, origin)
140
141 sampling_rate = _get_sampling_rate(span)
142 if sampling_rate is not None:
143 datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)
144
145 # span events and span links are not supported
146
147 datadog_spans.append(datadog_span)
148
149 return datadog_spans
150
151
152 def _get_trace_ids(span):
153 """Extract tracer ids from span"""
154 ctx = span.get_context()
155 trace_id = ctx.trace_id
156 span_id = ctx.span_id
157
158 if isinstance(span.parent, trace_api.Span):
159 parent_id = span.parent.get_context().span_id
160 elif isinstance(span.parent, trace_api.SpanContext):
161 parent_id = span.parent.span_id
162 else:
163 parent_id = 0
164
165 trace_id = _convert_trace_id_uint64(trace_id)
166
167 return trace_id, parent_id, span_id
168
169
170 def _convert_trace_id_uint64(otel_id):
171 """Convert 128-bit int used for trace_id to 64-bit unsigned int"""
172 return otel_id & 0xFFFFFFFFFFFFFFFF
173
174
175 def _get_span_name(span):
176 """Get span name by using instrumentation and kind while backing off to
177 span.name
178 """
179 instrumentation_name = (
180 span.instrumentation_info.name if span.instrumentation_info else None
181 )
182 span_kind_name = span.kind.name if span.kind else None
183 name = (
184 "{}.{}".format(instrumentation_name, span_kind_name)
185 if instrumentation_name and span_kind_name
186 else span.name
187 )
188 return name
189
190
191 def _get_resource(span):
192 """Get resource name for span"""
193 if "http.method" in span.attributes:
194 route = span.attributes.get("http.route")
195 return (
196 span.attributes["http.method"] + " " + route
197 if route
198 else span.attributes["http.method"]
199 )
200
201 return span.name
202
203
204 def _get_span_type(span):
205 """Get Datadog span type"""
206 instrumentation_name = (
207 span.instrumentation_info.name if span.instrumentation_info else None
208 )
209 span_type = _INSTRUMENTATION_SPAN_TYPES.get(instrumentation_name)
210 return span_type
211
212
213 def _get_exc_info(span):
214 """Parse span status description for exception type and value"""
215 exc_type, exc_val = span.status.description.split(":", 1)
216 return exc_type, exc_val.strip()
217
218
219 def _get_origin(span):
220 ctx = span.get_context()
221 origin = ctx.trace_state.get(DD_ORIGIN)
222 return origin
223
224
225 def _get_sampling_rate(span):
226 ctx = span.get_context()
227 return (
228 span.sampler.rate
229 if ctx.trace_flags.sampled
230 and isinstance(span.sampler, trace_api.sampling.ProbabilitySampler)
231 else None
232 )
233
[end of ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py]
[start of ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py]
1 DD_ORIGIN = "_dd_origin"
2 AUTO_REJECT = 0
3 AUTO_KEEP = 1
4 USER_KEEP = 2
5 SAMPLE_RATE_METRIC_KEY = "_sample_rate"
6 SAMPLING_PRIORITY_KEY = "_sampling_priority_v1"
7
[end of ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py
--- a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py
+++ b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py
@@ -4,3 +4,5 @@
USER_KEEP = 2
SAMPLE_RATE_METRIC_KEY = "_sample_rate"
SAMPLING_PRIORITY_KEY = "_sampling_priority_v1"
+ENV_KEY = "env"
+VERSION_KEY = "version"
diff --git a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py
--- a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py
+++ b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py
@@ -25,7 +25,7 @@
from opentelemetry.trace.status import StatusCanonicalCode
# pylint:disable=relative-beyond-top-level
-from .constants import DD_ORIGIN, SAMPLE_RATE_METRIC_KEY
+from .constants import DD_ORIGIN, ENV_KEY, SAMPLE_RATE_METRIC_KEY, VERSION_KEY
logger = logging.getLogger(__name__)
@@ -56,16 +56,24 @@
Args:
agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable
- service: The service to be used for the application or use ``DD_SERVICE`` environment variable
+ service: The service name to be used for the application or use ``DD_SERVICE`` environment variable
+ env: Set the application’s environment or use ``DD_ENV`` environment variable
+ version: Set the application’s version or use ``DD_VERSION`` environment variable
+ tags: A list of default tags to be added to every span or use ``DD_TAGS`` environment variable
"""
- def __init__(self, agent_url=None, service=None):
+ def __init__(
+ self, agent_url=None, service=None, env=None, version=None, tags=None
+ ):
self.agent_url = (
agent_url
if agent_url
else os.environ.get("DD_TRACE_AGENT_URL", DEFAULT_AGENT_URL)
)
- self.service = service if service else os.environ.get("DD_SERVICE")
+ self.service = service or os.environ.get("DD_SERVICE")
+ self.env = env or os.environ.get("DD_ENV")
+ self.version = version or os.environ.get("DD_VERSION")
+ self.tags = _parse_tags_str(tags or os.environ.get("DD_TAGS"))
self._agent_writer = None
@property
@@ -133,6 +141,17 @@
datadog_span.set_tags(span.attributes)
+ # add configured env tag
+ if self.env is not None:
+ datadog_span.set_tag(ENV_KEY, self.env)
+
+ # add configured application version tag to only root span
+ if self.version is not None and parent_id == 0:
+ datadog_span.set_tag(VERSION_KEY, self.version)
+
+ # add configured global tags
+ datadog_span.set_tags(self.tags)
+
# add origin to root span
origin = _get_origin(span)
if origin and parent_id == 0:
@@ -230,3 +249,35 @@
and isinstance(span.sampler, trace_api.sampling.ProbabilitySampler)
else None
)
+
+
+def _parse_tags_str(tags_str):
+ """Parse a string of tags typically provided via environment variables.
+
+ The expected string is of the form::
+ "key1:value1,key2:value2"
+
+ :param tags_str: A string of the above form to parse tags from.
+ :return: A dict containing the tags that were parsed.
+ """
+ parsed_tags = {}
+ if not tags_str:
+ return parsed_tags
+
+ for tag in tags_str.split(","):
+ try:
+ key, value = tag.split(":", 1)
+
+ # Validate the tag
+ if key == "" or value == "" or value.endswith(":"):
+ raise ValueError
+ except ValueError:
+ logger.error(
+ "Malformed tag in tag pair '%s' from tag string '%s'.",
+ tag,
+ tags_str,
+ )
+ else:
+ parsed_tags[key] = value
+
+ return parsed_tags
|
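For reference, the `DD_TAGS`-style parsing that the diff above introduces can be exercised on its own. The sketch below mirrors the `_parse_tags_str` logic outside the exporter; the standalone function name and the demo values are assumptions made for this example, not part of the exporter's API:

```python
import logging

logger = logging.getLogger(__name__)


def parse_tags_str(tags_str):
    """Parse a "key1:value1,key2:value2" tag string into a dict, skipping malformed pairs."""
    parsed_tags = {}
    if not tags_str:
        return parsed_tags
    for tag in tags_str.split(","):
        try:
            key, value = tag.split(":", 1)
            # Reject empty keys/values and values ending in ":" (same checks as the patch)
            if key == "" or value == "" or value.endswith(":"):
                raise ValueError
        except ValueError:
            logger.error("Malformed tag pair '%s' in tag string '%s'.", tag, tags_str)
        else:
            parsed_tags[key] = value
    return parsed_tags


print(parse_tags_str("env:prod,team:web"))  # {'env': 'prod', 'team': 'web'}
print(parse_tags_str("broken,env:prod"))    # logs an error for 'broken', returns {'env': 'prod'}
```

In the patched exporter, the same parsing is applied to the ``DD_TAGS`` environment variable whenever no explicit ``tags`` argument is passed.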
{"golden_diff": "diff --git a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py\n--- a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py\n+++ b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py\n@@ -4,3 +4,5 @@\n USER_KEEP = 2\n SAMPLE_RATE_METRIC_KEY = \"_sample_rate\"\n SAMPLING_PRIORITY_KEY = \"_sampling_priority_v1\"\n+ENV_KEY = \"env\"\n+VERSION_KEY = \"version\"\ndiff --git a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py\n--- a/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py\n+++ b/ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py\n@@ -25,7 +25,7 @@\n from opentelemetry.trace.status import StatusCanonicalCode\n \n # pylint:disable=relative-beyond-top-level\n-from .constants import DD_ORIGIN, SAMPLE_RATE_METRIC_KEY\n+from .constants import DD_ORIGIN, ENV_KEY, SAMPLE_RATE_METRIC_KEY, VERSION_KEY\n \n logger = logging.getLogger(__name__)\n \n@@ -56,16 +56,24 @@\n \n Args:\n agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable\n- service: The service to be used for the application or use ``DD_SERVICE`` environment variable\n+ service: The service name to be used for the application or use ``DD_SERVICE`` environment variable\n+ env: Set the application\u2019s environment or use ``DD_ENV`` environment variable\n+ version: Set the application\u2019s version or use ``DD_VERSION`` environment variable\n+ tags: A list of default tags to be added to every span or use ``DD_TAGS`` environment variable\n \"\"\"\n \n- def __init__(self, agent_url=None, service=None):\n+ def __init__(\n+ self, agent_url=None, service=None, env=None, version=None, tags=None\n+ ):\n self.agent_url = (\n agent_url\n if agent_url\n else os.environ.get(\"DD_TRACE_AGENT_URL\", DEFAULT_AGENT_URL)\n )\n- self.service = service if service else os.environ.get(\"DD_SERVICE\")\n+ self.service = service or os.environ.get(\"DD_SERVICE\")\n+ self.env = env or os.environ.get(\"DD_ENV\")\n+ self.version = version or os.environ.get(\"DD_VERSION\")\n+ self.tags = _parse_tags_str(tags or os.environ.get(\"DD_TAGS\"))\n self._agent_writer = None\n \n @property\n@@ -133,6 +141,17 @@\n \n datadog_span.set_tags(span.attributes)\n \n+ # add configured env tag\n+ if self.env is not None:\n+ datadog_span.set_tag(ENV_KEY, self.env)\n+\n+ # add configured application version tag to only root span\n+ if self.version is not None and parent_id == 0:\n+ datadog_span.set_tag(VERSION_KEY, self.version)\n+\n+ # add configured global tags\n+ datadog_span.set_tags(self.tags)\n+\n # add origin to root span\n origin = _get_origin(span)\n if origin and parent_id == 0:\n@@ -230,3 +249,35 @@\n and isinstance(span.sampler, trace_api.sampling.ProbabilitySampler)\n else None\n )\n+\n+\n+def _parse_tags_str(tags_str):\n+ \"\"\"Parse a string of tags typically provided via environment variables.\n+\n+ The expected string is of the form::\n+ \"key1:value1,key2:value2\"\n+\n+ :param tags_str: A string of the above form to parse tags from.\n+ :return: A dict containing the tags that were parsed.\n+ \"\"\"\n+ parsed_tags = {}\n+ if not tags_str:\n+ return parsed_tags\n+\n+ for tag in tags_str.split(\",\"):\n+ try:\n+ key, value = tag.split(\":\", 1)\n+\n+ # Validate the tag\n+ if key == \"\" or value == \"\" or value.endswith(\":\"):\n+ raise 
ValueError\n+ except ValueError:\n+ logger.error(\n+ \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n+ tag,\n+ tags_str,\n+ )\n+ else:\n+ parsed_tags[key] = value\n+\n+ return parsed_tags\n", "issue": "datadog: honor DD environment variables\nThere's a few environment variables that are not honored by the exporter currently. These are:\r\n\r\n* DD_AGENT_HOST, for the agent hostname\r\n* DATADOG_SERVICE_NAME, DD_SERVICE_NAME, DD_SERVICE for the datadog service name.\r\n\r\nThere is a more comprehensive list, as outlined here:\r\n\r\nhttps://docs.datadoghq.com/tracing/setup/python/#configuration\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nfrom ddtrace.ext import SpanTypes as DatadogSpanTypes\nfrom ddtrace.internal.writer import AgentWriter\nfrom ddtrace.span import Span as DatadogSpan\n\nimport opentelemetry.trace as trace_api\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.trace.status import StatusCanonicalCode\n\n# pylint:disable=relative-beyond-top-level\nfrom .constants import DD_ORIGIN, SAMPLE_RATE_METRIC_KEY\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_AGENT_URL = \"http://localhost:8126\"\n_INSTRUMENTATION_SPAN_TYPES = {\n \"opentelemetry.ext.aiohttp-client\": DatadogSpanTypes.HTTP,\n \"opentelemetry.ext.asgi\": DatadogSpanTypes.WEB,\n \"opentelemetry.ext.dbapi\": DatadogSpanTypes.SQL,\n \"opentelemetry.ext.django\": DatadogSpanTypes.WEB,\n \"opentelemetry.ext.flask\": DatadogSpanTypes.WEB,\n \"opentelemetry.ext.grpc\": DatadogSpanTypes.GRPC,\n \"opentelemetry.ext.jinja2\": DatadogSpanTypes.TEMPLATE,\n \"opentelemetry.ext.mysql\": DatadogSpanTypes.SQL,\n \"opentelemetry.ext.psycopg2\": DatadogSpanTypes.SQL,\n \"opentelemetry.ext.pymemcache\": DatadogSpanTypes.CACHE,\n \"opentelemetry.ext.pymongo\": DatadogSpanTypes.MONGODB,\n \"opentelemetry.ext.pymysql\": DatadogSpanTypes.SQL,\n \"opentelemetry.ext.redis\": DatadogSpanTypes.REDIS,\n \"opentelemetry.ext.requests\": DatadogSpanTypes.HTTP,\n \"opentelemetry.ext.sqlalchemy\": DatadogSpanTypes.SQL,\n \"opentelemetry.ext.wsgi\": DatadogSpanTypes.WEB,\n}\n\n\nclass DatadogSpanExporter(SpanExporter):\n \"\"\"Datadog span exporter for OpenTelemetry.\n\n Args:\n agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable\n service: The service to be used for the application or use ``DD_SERVICE`` environment variable\n \"\"\"\n\n def __init__(self, agent_url=None, service=None):\n self.agent_url = (\n agent_url\n if agent_url\n else os.environ.get(\"DD_TRACE_AGENT_URL\", DEFAULT_AGENT_URL)\n )\n self.service = service if service else os.environ.get(\"DD_SERVICE\")\n self._agent_writer = None\n\n @property\n def agent_writer(self):\n if self._agent_writer is None:\n url_parsed = urlparse(self.agent_url)\n if url_parsed.scheme in (\"http\", \"https\"):\n self._agent_writer = AgentWriter(\n 
hostname=url_parsed.hostname,\n port=url_parsed.port,\n https=url_parsed.scheme == \"https\",\n )\n elif url_parsed.scheme == \"unix\":\n self._agent_writer = AgentWriter(uds_path=url_parsed.path)\n else:\n raise ValueError(\n \"Unknown scheme `%s` for agent URL\" % url_parsed.scheme\n )\n return self._agent_writer\n\n def export(self, spans):\n datadog_spans = self._translate_to_datadog(spans)\n\n self.agent_writer.write(spans=datadog_spans)\n\n return SpanExportResult.SUCCESS\n\n def shutdown(self):\n if self.agent_writer.started:\n self.agent_writer.stop()\n self.agent_writer.join(self.agent_writer.exit_timeout)\n\n def _translate_to_datadog(self, spans):\n datadog_spans = []\n\n for span in spans:\n trace_id, parent_id, span_id = _get_trace_ids(span)\n\n # datadog Span is initialized with a reference to the tracer which is\n # used to record the span when it is finished. We can skip ignore this\n # because we are not calling the finish method and explictly set the\n # duration.\n tracer = None\n\n datadog_span = DatadogSpan(\n tracer,\n _get_span_name(span),\n service=self.service,\n resource=_get_resource(span),\n span_type=_get_span_type(span),\n trace_id=trace_id,\n span_id=span_id,\n parent_id=parent_id,\n )\n datadog_span.start_ns = span.start_time\n datadog_span.duration_ns = span.end_time - span.start_time\n\n if span.status.canonical_code is not StatusCanonicalCode.OK:\n datadog_span.error = 1\n if span.status.description:\n exc_type, exc_val = _get_exc_info(span)\n # no mapping for error.stack since traceback not recorded\n datadog_span.set_tag(\"error.msg\", exc_val)\n datadog_span.set_tag(\"error.type\", exc_type)\n\n datadog_span.set_tags(span.attributes)\n\n # add origin to root span\n origin = _get_origin(span)\n if origin and parent_id == 0:\n datadog_span.set_tag(DD_ORIGIN, origin)\n\n sampling_rate = _get_sampling_rate(span)\n if sampling_rate is not None:\n datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)\n\n # span events and span links are not supported\n\n datadog_spans.append(datadog_span)\n\n return datadog_spans\n\n\ndef _get_trace_ids(span):\n \"\"\"Extract tracer ids from span\"\"\"\n ctx = span.get_context()\n trace_id = ctx.trace_id\n span_id = ctx.span_id\n\n if isinstance(span.parent, trace_api.Span):\n parent_id = span.parent.get_context().span_id\n elif isinstance(span.parent, trace_api.SpanContext):\n parent_id = span.parent.span_id\n else:\n parent_id = 0\n\n trace_id = _convert_trace_id_uint64(trace_id)\n\n return trace_id, parent_id, span_id\n\n\ndef _convert_trace_id_uint64(otel_id):\n \"\"\"Convert 128-bit int used for trace_id to 64-bit unsigned int\"\"\"\n return otel_id & 0xFFFFFFFFFFFFFFFF\n\n\ndef _get_span_name(span):\n \"\"\"Get span name by using instrumentation and kind while backing off to\n span.name\n \"\"\"\n instrumentation_name = (\n span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_kind_name = span.kind.name if span.kind else None\n name = (\n \"{}.{}\".format(instrumentation_name, span_kind_name)\n if instrumentation_name and span_kind_name\n else span.name\n )\n return name\n\n\ndef _get_resource(span):\n \"\"\"Get resource name for span\"\"\"\n if \"http.method\" in span.attributes:\n route = span.attributes.get(\"http.route\")\n return (\n span.attributes[\"http.method\"] + \" \" + route\n if route\n else span.attributes[\"http.method\"]\n )\n\n return span.name\n\n\ndef _get_span_type(span):\n \"\"\"Get Datadog span type\"\"\"\n instrumentation_name = (\n 
span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_type = _INSTRUMENTATION_SPAN_TYPES.get(instrumentation_name)\n return span_type\n\n\ndef _get_exc_info(span):\n \"\"\"Parse span status description for exception type and value\"\"\"\n exc_type, exc_val = span.status.description.split(\":\", 1)\n return exc_type, exc_val.strip()\n\n\ndef _get_origin(span):\n ctx = span.get_context()\n origin = ctx.trace_state.get(DD_ORIGIN)\n return origin\n\n\ndef _get_sampling_rate(span):\n ctx = span.get_context()\n return (\n span.sampler.rate\n if ctx.trace_flags.sampled\n and isinstance(span.sampler, trace_api.sampling.ProbabilitySampler)\n else None\n )\n", "path": "ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/exporter.py"}, {"content": "DD_ORIGIN = \"_dd_origin\"\nAUTO_REJECT = 0\nAUTO_KEEP = 1\nUSER_KEEP = 2\nSAMPLE_RATE_METRIC_KEY = \"_sample_rate\"\nSAMPLING_PRIORITY_KEY = \"_sampling_priority_v1\"\n", "path": "ext/opentelemetry-ext-datadog/src/opentelemetry/ext/datadog/constants.py"}]}
| 3,166 | 1,016 |
gh_patches_debug_16825
|
rasdani/github-patches
|
git_diff
|
biopython__biopython-4759
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scipy 1.14.0 breaks Bio.phenotype
Spotted via a regression on the master branch. See https://docs.scipy.org/doc/scipy/release/1.14.0-notes.html which says scipy 1.14.0 (released 15 June 2024) removed deprecated function ``scipy.integrate.trapz`` in favour of ``trapezoid``.
This requires updating ``Bio.phenotype`` but also I would have expected the test to have been skipped rather than failed:
```
======================================================================
ERROR: test_WellRecord (test_phenotype_fit.TestPhenoMicro.test_WellRecord)
Test basic functionalities of WellRecord objects.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/Bio/phenotype/pm_fitting.py", line 27, in <module>
from scipy.integrate import trapz
Skipping any tests requiring internet access
Python version: 3.11.9 (main, Jun 20 2024, 16:02:53) [GCC 11.4.0]
Operating system: posix linux
ImportError: cannot import name 'trapz' from 'scipy.integrate' (/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/scipy/integrate/__init__.py)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/runner/work/biopython/biopython/Tests/test_phenotype_fit.py", line 63, in test_WellRecord
w.fit()
File "/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/Bio/phenotype/phen_micro.py", line 882, in fit
from .pm_fitting import fit, get_area
File "/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/Bio/phenotype/pm_fitting.py", line 31, in <module>
raise MissingPythonDependencyError("Install scipy to extract curve parameters.")
Bio.MissingPythonDependencyError: Install scipy to extract curve parameters.
----------------------------------------------------------------------
```
</issue>
<code>
[start of Bio/phenotype/pm_fitting.py]
1 # Copyright 2014-2016 by Marco Galardini. All rights reserved.
2 #
3 # This file is part of the Biopython distribution and governed by your
4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
5 # Please see the LICENSE file that should have been included as part of this
6 # package.
7 """Growth curves fitting and parameters extraction for phenotype data.
8
9 This module provides functions to perform sigmoid functions fitting to
10 Phenotype Microarray data. This module depends on scipy curve_fit function.
11 If not available, a warning is raised.
12
13 Functions:
14 logistic Logistic growth model.
15 gompertz Gompertz growth model.
16 richards Richards growth model.
17 guess_plateau Guess the plateau point to improve sigmoid fitting.
18 guess_lag Guess the lag point to improve sigmoid fitting.
19 fit Sigmoid functions fit.
20 get_area Calculate the area under the PM curve.
21 """
22
23 import numpy as np
24
25 try:
26 from scipy.optimize.minpack import curve_fit
27 from scipy.integrate import trapz
28 except ImportError:
29 from Bio import MissingPythonDependencyError
30
31 raise MissingPythonDependencyError("Install scipy to extract curve parameters.")
32
33
34 def logistic(x, A, u, d, v, y0):
35 """Logistic growth model.
36
37 Proposed in Zwietering et al., 1990 (PMID: 16348228)
38 """
39 y = (A / (1 + np.exp((((4 * u) / A) * (d - x)) + 2))) + y0
40 return y
41
42
43 def gompertz(x, A, u, d, v, y0):
44 """Gompertz growth model.
45
46 Proposed in Zwietering et al., 1990 (PMID: 16348228)
47 """
48 y = (A * np.exp(-np.exp((((u * np.e) / A) * (d - x)) + 1))) + y0
49 return y
50
51
52 def richards(x, A, u, d, v, y0):
53 """Richards growth model (equivalent to Stannard).
54
55 Proposed in Zwietering et al., 1990 (PMID: 16348228)
56 """
57 y = (
58 A
59 * pow(
60 1
61 + (
62 v
63 + (np.exp(1 + v) * np.exp((u / A) * (1 + v) * (1 + (1 / v)) * (d - x)))
64 ),
65 -(1 / v),
66 )
67 ) + y0
68 return y
69
70
71 def guess_lag(x, y):
72 """Given two axes returns a guess of the lag point.
73
74 The lag point is defined as the x point where the difference in y
75 with the next point is higher then the mean differences between
76 the points plus one standard deviation. If such point is not found
77 or x and y have different lengths the function returns zero.
78 """
79 if len(x) != len(y):
80 return 0
81
82 diffs = []
83 indexes = range(len(x))
84
85 for i in indexes:
86 if i + 1 not in indexes:
87 continue
88 diffs.append(y[i + 1] - y[i])
89 diffs = np.array(diffs)
90
91 flex = x[-1]
92 for i in indexes:
93 if i + 1 not in indexes:
94 continue
95 if (y[i + 1] - y[i]) > (diffs.mean() + (diffs.std())):
96 flex = x[i]
97 break
98
99 return flex
100
101
102 def guess_plateau(x, y):
103 """Given two axes returns a guess of the plateau point.
104
105 The plateau point is defined as the x point where the y point
106 is near one standard deviation of the differences between the y points to
107 the maximum y value. If such point is not found or x and y have
108 different lengths the function returns zero.
109 """
110 if len(x) != len(y):
111 return 0
112
113 diffs = []
114 indexes = range(len(y))
115
116 for i in indexes:
117 if i + 1 not in indexes:
118 continue
119 diffs.append(y[i + 1] - y[i])
120 diffs = np.array(diffs)
121
122 ymax = y[-1]
123 for i in indexes:
124 if y[i] > (ymax - diffs.std()) and y[i] < (ymax + diffs.std()):
125 ymax = y[i]
126 break
127
128 return ymax
129
130
131 def fit(function, x, y):
132 """Fit the provided function to the x and y values.
133
134 The function parameters and the parameters covariance.
135 """
136 # Compute guesses for the parameters
137 # This is necessary to get significant fits
138 p0 = [guess_plateau(x, y), 4.0, guess_lag(x, y), 0.1, min(y)]
139
140 params, pcov = curve_fit(function, x, y, p0=p0)
141 return params, pcov
142
143
144 def get_area(y, x):
145 """Get the area under the curve."""
146 return trapz(y=y, x=x)
147
[end of Bio/phenotype/pm_fitting.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Bio/phenotype/pm_fitting.py b/Bio/phenotype/pm_fitting.py
--- a/Bio/phenotype/pm_fitting.py
+++ b/Bio/phenotype/pm_fitting.py
@@ -24,12 +24,17 @@
try:
from scipy.optimize.minpack import curve_fit
- from scipy.integrate import trapz
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError("Install scipy to extract curve parameters.")
+try:
+ from scipy.integrate import trapezoid
+except ImportError:
+ # Assume this is prior to scipy 1.12.0 and try old name:
+ from scipy.integrate import trapz as trapezoid
+
def logistic(x, A, u, d, v, y0):
"""Logistic growth model.
@@ -143,4 +148,4 @@
def get_area(y, x):
"""Get the area under the curve."""
- return trapz(y=y, x=x)
+ return trapezoid(y=y, x=x)
|
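As a standalone illustration of the compatibility shim this fix relies on, the same import fallback can be tried outside Biopython. This is only a sketch, assuming scipy and numpy are installed; the sample points are made up for the example:

```python
import numpy as np

try:
    # Newer scipy releases expose trapezoid; trapz was removed in scipy 1.14.0
    from scipy.integrate import trapezoid
except ImportError:
    # Older releases only provide the deprecated name
    from scipy.integrate import trapz as trapezoid

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 1.0, 2.0, 1.0])
print(trapezoid(y=y, x=x))  # area under the curve: 3.5 for these points
```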
{"golden_diff": "diff --git a/Bio/phenotype/pm_fitting.py b/Bio/phenotype/pm_fitting.py\n--- a/Bio/phenotype/pm_fitting.py\n+++ b/Bio/phenotype/pm_fitting.py\n@@ -24,12 +24,17 @@\n \n try:\n from scipy.optimize.minpack import curve_fit\n- from scipy.integrate import trapz\n except ImportError:\n from Bio import MissingPythonDependencyError\n \n raise MissingPythonDependencyError(\"Install scipy to extract curve parameters.\")\n \n+try:\n+ from scipy.integrate import trapezoid\n+except ImportError:\n+ # Assume this is prior to scipy 1.12.0 and try old name:\n+ from scipy.integrate import trapz as trapezoid\n+\n \n def logistic(x, A, u, d, v, y0):\n \"\"\"Logistic growth model.\n@@ -143,4 +148,4 @@\n \n def get_area(y, x):\n \"\"\"Get the area under the curve.\"\"\"\n- return trapz(y=y, x=x)\n+ return trapezoid(y=y, x=x)\n", "issue": "scipy 1.14.0 breaks Bio.phenotype\nSpotted via a regression on the master branch. See https://docs.scipy.org/doc/scipy/release/1.14.0-notes.html which says scipy 1.14.0 (released 15 June 2024) removed deprecated function ``scipy.integrate.trapz`` in favour of ``trapezoid``.\r\n\r\nThis requires updating ``Bio.phenotype`` but also I would have expected the test to have been skipped rather than failed:\r\n\r\n```\r\n======================================================================\r\nERROR: test_WellRecord (test_phenotype_fit.TestPhenoMicro.test_WellRecord)\r\nTest basic functionalities of WellRecord objects.\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/Bio/phenotype/pm_fitting.py\", line 27, in <module>\r\n from scipy.integrate import trapz\r\nSkipping any tests requiring internet access\r\nPython version: 3.11.9 (main, Jun 20 2024, 16:02:53) [GCC 11.4.0]\r\nOperating system: posix linux\r\nImportError: cannot import name 'trapz' from 'scipy.integrate' (/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/scipy/integrate/__init__.py)\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/runner/work/biopython/biopython/Tests/test_phenotype_fit.py\", line 63, in test_WellRecord\r\n w.fit()\r\n File \"/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/Bio/phenotype/phen_micro.py\", line 882, in fit\r\n from .pm_fitting import fit, get_area\r\n File \"/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/Bio/phenotype/pm_fitting.py\", line 31, in <module>\r\n raise MissingPythonDependencyError(\"Install scipy to extract curve parameters.\")\r\nBio.MissingPythonDependencyError: Install scipy to extract curve parameters.\r\n\r\n----------------------------------------------------------------------\r\n```\n", "before_files": [{"content": "# Copyright 2014-2016 by Marco Galardini. All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Growth curves fitting and parameters extraction for phenotype data.\n\nThis module provides functions to perform sigmoid functions fitting to\nPhenotype Microarray data. 
This module depends on scipy curve_fit function.\nIf not available, a warning is raised.\n\nFunctions:\nlogistic Logistic growth model.\ngompertz Gompertz growth model.\nrichards Richards growth model.\nguess_plateau Guess the plateau point to improve sigmoid fitting.\nguess_lag Guess the lag point to improve sigmoid fitting.\nfit Sigmoid functions fit.\nget_area Calculate the area under the PM curve.\n\"\"\"\n\nimport numpy as np\n\ntry:\n from scipy.optimize.minpack import curve_fit\n from scipy.integrate import trapz\nexcept ImportError:\n from Bio import MissingPythonDependencyError\n\n raise MissingPythonDependencyError(\"Install scipy to extract curve parameters.\")\n\n\ndef logistic(x, A, u, d, v, y0):\n \"\"\"Logistic growth model.\n\n Proposed in Zwietering et al., 1990 (PMID: 16348228)\n \"\"\"\n y = (A / (1 + np.exp((((4 * u) / A) * (d - x)) + 2))) + y0\n return y\n\n\ndef gompertz(x, A, u, d, v, y0):\n \"\"\"Gompertz growth model.\n\n Proposed in Zwietering et al., 1990 (PMID: 16348228)\n \"\"\"\n y = (A * np.exp(-np.exp((((u * np.e) / A) * (d - x)) + 1))) + y0\n return y\n\n\ndef richards(x, A, u, d, v, y0):\n \"\"\"Richards growth model (equivalent to Stannard).\n\n Proposed in Zwietering et al., 1990 (PMID: 16348228)\n \"\"\"\n y = (\n A\n * pow(\n 1\n + (\n v\n + (np.exp(1 + v) * np.exp((u / A) * (1 + v) * (1 + (1 / v)) * (d - x)))\n ),\n -(1 / v),\n )\n ) + y0\n return y\n\n\ndef guess_lag(x, y):\n \"\"\"Given two axes returns a guess of the lag point.\n\n The lag point is defined as the x point where the difference in y\n with the next point is higher then the mean differences between\n the points plus one standard deviation. If such point is not found\n or x and y have different lengths the function returns zero.\n \"\"\"\n if len(x) != len(y):\n return 0\n\n diffs = []\n indexes = range(len(x))\n\n for i in indexes:\n if i + 1 not in indexes:\n continue\n diffs.append(y[i + 1] - y[i])\n diffs = np.array(diffs)\n\n flex = x[-1]\n for i in indexes:\n if i + 1 not in indexes:\n continue\n if (y[i + 1] - y[i]) > (diffs.mean() + (diffs.std())):\n flex = x[i]\n break\n\n return flex\n\n\ndef guess_plateau(x, y):\n \"\"\"Given two axes returns a guess of the plateau point.\n\n The plateau point is defined as the x point where the y point\n is near one standard deviation of the differences between the y points to\n the maximum y value. If such point is not found or x and y have\n different lengths the function returns zero.\n \"\"\"\n if len(x) != len(y):\n return 0\n\n diffs = []\n indexes = range(len(y))\n\n for i in indexes:\n if i + 1 not in indexes:\n continue\n diffs.append(y[i + 1] - y[i])\n diffs = np.array(diffs)\n\n ymax = y[-1]\n for i in indexes:\n if y[i] > (ymax - diffs.std()) and y[i] < (ymax + diffs.std()):\n ymax = y[i]\n break\n\n return ymax\n\n\ndef fit(function, x, y):\n \"\"\"Fit the provided function to the x and y values.\n\n The function parameters and the parameters covariance.\n \"\"\"\n # Compute guesses for the parameters\n # This is necessary to get significant fits\n p0 = [guess_plateau(x, y), 4.0, guess_lag(x, y), 0.1, min(y)]\n\n params, pcov = curve_fit(function, x, y, p0=p0)\n return params, pcov\n\n\ndef get_area(y, x):\n \"\"\"Get the area under the curve.\"\"\"\n return trapz(y=y, x=x)\n", "path": "Bio/phenotype/pm_fitting.py"}]}
| 2,536 | 246 |
gh_patches_debug_9374
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-1422
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Key versioning/key rotation support (wishlist)
It would be nice if Fernet had built-in support for key versioning. I don't know exactly what the API would look like, but the idea would be to allow encrypting new data under a new key while still retaining the ability to decrypt old data that was encrypted under older keys.
If Fernet supported this natively, then I could tell developers to "just use Fernet" when they first write their applications, and later when they get a real security engineer, that person could start doing key rotation without having to change application-level code or data structures.
</issue>
<code>
[start of cryptography/fernet.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import base64
17 import binascii
18 import os
19 import struct
20 import time
21
22 import six
23
24 from cryptography.exceptions import InvalidSignature
25 from cryptography.hazmat.backends import default_backend
26 from cryptography.hazmat.primitives import hashes, padding
27 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
28 from cryptography.hazmat.primitives.hmac import HMAC
29
30
31 class InvalidToken(Exception):
32 pass
33
34
35 _MAX_CLOCK_SKEW = 60
36
37
38 class Fernet(object):
39 def __init__(self, key, backend=None):
40 if backend is None:
41 backend = default_backend()
42
43 key = base64.urlsafe_b64decode(key)
44 if len(key) != 32:
45 raise ValueError(
46 "Fernet key must be 32 url-safe base64-encoded bytes."
47 )
48
49 self._signing_key = key[:16]
50 self._encryption_key = key[16:]
51 self._backend = backend
52
53 @classmethod
54 def generate_key(cls):
55 return base64.urlsafe_b64encode(os.urandom(32))
56
57 def encrypt(self, data):
58 current_time = int(time.time())
59 iv = os.urandom(16)
60 return self._encrypt_from_parts(data, current_time, iv)
61
62 def _encrypt_from_parts(self, data, current_time, iv):
63 if not isinstance(data, bytes):
64 raise TypeError("data must be bytes.")
65
66 padder = padding.PKCS7(algorithms.AES.block_size).padder()
67 padded_data = padder.update(data) + padder.finalize()
68 encryptor = Cipher(
69 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend
70 ).encryptor()
71 ciphertext = encryptor.update(padded_data) + encryptor.finalize()
72
73 basic_parts = (
74 b"\x80" + struct.pack(">Q", current_time) + iv + ciphertext
75 )
76
77 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
78 h.update(basic_parts)
79 hmac = h.finalize()
80 return base64.urlsafe_b64encode(basic_parts + hmac)
81
82 def decrypt(self, token, ttl=None):
83 if not isinstance(token, bytes):
84 raise TypeError("token must be bytes.")
85
86 current_time = int(time.time())
87
88 try:
89 data = base64.urlsafe_b64decode(token)
90 except (TypeError, binascii.Error):
91 raise InvalidToken
92
93 if not data or six.indexbytes(data, 0) != 0x80:
94 raise InvalidToken
95
96 try:
97 timestamp, = struct.unpack(">Q", data[1:9])
98 except struct.error:
99 raise InvalidToken
100 if ttl is not None:
101 if timestamp + ttl < current_time:
102 raise InvalidToken
103 if current_time + _MAX_CLOCK_SKEW < timestamp:
104 raise InvalidToken
105 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
106 h.update(data[:-32])
107 try:
108 h.verify(data[-32:])
109 except InvalidSignature:
110 raise InvalidToken
111
112 iv = data[9:25]
113 ciphertext = data[25:-32]
114 decryptor = Cipher(
115 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend
116 ).decryptor()
117 plaintext_padded = decryptor.update(ciphertext)
118 try:
119 plaintext_padded += decryptor.finalize()
120 except ValueError:
121 raise InvalidToken
122 unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
123
124 unpadded = unpadder.update(plaintext_padded)
125 try:
126 unpadded += unpadder.finalize()
127 except ValueError:
128 raise InvalidToken
129 return unpadded
130
[end of cryptography/fernet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cryptography/fernet.py b/cryptography/fernet.py
--- a/cryptography/fernet.py
+++ b/cryptography/fernet.py
@@ -127,3 +127,24 @@
except ValueError:
raise InvalidToken
return unpadded
+
+
+class MultiFernet(object):
+ def __init__(self, fernets):
+ fernets = list(fernets)
+ if not fernets:
+ raise ValueError(
+ "MultiFernet requires at least one Fernet instance"
+ )
+ self._fernets = fernets
+
+ def encrypt(self, msg):
+ return self._fernets[0].encrypt(msg)
+
+ def decrypt(self, msg, ttl=None):
+ for f in self._fernets:
+ try:
+ return f.decrypt(msg, ttl)
+ except InvalidToken:
+ pass
+ raise InvalidToken
|
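The rotation behaviour that `MultiFernet` adds in the diff above can also be shown at the usage level with plain `Fernet` objects. The sketch below only demonstrates the encrypt-with-newest, try-each-on-decrypt idea; the key names and the helper function are invented for the example:

```python
from cryptography.fernet import Fernet, InvalidToken

old_key = Fernet.generate_key()
new_key = Fernet.generate_key()

# A token written before the key rotation, encrypted under the old key
legacy_token = Fernet(old_key).encrypt(b"secret record")

# After rotation: always encrypt with the newest key, try each key when decrypting
fernets = [Fernet(new_key), Fernet(old_key)]  # newest first


def decrypt_any(token):
    for f in fernets:
        try:
            return f.decrypt(token)
        except InvalidToken:
            continue
    raise InvalidToken


print(decrypt_any(legacy_token))                       # b'secret record'
print(decrypt_any(fernets[0].encrypt(b"new record")))  # b'new record'
```

With the patch applied, `MultiFernet([Fernet(new_key), Fernet(old_key)])` wraps exactly this loop.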
{"golden_diff": "diff --git a/cryptography/fernet.py b/cryptography/fernet.py\n--- a/cryptography/fernet.py\n+++ b/cryptography/fernet.py\n@@ -127,3 +127,24 @@\n except ValueError:\n raise InvalidToken\n return unpadded\n+\n+\n+class MultiFernet(object):\n+ def __init__(self, fernets):\n+ fernets = list(fernets)\n+ if not fernets:\n+ raise ValueError(\n+ \"MultiFernet requires at least one Fernet instance\"\n+ )\n+ self._fernets = fernets\n+\n+ def encrypt(self, msg):\n+ return self._fernets[0].encrypt(msg)\n+\n+ def decrypt(self, msg, ttl=None):\n+ for f in self._fernets:\n+ try:\n+ return f.decrypt(msg, ttl)\n+ except InvalidToken:\n+ pass\n+ raise InvalidToken\n", "issue": "Key versioning/key rotation support (wishlist)\nIt would be nice if Fernet had built-in support for key versioning. I don't know exactly what the API would look like, but the idea would be to allow encrypting new data under a new key while still retaining the ability to decrypt old data that was encrypted under older keys.\n\nIf Fernet supported this natively, then I could tell developers to \"just use Fernet\" when they first write their applications, and later when they get a real security engineer, that person could start doing key rotation without having to change application-level code or data structures.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport base64\nimport binascii\nimport os\nimport struct\nimport time\n\nimport six\n\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass InvalidToken(Exception):\n pass\n\n\n_MAX_CLOCK_SKEW = 60\n\n\nclass Fernet(object):\n def __init__(self, key, backend=None):\n if backend is None:\n backend = default_backend()\n\n key = base64.urlsafe_b64decode(key)\n if len(key) != 32:\n raise ValueError(\n \"Fernet key must be 32 url-safe base64-encoded bytes.\"\n )\n\n self._signing_key = key[:16]\n self._encryption_key = key[16:]\n self._backend = backend\n\n @classmethod\n def generate_key(cls):\n return base64.urlsafe_b64encode(os.urandom(32))\n\n def encrypt(self, data):\n current_time = int(time.time())\n iv = os.urandom(16)\n return self._encrypt_from_parts(data, current_time, iv)\n\n def _encrypt_from_parts(self, data, current_time, iv):\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n\n padder = padding.PKCS7(algorithms.AES.block_size).padder()\n padded_data = padder.update(data) + padder.finalize()\n encryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).encryptor()\n ciphertext = encryptor.update(padded_data) + encryptor.finalize()\n\n basic_parts = (\n b\"\\x80\" + struct.pack(\">Q\", current_time) + iv + ciphertext\n )\n\n h = 
HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(basic_parts)\n hmac = h.finalize()\n return base64.urlsafe_b64encode(basic_parts + hmac)\n\n def decrypt(self, token, ttl=None):\n if not isinstance(token, bytes):\n raise TypeError(\"token must be bytes.\")\n\n current_time = int(time.time())\n\n try:\n data = base64.urlsafe_b64decode(token)\n except (TypeError, binascii.Error):\n raise InvalidToken\n\n if not data or six.indexbytes(data, 0) != 0x80:\n raise InvalidToken\n\n try:\n timestamp, = struct.unpack(\">Q\", data[1:9])\n except struct.error:\n raise InvalidToken\n if ttl is not None:\n if timestamp + ttl < current_time:\n raise InvalidToken\n if current_time + _MAX_CLOCK_SKEW < timestamp:\n raise InvalidToken\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(data[:-32])\n try:\n h.verify(data[-32:])\n except InvalidSignature:\n raise InvalidToken\n\n iv = data[9:25]\n ciphertext = data[25:-32]\n decryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).decryptor()\n plaintext_padded = decryptor.update(ciphertext)\n try:\n plaintext_padded += decryptor.finalize()\n except ValueError:\n raise InvalidToken\n unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()\n\n unpadded = unpadder.update(plaintext_padded)\n try:\n unpadded += unpadder.finalize()\n except ValueError:\n raise InvalidToken\n return unpadded\n", "path": "cryptography/fernet.py"}]}
| 1,914 | 207 |
gh_patches_debug_24010
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-798
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--cert option for pip-sync
pip-sync is missing the `--cert` and `--client-cert` options, that are (somehow, see #712) implemented for pip-compile.
</issue>
<code>
[start of piptools/scripts/sync.py]
1 # coding: utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import os
5 import sys
6
7 from .. import click, sync
8 from .._compat import get_installed_distributions, parse_requirements
9 from ..exceptions import PipToolsError
10 from ..logging import log
11 from ..utils import flat_map
12
13 DEFAULT_REQUIREMENTS_FILE = "requirements.txt"
14
15
16 @click.command()
17 @click.version_option()
18 @click.option(
19 "-n",
20 "--dry-run",
21 is_flag=True,
22 help="Only show what would happen, don't change anything",
23 )
24 @click.option("--force", is_flag=True, help="Proceed even if conflicts are found")
25 @click.option(
26 "-f",
27 "--find-links",
28 multiple=True,
29 help="Look for archives in this directory or on this HTML page",
30 envvar="PIP_FIND_LINKS",
31 )
32 @click.option(
33 "-i",
34 "--index-url",
35 help="Change index URL (defaults to PyPI)",
36 envvar="PIP_INDEX_URL",
37 )
38 @click.option(
39 "--extra-index-url",
40 multiple=True,
41 help="Add additional index URL to search",
42 envvar="PIP_EXTRA_INDEX_URL",
43 )
44 @click.option(
45 "--trusted-host",
46 multiple=True,
47 help="Mark this host as trusted, even though it does not have valid or any HTTPS.",
48 )
49 @click.option(
50 "--no-index",
51 is_flag=True,
52 help="Ignore package index (only looking at --find-links URLs instead)",
53 )
54 @click.option("-q", "--quiet", default=False, is_flag=True, help="Give less output")
55 @click.option(
56 "--user", "user_only", is_flag=True, help="Restrict attention to user directory"
57 )
58 @click.argument("src_files", required=False, type=click.Path(exists=True), nargs=-1)
59 def cli(
60 dry_run,
61 force,
62 find_links,
63 index_url,
64 extra_index_url,
65 trusted_host,
66 no_index,
67 quiet,
68 user_only,
69 src_files,
70 ):
71 """Synchronize virtual environment with requirements.txt."""
72 if not src_files:
73 if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
74 src_files = (DEFAULT_REQUIREMENTS_FILE,)
75 else:
76 msg = "No requirement files given and no {} found in the current directory"
77 log.error(msg.format(DEFAULT_REQUIREMENTS_FILE))
78 sys.exit(2)
79
80 if any(src_file.endswith(".in") for src_file in src_files):
81 msg = (
82 "Some input files have the .in extension, which is most likely an error "
83 "and can cause weird behaviour. You probably meant to use "
84 "the corresponding *.txt file?"
85 )
86 if force:
87 log.warning("WARNING: " + msg)
88 else:
89 log.error("ERROR: " + msg)
90 sys.exit(2)
91
92 requirements = flat_map(
93 lambda src: parse_requirements(src, session=True), src_files
94 )
95
96 try:
97 requirements = sync.merge(requirements, ignore_conflicts=force)
98 except PipToolsError as e:
99 log.error(str(e))
100 sys.exit(2)
101
102 installed_dists = get_installed_distributions(skip=[], user_only=user_only)
103 to_install, to_uninstall = sync.diff(requirements, installed_dists)
104
105 install_flags = []
106 for link in find_links or []:
107 install_flags.extend(["-f", link])
108 if no_index:
109 install_flags.append("--no-index")
110 if index_url:
111 install_flags.extend(["-i", index_url])
112 if extra_index_url:
113 for extra_index in extra_index_url:
114 install_flags.extend(["--extra-index-url", extra_index])
115 if trusted_host:
116 for host in trusted_host:
117 install_flags.extend(["--trusted-host", host])
118 if user_only:
119 install_flags.append("--user")
120
121 sys.exit(
122 sync.sync(
123 to_install,
124 to_uninstall,
125 verbose=(not quiet),
126 dry_run=dry_run,
127 install_flags=install_flags,
128 )
129 )
130
[end of piptools/scripts/sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/piptools/scripts/sync.py b/piptools/scripts/sync.py
--- a/piptools/scripts/sync.py
+++ b/piptools/scripts/sync.py
@@ -55,6 +55,12 @@
@click.option(
"--user", "user_only", is_flag=True, help="Restrict attention to user directory"
)
[email protected]("--cert", help="Path to alternate CA bundle.")
[email protected](
+ "--client-cert",
+ help="Path to SSL client certificate, a single file containing "
+ "the private key and the certificate in PEM format.",
+)
@click.argument("src_files", required=False, type=click.Path(exists=True), nargs=-1)
def cli(
dry_run,
@@ -66,6 +72,8 @@
no_index,
quiet,
user_only,
+ cert,
+ client_cert,
src_files,
):
"""Synchronize virtual environment with requirements.txt."""
@@ -117,6 +125,10 @@
install_flags.extend(["--trusted-host", host])
if user_only:
install_flags.append("--user")
+ if cert:
+ install_flags.extend(["--cert", cert])
+ if client_cert:
+ install_flags.extend(["--client-cert", client_cert])
sys.exit(
sync.sync(
|
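The flag plumbing in the diff above follows the same pattern as the existing options: each CLI value is appended to the argument list that gets handed to pip. A small self-contained sketch of that pattern follows; the helper name and the paths are made up for the example:

```python
def build_install_flags(cert=None, client_cert=None, user_only=False):
    """Collect pip flags the way pip-sync assembles install_flags (illustration only)."""
    flags = []
    if user_only:
        flags.append("--user")
    if cert:
        flags.extend(["--cert", cert])
    if client_cert:
        flags.extend(["--client-cert", client_cert])
    return flags


print(build_install_flags(cert="/etc/ssl/corp-ca.pem"))
# ['--cert', '/etc/ssl/corp-ca.pem']
print(build_install_flags(client_cert="~/certs/client.pem", user_only=True))
# ['--user', '--client-cert', '~/certs/client.pem']
```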
{"golden_diff": "diff --git a/piptools/scripts/sync.py b/piptools/scripts/sync.py\n--- a/piptools/scripts/sync.py\n+++ b/piptools/scripts/sync.py\n@@ -55,6 +55,12 @@\n @click.option(\n \"--user\", \"user_only\", is_flag=True, help=\"Restrict attention to user directory\"\n )\[email protected](\"--cert\", help=\"Path to alternate CA bundle.\")\[email protected](\n+ \"--client-cert\",\n+ help=\"Path to SSL client certificate, a single file containing \"\n+ \"the private key and the certificate in PEM format.\",\n+)\n @click.argument(\"src_files\", required=False, type=click.Path(exists=True), nargs=-1)\n def cli(\n dry_run,\n@@ -66,6 +72,8 @@\n no_index,\n quiet,\n user_only,\n+ cert,\n+ client_cert,\n src_files,\n ):\n \"\"\"Synchronize virtual environment with requirements.txt.\"\"\"\n@@ -117,6 +125,10 @@\n install_flags.extend([\"--trusted-host\", host])\n if user_only:\n install_flags.append(\"--user\")\n+ if cert:\n+ install_flags.extend([\"--cert\", cert])\n+ if client_cert:\n+ install_flags.extend([\"--client-cert\", client_cert])\n \n sys.exit(\n sync.sync(\n", "issue": "--cert option for pip-sync\npip-sync is missing the `--cert` and `--client-cert` options, that are (somehow, see #712) implemented for pip-compile.\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom .. import click, sync\nfrom .._compat import get_installed_distributions, parse_requirements\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..utils import flat_map\n\nDEFAULT_REQUIREMENTS_FILE = \"requirements.txt\"\n\n\[email protected]()\[email protected]_option()\[email protected](\n \"-n\",\n \"--dry-run\",\n is_flag=True,\n help=\"Only show what would happen, don't change anything\",\n)\[email protected](\"--force\", is_flag=True, help=\"Proceed even if conflicts are found\")\[email protected](\n \"-f\",\n \"--find-links\",\n multiple=True,\n help=\"Look for archives in this directory or on this HTML page\",\n envvar=\"PIP_FIND_LINKS\",\n)\[email protected](\n \"-i\",\n \"--index-url\",\n help=\"Change index URL (defaults to PyPI)\",\n envvar=\"PIP_INDEX_URL\",\n)\[email protected](\n \"--extra-index-url\",\n multiple=True,\n help=\"Add additional index URL to search\",\n envvar=\"PIP_EXTRA_INDEX_URL\",\n)\[email protected](\n \"--trusted-host\",\n multiple=True,\n help=\"Mark this host as trusted, even though it does not have valid or any HTTPS.\",\n)\[email protected](\n \"--no-index\",\n is_flag=True,\n help=\"Ignore package index (only looking at --find-links URLs instead)\",\n)\[email protected](\"-q\", \"--quiet\", default=False, is_flag=True, help=\"Give less output\")\[email protected](\n \"--user\", \"user_only\", is_flag=True, help=\"Restrict attention to user directory\"\n)\[email protected](\"src_files\", required=False, type=click.Path(exists=True), nargs=-1)\ndef cli(\n dry_run,\n force,\n find_links,\n index_url,\n extra_index_url,\n trusted_host,\n no_index,\n quiet,\n user_only,\n src_files,\n):\n \"\"\"Synchronize virtual environment with requirements.txt.\"\"\"\n if not src_files:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n else:\n msg = \"No requirement files given and no {} found in the current directory\"\n log.error(msg.format(DEFAULT_REQUIREMENTS_FILE))\n sys.exit(2)\n\n if any(src_file.endswith(\".in\") for src_file in src_files):\n msg = (\n \"Some input files have the .in extension, which is most likely 
an error \"\n \"and can cause weird behaviour. You probably meant to use \"\n \"the corresponding *.txt file?\"\n )\n if force:\n log.warning(\"WARNING: \" + msg)\n else:\n log.error(\"ERROR: \" + msg)\n sys.exit(2)\n\n requirements = flat_map(\n lambda src: parse_requirements(src, session=True), src_files\n )\n\n try:\n requirements = sync.merge(requirements, ignore_conflicts=force)\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n installed_dists = get_installed_distributions(skip=[], user_only=user_only)\n to_install, to_uninstall = sync.diff(requirements, installed_dists)\n\n install_flags = []\n for link in find_links or []:\n install_flags.extend([\"-f\", link])\n if no_index:\n install_flags.append(\"--no-index\")\n if index_url:\n install_flags.extend([\"-i\", index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n install_flags.extend([\"--extra-index-url\", extra_index])\n if trusted_host:\n for host in trusted_host:\n install_flags.extend([\"--trusted-host\", host])\n if user_only:\n install_flags.append(\"--user\")\n\n sys.exit(\n sync.sync(\n to_install,\n to_uninstall,\n verbose=(not quiet),\n dry_run=dry_run,\n install_flags=install_flags,\n )\n )\n", "path": "piptools/scripts/sync.py"}]}
| 1,717 | 293 |
gh_patches_debug_24380
|
rasdani/github-patches
|
git_diff
|
getredash__redash-4624
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Databricks Data Source Broken
<!--
We use GitHub only for bug reports 🐛
Anything else should be posted to https://discuss.redash.io 👫
🚨For support, help & questions use https://discuss.redash.io/c/support
💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests
**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.
-->
### Issue Summary
The Databricks data source does not work. The authorization token for the Databricks data source needs to be converted into a byte string, as the current code raises `TypeError: a bytes-like object is required, not 'str'`.
Calling `.encode()` to convert the token to a byte string makes the data source work.
https://github.com/getredash/redash/blob/b089f5f0eff9b047c093dcc7abbd0ae5bfcf643c/redash/query_runner/databricks.py#L51-L52
### Steps to Reproduce
1. Create a Databricks data source
2. Run test connection and you will get the message: a bytes-like object is required, not 'str'
Any other info e.g. Why do you consider this to be a bug? What did you expect to happen instead?
### Technical details:
* Redash Version: master
* Browser/OS: Chrome
* How did you install Redash: Docker development environment
</issue>
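A minimal sketch of the fix the issue describes: in Python 3, `base64.b64encode` only accepts bytes, so the credentials have to be encoded before the Basic auth header is built. The variable names and the token value below are placeholders, not real credentials:

```python
import base64

username = "token"
password = "dapi-example-access-token"  # placeholder, not a real token

# Encode to bytes before base64, then decode back to str for the header value
auth = base64.b64encode((username + ":" + password).encode("utf-8")).decode("ascii")
headers = {"Authorization": "Basic " + auth}
print(headers)
```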
<code>
[start of redash/query_runner/hive_ds.py]
1 import logging
2 import sys
3 import base64
4
5 from redash.query_runner import *
6 from redash.utils import json_dumps
7
8 logger = logging.getLogger(__name__)
9
10 try:
11 from pyhive import hive
12 from pyhive.exc import DatabaseError
13 from thrift.transport import THttpClient
14
15 enabled = True
16 except ImportError:
17 enabled = False
18
19 COLUMN_NAME = 0
20 COLUMN_TYPE = 1
21
22 types_map = {
23 "BIGINT_TYPE": TYPE_INTEGER,
24 "TINYINT_TYPE": TYPE_INTEGER,
25 "SMALLINT_TYPE": TYPE_INTEGER,
26 "INT_TYPE": TYPE_INTEGER,
27 "DOUBLE_TYPE": TYPE_FLOAT,
28 "DECIMAL_TYPE": TYPE_FLOAT,
29 "FLOAT_TYPE": TYPE_FLOAT,
30 "REAL_TYPE": TYPE_FLOAT,
31 "BOOLEAN_TYPE": TYPE_BOOLEAN,
32 "TIMESTAMP_TYPE": TYPE_DATETIME,
33 "DATE_TYPE": TYPE_DATE,
34 "CHAR_TYPE": TYPE_STRING,
35 "STRING_TYPE": TYPE_STRING,
36 "VARCHAR_TYPE": TYPE_STRING,
37 }
38
39
40 class Hive(BaseSQLQueryRunner):
41 should_annotate_query = False
42 noop_query = "SELECT 1"
43
44 @classmethod
45 def configuration_schema(cls):
46 return {
47 "type": "object",
48 "properties": {
49 "host": {"type": "string"},
50 "port": {"type": "number"},
51 "database": {"type": "string"},
52 "username": {"type": "string"},
53 },
54 "order": ["host", "port", "database", "username"],
55 "required": ["host"],
56 }
57
58 @classmethod
59 def type(cls):
60 return "hive"
61
62 @classmethod
63 def enabled(cls):
64 return enabled
65
66 def _get_tables(self, schema):
67 schemas_query = "show schemas"
68
69 tables_query = "show tables in %s"
70
71 columns_query = "show columns in %s.%s"
72
73 for schema_name in [
74 a
75 for a in [
76 str(a["database_name"]) for a in self._run_query_internal(schemas_query)
77 ]
78 if len(a) > 0
79 ]:
80 for table_name in [
81 a
82 for a in [
83 str(a["tab_name"])
84 for a in self._run_query_internal(tables_query % schema_name)
85 ]
86 if len(a) > 0
87 ]:
88 columns = [
89 a
90 for a in [
91 str(a["field"])
92 for a in self._run_query_internal(
93 columns_query % (schema_name, table_name)
94 )
95 ]
96 if len(a) > 0
97 ]
98
99 if schema_name != "default":
100 table_name = "{}.{}".format(schema_name, table_name)
101
102 schema[table_name] = {"name": table_name, "columns": columns}
103 return list(schema.values())
104
105 def _get_connection(self):
106 host = self.configuration["host"]
107
108 connection = hive.connect(
109 host=host,
110 port=self.configuration.get("port", None),
111 database=self.configuration.get("database", "default"),
112 username=self.configuration.get("username", None),
113 )
114
115 return connection
116
117 def run_query(self, query, user):
118 connection = None
119 try:
120 connection = self._get_connection()
121 cursor = connection.cursor()
122
123 cursor.execute(query)
124
125 column_names = []
126 columns = []
127
128 for column in cursor.description:
129 column_name = column[COLUMN_NAME]
130 column_names.append(column_name)
131
132 columns.append(
133 {
134 "name": column_name,
135 "friendly_name": column_name,
136 "type": types_map.get(column[COLUMN_TYPE], None),
137 }
138 )
139
140 rows = [dict(zip(column_names, row)) for row in cursor]
141
142 data = {"columns": columns, "rows": rows}
143 json_data = json_dumps(data)
144 error = None
145 except KeyboardInterrupt:
146 if connection:
147 connection.cancel()
148 error = "Query cancelled by user."
149 json_data = None
150 except DatabaseError as e:
151 try:
152 error = e.args[0].status.errorMessage
153 except AttributeError:
154 error = str(e)
155 json_data = None
156 finally:
157 if connection:
158 connection.close()
159
160 return json_data, error
161
162
163 class HiveHttp(Hive):
164 @classmethod
165 def name(cls):
166 return "Hive (HTTP)"
167
168 @classmethod
169 def type(cls):
170 return "hive_http"
171
172 @classmethod
173 def configuration_schema(cls):
174 return {
175 "type": "object",
176 "properties": {
177 "host": {"type": "string"},
178 "port": {"type": "number"},
179 "database": {"type": "string"},
180 "username": {"type": "string"},
181 "http_scheme": {
182 "type": "string",
183 "title": "HTTP Scheme (http or https)",
184 "default": "https",
185 },
186 "http_path": {"type": "string", "title": "HTTP Path"},
187 "http_password": {"type": "string", "title": "Password"},
188 },
189 "order": [
190 "host",
191 "port",
192 "http_path",
193 "username",
194 "http_password",
195 "database",
196 "http_scheme",
197 ],
198 "secret": ["http_password"],
199 "required": ["host", "http_path"],
200 }
201
202 def _get_connection(self):
203 host = self.configuration["host"]
204
205 scheme = self.configuration.get("http_scheme", "https")
206
207 # if path is set but is missing initial slash, append it
208 path = self.configuration.get("http_path", "")
209 if path and path[0] != "/":
210 path = "/" + path
211
212 # if port is set prepend colon
213 port = self.configuration.get("port", "")
214 if port:
215 port = ":" + str(port)
216
217 http_uri = "{}://{}{}{}".format(scheme, host, port, path)
218
219 # create transport
220 transport = THttpClient.THttpClient(http_uri)
221
222 # if username or password is set, add Authorization header
223 username = self.configuration.get("username", "")
224 password = self.configuration.get("http_password", "")
225 if username or password:
226 auth = base64.b64encode(username + ":" + password)
227 transport.setCustomHeaders({"Authorization": "Basic " + auth})
228
229 # create connection
230 connection = hive.connect(thrift_transport=transport)
231
232 return connection
233
234
235 register(Hive)
236 register(HiveHttp)
237
[end of redash/query_runner/hive_ds.py]
[start of redash/query_runner/databricks.py]
1 import base64
2 from .hive_ds import Hive
3 from redash.query_runner import register
4
5 try:
6 from pyhive import hive
7 from thrift.transport import THttpClient
8
9 enabled = True
10 except ImportError:
11 enabled = False
12
13
14 class Databricks(Hive):
15 @classmethod
16 def type(cls):
17 return "databricks"
18
19 @classmethod
20 def enabled(cls):
21 return enabled
22
23 @classmethod
24 def configuration_schema(cls):
25 return {
26 "type": "object",
27 "properties": {
28 "host": {"type": "string"},
29 "database": {"type": "string"},
30 "http_path": {"type": "string", "title": "HTTP Path"},
31 "http_password": {"type": "string", "title": "Access Token"},
32 },
33 "order": ["host", "http_path", "http_password", "database"],
34 "secret": ["http_password"],
35 "required": ["host", "database", "http_path", "http_password"],
36 }
37
38 def _get_connection(self):
39 host = self.configuration["host"]
40
41 # if path is set but is missing initial slash, append it
42 path = self.configuration.get("http_path", "")
43 if path and path[0] != "/":
44 path = "/" + path
45
46 http_uri = "https://{}{}".format(host, path)
47
48 transport = THttpClient.THttpClient(http_uri)
49
50 password = self.configuration.get("http_password", "")
51 auth = base64.b64encode("token:" + password)
52 transport.setCustomHeaders({"Authorization": "Basic " + auth})
53
54 connection = hive.connect(thrift_transport=transport)
55 return connection
56
57 def _get_tables(self, schema):
58 schemas_query = "show schemas"
59 tables_query = "show tables in %s"
60 columns_query = "show columns in %s.%s"
61
62 schemas = self._run_query_internal(schemas_query)
63
64 for schema_name in [
65 a for a in [str(a["databaseName"]) for a in schemas] if len(a) > 0
66 ]:
67 for table_name in [
68 a
69 for a in [
70 str(a["tableName"])
71 for a in self._run_query_internal(tables_query % schema_name)
72 ]
73 if len(a) > 0
74 ]:
75 columns = [
76 a
77 for a in [
78 str(a["col_name"])
79 for a in self._run_query_internal(
80 columns_query % (schema_name, table_name)
81 )
82 ]
83 if len(a) > 0
84 ]
85
86 if schema_name != "default":
87 table_name = "{}.{}".format(schema_name, table_name)
88
89 schema[table_name] = {"name": table_name, "columns": columns}
90 return list(schema.values())
91
92
93 register(Databricks)
94
[end of redash/query_runner/databricks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redash/query_runner/databricks.py b/redash/query_runner/databricks.py
--- a/redash/query_runner/databricks.py
+++ b/redash/query_runner/databricks.py
@@ -48,8 +48,8 @@
transport = THttpClient.THttpClient(http_uri)
password = self.configuration.get("http_password", "")
- auth = base64.b64encode("token:" + password)
- transport.setCustomHeaders({"Authorization": "Basic " + auth})
+ auth = base64.b64encode(b"token:" + password.encode("ascii"))
+ transport.setCustomHeaders({"Authorization": "Basic " + auth.decode()})
connection = hive.connect(thrift_transport=transport)
return connection
diff --git a/redash/query_runner/hive_ds.py b/redash/query_runner/hive_ds.py
--- a/redash/query_runner/hive_ds.py
+++ b/redash/query_runner/hive_ds.py
@@ -223,8 +223,8 @@
username = self.configuration.get("username", "")
password = self.configuration.get("http_password", "")
if username or password:
- auth = base64.b64encode(username + ":" + password)
- transport.setCustomHeaders({"Authorization": "Basic " + auth})
+ auth = base64.b64encode(username.encode("ascii") + b":" + password.encode("ascii"))
+ transport.setCustomHeaders({"Authorization": "Basic " + auth.decode()})
# create connection
connection = hive.connect(thrift_transport=transport)
|
{"golden_diff": "diff --git a/redash/query_runner/databricks.py b/redash/query_runner/databricks.py\n--- a/redash/query_runner/databricks.py\n+++ b/redash/query_runner/databricks.py\n@@ -48,8 +48,8 @@\n transport = THttpClient.THttpClient(http_uri)\n \n password = self.configuration.get(\"http_password\", \"\")\n- auth = base64.b64encode(\"token:\" + password)\n- transport.setCustomHeaders({\"Authorization\": \"Basic \" + auth})\n+ auth = base64.b64encode(b\"token:\" + password.encode(\"ascii\"))\n+ transport.setCustomHeaders({\"Authorization\": \"Basic \" + auth.decode()})\n \n connection = hive.connect(thrift_transport=transport)\n return connection\ndiff --git a/redash/query_runner/hive_ds.py b/redash/query_runner/hive_ds.py\n--- a/redash/query_runner/hive_ds.py\n+++ b/redash/query_runner/hive_ds.py\n@@ -223,8 +223,8 @@\n username = self.configuration.get(\"username\", \"\")\n password = self.configuration.get(\"http_password\", \"\")\n if username or password:\n- auth = base64.b64encode(username + \":\" + password)\n- transport.setCustomHeaders({\"Authorization\": \"Basic \" + auth})\n+ auth = base64.b64encode(username.encode(\"ascii\") + b\":\" + password.encode(\"ascii\"))\n+ transport.setCustomHeaders({\"Authorization\": \"Basic \" + auth.decode()})\n \n # create connection\n connection = hive.connect(thrift_transport=transport)\n", "issue": "Databricks Data Source Broken\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\nDatabricks data source does not work. The authorization token for Databricks data source needs to be converted into a byte string as it currently raises `TypeError: a bytes-like object is required, not 'str'`\r\nCalling `.encode()` to transform to a byte string makes the data source work.\r\n\r\nhttps://github.com/getredash/redash/blob/b089f5f0eff9b047c093dcc7abbd0ae5bfcf643c/redash/query_runner/databricks.py#L51-L52\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a Databricks data source\r\n2. Run test connection and you will get the message: a bytes-like object is required, not 'str'\r\n\r\nAny other info e.g. Why do you consider this to be a bug? 
What did you expect to happen instead?\r\n\r\n### Technical details:\r\n\r\n* Redash Version: master\r\n* Browser/OS: Chrome\r\n* How did you install Redash: Docker development environment\r\n\n", "before_files": [{"content": "import logging\nimport sys\nimport base64\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from pyhive import hive\n from pyhive.exc import DatabaseError\n from thrift.transport import THttpClient\n\n enabled = True\nexcept ImportError:\n enabled = False\n\nCOLUMN_NAME = 0\nCOLUMN_TYPE = 1\n\ntypes_map = {\n \"BIGINT_TYPE\": TYPE_INTEGER,\n \"TINYINT_TYPE\": TYPE_INTEGER,\n \"SMALLINT_TYPE\": TYPE_INTEGER,\n \"INT_TYPE\": TYPE_INTEGER,\n \"DOUBLE_TYPE\": TYPE_FLOAT,\n \"DECIMAL_TYPE\": TYPE_FLOAT,\n \"FLOAT_TYPE\": TYPE_FLOAT,\n \"REAL_TYPE\": TYPE_FLOAT,\n \"BOOLEAN_TYPE\": TYPE_BOOLEAN,\n \"TIMESTAMP_TYPE\": TYPE_DATETIME,\n \"DATE_TYPE\": TYPE_DATE,\n \"CHAR_TYPE\": TYPE_STRING,\n \"STRING_TYPE\": TYPE_STRING,\n \"VARCHAR_TYPE\": TYPE_STRING,\n}\n\n\nclass Hive(BaseSQLQueryRunner):\n should_annotate_query = False\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"number\"},\n \"database\": {\"type\": \"string\"},\n \"username\": {\"type\": \"string\"},\n },\n \"order\": [\"host\", \"port\", \"database\", \"username\"],\n \"required\": [\"host\"],\n }\n\n @classmethod\n def type(cls):\n return \"hive\"\n\n @classmethod\n def enabled(cls):\n return enabled\n\n def _get_tables(self, schema):\n schemas_query = \"show schemas\"\n\n tables_query = \"show tables in %s\"\n\n columns_query = \"show columns in %s.%s\"\n\n for schema_name in [\n a\n for a in [\n str(a[\"database_name\"]) for a in self._run_query_internal(schemas_query)\n ]\n if len(a) > 0\n ]:\n for table_name in [\n a\n for a in [\n str(a[\"tab_name\"])\n for a in self._run_query_internal(tables_query % schema_name)\n ]\n if len(a) > 0\n ]:\n columns = [\n a\n for a in [\n str(a[\"field\"])\n for a in self._run_query_internal(\n columns_query % (schema_name, table_name)\n )\n ]\n if len(a) > 0\n ]\n\n if schema_name != \"default\":\n table_name = \"{}.{}\".format(schema_name, table_name)\n\n schema[table_name] = {\"name\": table_name, \"columns\": columns}\n return list(schema.values())\n\n def _get_connection(self):\n host = self.configuration[\"host\"]\n\n connection = hive.connect(\n host=host,\n port=self.configuration.get(\"port\", None),\n database=self.configuration.get(\"database\", \"default\"),\n username=self.configuration.get(\"username\", None),\n )\n\n return connection\n\n def run_query(self, query, user):\n connection = None\n try:\n connection = self._get_connection()\n cursor = connection.cursor()\n\n cursor.execute(query)\n\n column_names = []\n columns = []\n\n for column in cursor.description:\n column_name = column[COLUMN_NAME]\n column_names.append(column_name)\n\n columns.append(\n {\n \"name\": column_name,\n \"friendly_name\": column_name,\n \"type\": types_map.get(column[COLUMN_TYPE], None),\n }\n )\n\n rows = [dict(zip(column_names, row)) for row in cursor]\n\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n error = None\n except KeyboardInterrupt:\n if connection:\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n except DatabaseError as e:\n try:\n error = e.args[0].status.errorMessage\n except 
AttributeError:\n error = str(e)\n json_data = None\n finally:\n if connection:\n connection.close()\n\n return json_data, error\n\n\nclass HiveHttp(Hive):\n @classmethod\n def name(cls):\n return \"Hive (HTTP)\"\n\n @classmethod\n def type(cls):\n return \"hive_http\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"number\"},\n \"database\": {\"type\": \"string\"},\n \"username\": {\"type\": \"string\"},\n \"http_scheme\": {\n \"type\": \"string\",\n \"title\": \"HTTP Scheme (http or https)\",\n \"default\": \"https\",\n },\n \"http_path\": {\"type\": \"string\", \"title\": \"HTTP Path\"},\n \"http_password\": {\"type\": \"string\", \"title\": \"Password\"},\n },\n \"order\": [\n \"host\",\n \"port\",\n \"http_path\",\n \"username\",\n \"http_password\",\n \"database\",\n \"http_scheme\",\n ],\n \"secret\": [\"http_password\"],\n \"required\": [\"host\", \"http_path\"],\n }\n\n def _get_connection(self):\n host = self.configuration[\"host\"]\n\n scheme = self.configuration.get(\"http_scheme\", \"https\")\n\n # if path is set but is missing initial slash, append it\n path = self.configuration.get(\"http_path\", \"\")\n if path and path[0] != \"/\":\n path = \"/\" + path\n\n # if port is set prepend colon\n port = self.configuration.get(\"port\", \"\")\n if port:\n port = \":\" + str(port)\n\n http_uri = \"{}://{}{}{}\".format(scheme, host, port, path)\n\n # create transport\n transport = THttpClient.THttpClient(http_uri)\n\n # if username or password is set, add Authorization header\n username = self.configuration.get(\"username\", \"\")\n password = self.configuration.get(\"http_password\", \"\")\n if username or password:\n auth = base64.b64encode(username + \":\" + password)\n transport.setCustomHeaders({\"Authorization\": \"Basic \" + auth})\n\n # create connection\n connection = hive.connect(thrift_transport=transport)\n\n return connection\n\n\nregister(Hive)\nregister(HiveHttp)\n", "path": "redash/query_runner/hive_ds.py"}, {"content": "import base64\nfrom .hive_ds import Hive\nfrom redash.query_runner import register\n\ntry:\n from pyhive import hive\n from thrift.transport import THttpClient\n\n enabled = True\nexcept ImportError:\n enabled = False\n\n\nclass Databricks(Hive):\n @classmethod\n def type(cls):\n return \"databricks\"\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\"type\": \"string\"},\n \"database\": {\"type\": \"string\"},\n \"http_path\": {\"type\": \"string\", \"title\": \"HTTP Path\"},\n \"http_password\": {\"type\": \"string\", \"title\": \"Access Token\"},\n },\n \"order\": [\"host\", \"http_path\", \"http_password\", \"database\"],\n \"secret\": [\"http_password\"],\n \"required\": [\"host\", \"database\", \"http_path\", \"http_password\"],\n }\n\n def _get_connection(self):\n host = self.configuration[\"host\"]\n\n # if path is set but is missing initial slash, append it\n path = self.configuration.get(\"http_path\", \"\")\n if path and path[0] != \"/\":\n path = \"/\" + path\n\n http_uri = \"https://{}{}\".format(host, path)\n\n transport = THttpClient.THttpClient(http_uri)\n\n password = self.configuration.get(\"http_password\", \"\")\n auth = base64.b64encode(\"token:\" + password)\n transport.setCustomHeaders({\"Authorization\": \"Basic \" + auth})\n\n connection = hive.connect(thrift_transport=transport)\n return 
connection\n\n def _get_tables(self, schema):\n schemas_query = \"show schemas\"\n tables_query = \"show tables in %s\"\n columns_query = \"show columns in %s.%s\"\n\n schemas = self._run_query_internal(schemas_query)\n\n for schema_name in [\n a for a in [str(a[\"databaseName\"]) for a in schemas] if len(a) > 0\n ]:\n for table_name in [\n a\n for a in [\n str(a[\"tableName\"])\n for a in self._run_query_internal(tables_query % schema_name)\n ]\n if len(a) > 0\n ]:\n columns = [\n a\n for a in [\n str(a[\"col_name\"])\n for a in self._run_query_internal(\n columns_query % (schema_name, table_name)\n )\n ]\n if len(a) > 0\n ]\n\n if schema_name != \"default\":\n table_name = \"{}.{}\".format(schema_name, table_name)\n\n schema[table_name] = {\"name\": table_name, \"columns\": columns}\n return list(schema.values())\n\n\nregister(Databricks)\n", "path": "redash/query_runner/databricks.py"}]}
| 3,727 | 338 |
gh_patches_debug_11925
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1155
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RemovedInDjango41Warning
**Describe the bug**: ...
```shell
../../../../.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91
/Users/mingyu.wu/.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91: RemovedInDjango41Warning: 'elasticapm.contrib.django' defines default_app_config = 'elasticapm.contrib.django.apps.ElasticAPMConfig'. Django now detects this configuration automatically. You can remove default_app_config.
app_config = AppConfig.create(entry)
-- Docs: https://docs.pytest.org/en/stable/warnings.html
```
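For context, the usual way to address this warning while keeping older Django releases working is to guard the assignment on the Django version; the sketch below mirrors the patch recorded later in this entry.
```python
# Only define default_app_config on Django < 3.2; newer Django auto-detects the
# AppConfig and emits RemovedInDjango41Warning if the attribute is still set.
from django import VERSION as DJANGO_VERSION

if DJANGO_VERSION < (3, 2):
    default_app_config = "elasticapm.contrib.django.apps.ElasticAPMConfig"
```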
**To Reproduce**
1. run unit test or start server
**Environment (please complete the following information)**
- OS: Linux/MacOS
- Python version:3.8.2
- Framework and version: Django 3.2.2
- APM Server version: *
- Agent version: 6.1.3
</issue>
<code>
[start of elasticapm/contrib/django/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm.contrib.django.client import * # noqa E401
32
33 default_app_config = "elasticapm.contrib.django.apps.ElasticAPMConfig"
34
[end of elasticapm/contrib/django/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/contrib/django/__init__.py b/elasticapm/contrib/django/__init__.py
--- a/elasticapm/contrib/django/__init__.py
+++ b/elasticapm/contrib/django/__init__.py
@@ -27,7 +27,9 @@
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from django import VERSION as DJANGO_VERSION
from elasticapm.contrib.django.client import * # noqa E401
-default_app_config = "elasticapm.contrib.django.apps.ElasticAPMConfig"
+if DJANGO_VERSION < (3, 2):
+ default_app_config = "elasticapm.contrib.django.apps.ElasticAPMConfig"
|
{"golden_diff": "diff --git a/elasticapm/contrib/django/__init__.py b/elasticapm/contrib/django/__init__.py\n--- a/elasticapm/contrib/django/__init__.py\n+++ b/elasticapm/contrib/django/__init__.py\n@@ -27,7 +27,9 @@\n # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+from django import VERSION as DJANGO_VERSION\n \n from elasticapm.contrib.django.client import * # noqa E401\n \n-default_app_config = \"elasticapm.contrib.django.apps.ElasticAPMConfig\"\n+if DJANGO_VERSION < (3, 2):\n+ default_app_config = \"elasticapm.contrib.django.apps.ElasticAPMConfig\"\n", "issue": "RemovedInDjango41Warning\n**Describe the bug**: ...\r\n\r\n```shell\r\n../../../../.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91\r\n /Users/mingyu.wu/.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91: RemovedInDjango41Warning: 'elasticapm.contrib.django' defines default_app_config = 'elasticapm.contrib.django.apps.ElasticAPMConfig'. Django now detects this configuration automatically. You can remove default_app_config.\r\n app_config = AppConfig.create(entry)\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/warnings.html\r\n```\r\n\r\n**To Reproduce**\r\n\r\n1. run unit test or start server\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux/MacOS\r\n- Python version:3.8.2\r\n- Framework and version: Django 3.2.2\r\n- APM Server version: *\r\n- Agent version: 6.1.3\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.django.client import * # noqa E401\n\ndefault_app_config = \"elasticapm.contrib.django.apps.ElasticAPMConfig\"\n", "path": "elasticapm/contrib/django/__init__.py"}]}
| 1,215 | 202 |
gh_patches_debug_48094
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-6866
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
httpauth is not attached to replay request
#### Problem Description
I set mitmproxy to run in reverse mode as a proxy to a real server, and then protect mitmproxy with a user:pass pair via the proxyauth option. A regular request goes through, but a replay of that same request returns 401 Unauthorized.
#### Steps to reproduce the behavior:
1. turn on reverse mode in mitmweb
2. set basic auth in proxyauth in 'username:pass' format
3. initiate a successful request
4. replay the request
#### System Information
Mitmproxy: 10.1.5
Python: 3.11.6
OpenSSL: OpenSSL 3.1.4 24 Oct 2023
Platform: Linux-4.14.276-211.499.amzn2.x86_64-x86_64-with-glibc2.31
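For orientation, the patch recorded later in this entry exempts client-replayed flows from the auth check in `requestheaders`; a fragment-level sketch (not a standalone script) of that logic:
```python
# Fragment of ProxyAuth.requestheaders: replayed flows carry no client to
# challenge, so they skip re-authentication instead of getting a 401/407.
def requestheaders(self, f: http.HTTPFlow) -> None:
    if self.validator:
        if f.client_conn in self.authenticated:
            f.metadata["proxyauth"] = self.authenticated[f.client_conn]
        elif f.is_replay:
            pass  # do not challenge replayed requests
        else:
            self.authenticate_http(f)
```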
</issue>
<code>
[start of mitmproxy/addons/proxyauth.py]
1 from __future__ import annotations
2
3 import binascii
4 import weakref
5 from abc import ABC
6 from abc import abstractmethod
7 from collections.abc import MutableMapping
8 from typing import Optional
9
10 import ldap3
11 import passlib.apache
12
13 from mitmproxy import connection
14 from mitmproxy import ctx
15 from mitmproxy import exceptions
16 from mitmproxy import http
17 from mitmproxy.net.http import status_codes
18 from mitmproxy.proxy import mode_specs
19 from mitmproxy.proxy.layers import modes
20
21 REALM = "mitmproxy"
22
23
24 class ProxyAuth:
25 validator: Validator | None = None
26
27 def __init__(self) -> None:
28 self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (
29 weakref.WeakKeyDictionary()
30 )
31 """Contains all connections that are permanently authenticated after an HTTP CONNECT"""
32
33 def load(self, loader):
34 loader.add_option(
35 "proxyauth",
36 Optional[str],
37 None,
38 """
39 Require proxy authentication. Format:
40 "username:pass",
41 "any" to accept any user/pass combination,
42 "@path" to use an Apache htpasswd file,
43 or "ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]" for LDAP authentication.
44 """,
45 )
46
47 def configure(self, updated):
48 if "proxyauth" in updated:
49 auth = ctx.options.proxyauth
50 if auth:
51 if auth == "any":
52 self.validator = AcceptAll()
53 elif auth.startswith("@"):
54 self.validator = Htpasswd(auth)
55 elif ctx.options.proxyauth.startswith("ldap"):
56 self.validator = Ldap(auth)
57 elif ":" in ctx.options.proxyauth:
58 self.validator = SingleUser(auth)
59 else:
60 raise exceptions.OptionsError("Invalid proxyauth specification.")
61 else:
62 self.validator = None
63
64 def socks5_auth(self, data: modes.Socks5AuthData) -> None:
65 if self.validator and self.validator(data.username, data.password):
66 data.valid = True
67 self.authenticated[data.client_conn] = data.username, data.password
68
69 def http_connect(self, f: http.HTTPFlow) -> None:
70 if self.validator and self.authenticate_http(f):
71 # Make a note that all further requests over this connection are ok.
72 self.authenticated[f.client_conn] = f.metadata["proxyauth"]
73
74 def requestheaders(self, f: http.HTTPFlow) -> None:
75 if self.validator:
76 # Is this connection authenticated by a previous HTTP CONNECT?
77 if f.client_conn in self.authenticated:
78 f.metadata["proxyauth"] = self.authenticated[f.client_conn]
79 else:
80 self.authenticate_http(f)
81
82 def authenticate_http(self, f: http.HTTPFlow) -> bool:
83 """
84 Authenticate an HTTP request, returns if authentication was successful.
85
86 If valid credentials are found, the matching authentication header is removed.
87 In no or invalid credentials are found, flow.response is set to an error page.
88 """
89 assert self.validator
90 username = None
91 password = None
92 is_valid = False
93
94 is_proxy = is_http_proxy(f)
95 auth_header = http_auth_header(is_proxy)
96 try:
97 auth_value = f.request.headers.get(auth_header, "")
98 scheme, username, password = parse_http_basic_auth(auth_value)
99 is_valid = self.validator(username, password)
100 except Exception:
101 pass
102
103 if is_valid:
104 f.metadata["proxyauth"] = (username, password)
105 del f.request.headers[auth_header]
106 return True
107 else:
108 f.response = make_auth_required_response(is_proxy)
109 return False
110
111
112 def make_auth_required_response(is_proxy: bool) -> http.Response:
113 if is_proxy:
114 status_code = status_codes.PROXY_AUTH_REQUIRED
115 headers = {"Proxy-Authenticate": f'Basic realm="{REALM}"'}
116 else:
117 status_code = status_codes.UNAUTHORIZED
118 headers = {"WWW-Authenticate": f'Basic realm="{REALM}"'}
119
120 reason = http.status_codes.RESPONSES[status_code]
121 return http.Response.make(
122 status_code,
123 (
124 f"<html>"
125 f"<head><title>{status_code} {reason}</title></head>"
126 f"<body><h1>{status_code} {reason}</h1></body>"
127 f"</html>"
128 ),
129 headers,
130 )
131
132
133 def http_auth_header(is_proxy: bool) -> str:
134 if is_proxy:
135 return "Proxy-Authorization"
136 else:
137 return "Authorization"
138
139
140 def is_http_proxy(f: http.HTTPFlow) -> bool:
141 """
142 Returns:
143 - True, if authentication is done as if mitmproxy is a proxy
144 - False, if authentication is done as if mitmproxy is an HTTP server
145 """
146 return isinstance(
147 f.client_conn.proxy_mode, (mode_specs.RegularMode, mode_specs.UpstreamMode)
148 )
149
150
151 def mkauth(username: str, password: str, scheme: str = "basic") -> str:
152 """
153 Craft a basic auth string
154 """
155 v = binascii.b2a_base64((username + ":" + password).encode("utf8")).decode("ascii")
156 return scheme + " " + v
157
158
159 def parse_http_basic_auth(s: str) -> tuple[str, str, str]:
160 """
161 Parse a basic auth header.
162 Raises a ValueError if the input is invalid.
163 """
164 scheme, authinfo = s.split()
165 if scheme.lower() != "basic":
166 raise ValueError("Unknown scheme")
167 try:
168 user, password = (
169 binascii.a2b_base64(authinfo.encode()).decode("utf8", "replace").split(":")
170 )
171 except binascii.Error as e:
172 raise ValueError(str(e))
173 return scheme, user, password
174
175
176 class Validator(ABC):
177 """Base class for all username/password validators."""
178
179 @abstractmethod
180 def __call__(self, username: str, password: str) -> bool:
181 raise NotImplementedError
182
183
184 class AcceptAll(Validator):
185 def __call__(self, username: str, password: str) -> bool:
186 return True
187
188
189 class SingleUser(Validator):
190 def __init__(self, proxyauth: str):
191 try:
192 self.username, self.password = proxyauth.split(":")
193 except ValueError:
194 raise exceptions.OptionsError("Invalid single-user auth specification.")
195
196 def __call__(self, username: str, password: str) -> bool:
197 return self.username == username and self.password == password
198
199
200 class Htpasswd(Validator):
201 def __init__(self, proxyauth: str):
202 path = proxyauth[1:]
203 try:
204 self.htpasswd = passlib.apache.HtpasswdFile(path)
205 except (ValueError, OSError):
206 raise exceptions.OptionsError(f"Could not open htpasswd file: {path}")
207
208 def __call__(self, username: str, password: str) -> bool:
209 return self.htpasswd.check_password(username, password)
210
211
212 class Ldap(Validator):
213 conn: ldap3.Connection
214 server: ldap3.Server
215 dn_subtree: str
216 filter_key: str
217
218 def __init__(self, proxyauth: str):
219 (
220 use_ssl,
221 url,
222 port,
223 ldap_user,
224 ldap_pass,
225 self.dn_subtree,
226 self.filter_key,
227 ) = self.parse_spec(proxyauth)
228 server = ldap3.Server(url, port=port, use_ssl=use_ssl)
229 conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)
230 self.conn = conn
231 self.server = server
232
233 @staticmethod
234 def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:
235 try:
236 if spec.count(":") > 4:
237 (
238 security,
239 url,
240 port_str,
241 ldap_user,
242 ldap_pass,
243 dn_subtree,
244 ) = spec.split(":")
245 port = int(port_str)
246 else:
247 security, url, ldap_user, ldap_pass, dn_subtree = spec.split(":")
248 port = None
249
250 if "?" in dn_subtree:
251 dn_subtree, search_str = dn_subtree.split("?")
252 key, value = search_str.split("=")
253 if key == "search_filter_key":
254 search_filter_key = value
255 else:
256 raise ValueError
257 else:
258 search_filter_key = "cn"
259
260 if security == "ldaps":
261 use_ssl = True
262 elif security == "ldap":
263 use_ssl = False
264 else:
265 raise ValueError
266
267 return (
268 use_ssl,
269 url,
270 port,
271 ldap_user,
272 ldap_pass,
273 dn_subtree,
274 search_filter_key,
275 )
276 except ValueError:
277 raise exceptions.OptionsError(f"Invalid LDAP specification: {spec}")
278
279 def __call__(self, username: str, password: str) -> bool:
280 if not username or not password:
281 return False
282 self.conn.search(self.dn_subtree, f"({self.filter_key}={username})")
283 if self.conn.response:
284 c = ldap3.Connection(
285 self.server, self.conn.response[0]["dn"], password, auto_bind=True
286 )
287 if c:
288 return True
289 return False
290
[end of mitmproxy/addons/proxyauth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/addons/proxyauth.py b/mitmproxy/addons/proxyauth.py
--- a/mitmproxy/addons/proxyauth.py
+++ b/mitmproxy/addons/proxyauth.py
@@ -76,6 +76,8 @@
# Is this connection authenticated by a previous HTTP CONNECT?
if f.client_conn in self.authenticated:
f.metadata["proxyauth"] = self.authenticated[f.client_conn]
+ elif f.is_replay:
+ pass
else:
self.authenticate_http(f)
|
{"golden_diff": "diff --git a/mitmproxy/addons/proxyauth.py b/mitmproxy/addons/proxyauth.py\n--- a/mitmproxy/addons/proxyauth.py\n+++ b/mitmproxy/addons/proxyauth.py\n@@ -76,6 +76,8 @@\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n+ elif f.is_replay:\n+ pass\n else:\n self.authenticate_http(f)\n", "issue": "httpauth is not attached to replay request \n#### Problem Description\r\nI set mitmproxy to run in reverse mode as a proxy to real server, and then protect mitmproxy with a pair of user:pass in the proxyauth option. A regular request would go through, but a reply of that same request would return 401 Unauthorized\r\n\r\n#### Steps to reproduce the behavior:\r\n1. turn on reverse mode in mitmweb\r\n2. set basic auth in proxyauth in 'username:pass' format\r\n3. initiate a success request\r\n4. replay the request\r\n\r\n#### System Information\r\nMitmproxy: 10.1.5\r\nPython: 3.11.6\r\nOpenSSL: OpenSSL 3.1.4 24 Oct 2023\r\nPlatform: Linux-4.14.276-211.499.amzn2.x86_64-x86_64-with-glibc2.31\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport binascii\nimport weakref\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom collections.abc import MutableMapping\nfrom typing import Optional\n\nimport ldap3\nimport passlib.apache\n\nfrom mitmproxy import connection\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import http\nfrom mitmproxy.net.http import status_codes\nfrom mitmproxy.proxy import mode_specs\nfrom mitmproxy.proxy.layers import modes\n\nREALM = \"mitmproxy\"\n\n\nclass ProxyAuth:\n validator: Validator | None = None\n\n def __init__(self) -> None:\n self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (\n weakref.WeakKeyDictionary()\n )\n \"\"\"Contains all connections that are permanently authenticated after an HTTP CONNECT\"\"\"\n\n def load(self, loader):\n loader.add_option(\n \"proxyauth\",\n Optional[str],\n None,\n \"\"\"\n Require proxy authentication. 
Format:\n \"username:pass\",\n \"any\" to accept any user/pass combination,\n \"@path\" to use an Apache htpasswd file,\n or \"ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]\" for LDAP authentication.\n \"\"\",\n )\n\n def configure(self, updated):\n if \"proxyauth\" in updated:\n auth = ctx.options.proxyauth\n if auth:\n if auth == \"any\":\n self.validator = AcceptAll()\n elif auth.startswith(\"@\"):\n self.validator = Htpasswd(auth)\n elif ctx.options.proxyauth.startswith(\"ldap\"):\n self.validator = Ldap(auth)\n elif \":\" in ctx.options.proxyauth:\n self.validator = SingleUser(auth)\n else:\n raise exceptions.OptionsError(\"Invalid proxyauth specification.\")\n else:\n self.validator = None\n\n def socks5_auth(self, data: modes.Socks5AuthData) -> None:\n if self.validator and self.validator(data.username, data.password):\n data.valid = True\n self.authenticated[data.client_conn] = data.username, data.password\n\n def http_connect(self, f: http.HTTPFlow) -> None:\n if self.validator and self.authenticate_http(f):\n # Make a note that all further requests over this connection are ok.\n self.authenticated[f.client_conn] = f.metadata[\"proxyauth\"]\n\n def requestheaders(self, f: http.HTTPFlow) -> None:\n if self.validator:\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n else:\n self.authenticate_http(f)\n\n def authenticate_http(self, f: http.HTTPFlow) -> bool:\n \"\"\"\n Authenticate an HTTP request, returns if authentication was successful.\n\n If valid credentials are found, the matching authentication header is removed.\n In no or invalid credentials are found, flow.response is set to an error page.\n \"\"\"\n assert self.validator\n username = None\n password = None\n is_valid = False\n\n is_proxy = is_http_proxy(f)\n auth_header = http_auth_header(is_proxy)\n try:\n auth_value = f.request.headers.get(auth_header, \"\")\n scheme, username, password = parse_http_basic_auth(auth_value)\n is_valid = self.validator(username, password)\n except Exception:\n pass\n\n if is_valid:\n f.metadata[\"proxyauth\"] = (username, password)\n del f.request.headers[auth_header]\n return True\n else:\n f.response = make_auth_required_response(is_proxy)\n return False\n\n\ndef make_auth_required_response(is_proxy: bool) -> http.Response:\n if is_proxy:\n status_code = status_codes.PROXY_AUTH_REQUIRED\n headers = {\"Proxy-Authenticate\": f'Basic realm=\"{REALM}\"'}\n else:\n status_code = status_codes.UNAUTHORIZED\n headers = {\"WWW-Authenticate\": f'Basic realm=\"{REALM}\"'}\n\n reason = http.status_codes.RESPONSES[status_code]\n return http.Response.make(\n status_code,\n (\n f\"<html>\"\n f\"<head><title>{status_code} {reason}</title></head>\"\n f\"<body><h1>{status_code} {reason}</h1></body>\"\n f\"</html>\"\n ),\n headers,\n )\n\n\ndef http_auth_header(is_proxy: bool) -> str:\n if is_proxy:\n return \"Proxy-Authorization\"\n else:\n return \"Authorization\"\n\n\ndef is_http_proxy(f: http.HTTPFlow) -> bool:\n \"\"\"\n Returns:\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is an HTTP server\n \"\"\"\n return isinstance(\n f.client_conn.proxy_mode, (mode_specs.RegularMode, mode_specs.UpstreamMode)\n )\n\n\ndef mkauth(username: str, password: str, scheme: str = \"basic\") -> str:\n \"\"\"\n Craft a basic auth string\n \"\"\"\n v = binascii.b2a_base64((username + \":\" + 
password).encode(\"utf8\")).decode(\"ascii\")\n return scheme + \" \" + v\n\n\ndef parse_http_basic_auth(s: str) -> tuple[str, str, str]:\n \"\"\"\n Parse a basic auth header.\n Raises a ValueError if the input is invalid.\n \"\"\"\n scheme, authinfo = s.split()\n if scheme.lower() != \"basic\":\n raise ValueError(\"Unknown scheme\")\n try:\n user, password = (\n binascii.a2b_base64(authinfo.encode()).decode(\"utf8\", \"replace\").split(\":\")\n )\n except binascii.Error as e:\n raise ValueError(str(e))\n return scheme, user, password\n\n\nclass Validator(ABC):\n \"\"\"Base class for all username/password validators.\"\"\"\n\n @abstractmethod\n def __call__(self, username: str, password: str) -> bool:\n raise NotImplementedError\n\n\nclass AcceptAll(Validator):\n def __call__(self, username: str, password: str) -> bool:\n return True\n\n\nclass SingleUser(Validator):\n def __init__(self, proxyauth: str):\n try:\n self.username, self.password = proxyauth.split(\":\")\n except ValueError:\n raise exceptions.OptionsError(\"Invalid single-user auth specification.\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.username == username and self.password == password\n\n\nclass Htpasswd(Validator):\n def __init__(self, proxyauth: str):\n path = proxyauth[1:]\n try:\n self.htpasswd = passlib.apache.HtpasswdFile(path)\n except (ValueError, OSError):\n raise exceptions.OptionsError(f\"Could not open htpasswd file: {path}\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.htpasswd.check_password(username, password)\n\n\nclass Ldap(Validator):\n conn: ldap3.Connection\n server: ldap3.Server\n dn_subtree: str\n filter_key: str\n\n def __init__(self, proxyauth: str):\n (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n self.dn_subtree,\n self.filter_key,\n ) = self.parse_spec(proxyauth)\n server = ldap3.Server(url, port=port, use_ssl=use_ssl)\n conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)\n self.conn = conn\n self.server = server\n\n @staticmethod\n def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:\n try:\n if spec.count(\":\") > 4:\n (\n security,\n url,\n port_str,\n ldap_user,\n ldap_pass,\n dn_subtree,\n ) = spec.split(\":\")\n port = int(port_str)\n else:\n security, url, ldap_user, ldap_pass, dn_subtree = spec.split(\":\")\n port = None\n\n if \"?\" in dn_subtree:\n dn_subtree, search_str = dn_subtree.split(\"?\")\n key, value = search_str.split(\"=\")\n if key == \"search_filter_key\":\n search_filter_key = value\n else:\n raise ValueError\n else:\n search_filter_key = \"cn\"\n\n if security == \"ldaps\":\n use_ssl = True\n elif security == \"ldap\":\n use_ssl = False\n else:\n raise ValueError\n\n return (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n dn_subtree,\n search_filter_key,\n )\n except ValueError:\n raise exceptions.OptionsError(f\"Invalid LDAP specification: {spec}\")\n\n def __call__(self, username: str, password: str) -> bool:\n if not username or not password:\n return False\n self.conn.search(self.dn_subtree, f\"({self.filter_key}={username})\")\n if self.conn.response:\n c = ldap3.Connection(\n self.server, self.conn.response[0][\"dn\"], password, auto_bind=True\n )\n if c:\n return True\n return False\n", "path": "mitmproxy/addons/proxyauth.py"}]}
| 3,526 | 118 |
gh_patches_debug_11882
|
rasdani/github-patches
|
git_diff
|
translate__pootle-6471
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'QuerySet' object has no attribute 'keys'
The following appears when you first go to `/` and Pootle tries to redirect you to your language based on your Accept-Language header. Note that this sets a cookie, so you need to clear cookies to reproduce it.
```python
AttributeError: 'QuerySet' object has no attribute 'keys'
File "pootle_app/views/index/index.py", line 94, in dispatch
else self.languages).values_list('code', flat=True))
File "pootle/i18n/override.py", line 81, in get_lang_from_http_header
for lang in supported.keys():
AttributeError: 'QuerySet' object has no attribute 'keys'
```
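A minimal, self-contained illustration of the failure and of the direction the recorded patch takes (the language codes below are invented for the example): `get_lang_from_http_header` iterates over `supported.keys()`, so it needs a mapping rather than the QuerySet produced by `values_list('code', flat=True)`.
```python
# Stand-in for the QuerySet of language codes the view currently passes in.
codes = ["fr", "de", "pt_BR"]

# A QuerySet (like a plain list) has no .keys(), which is what
# get_lang_from_http_header calls; building a mapping restores that contract.
# The recorded patch does this with dict(...values_list('code', 'fullname')).
supported = dict.fromkeys(codes)
for lang in supported.keys():
    print(lang)
```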
</issue>
<code>
[start of pootle/apps/pootle_app/views/index/index.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.contrib.auth import REDIRECT_FIELD_NAME
10 from django.shortcuts import redirect
11 from django.urls import reverse
12 from django.utils.functional import cached_property
13 from django.utils.translation import get_language
14 from django.views.generic import TemplateView, View
15
16 from pootle.core.decorators import persistent_property
17 from pootle.core.delegate import revision, scores
18 from pootle.i18n.override import get_lang_from_http_header
19 from pootle_language.models import Language
20 from pootle_project.models import Project, ProjectSet
21
22
23 COOKIE_NAME = 'pootle-language'
24
25
26 class WelcomeView(TemplateView):
27 ns = "pootle.web.welcome"
28 template_name = "welcome.html"
29
30 @property
31 def revision(self):
32 return revision.get(self.project_set.directory.__class__)(
33 self.project_set.directory).get(key="stats")
34
35 @property
36 def cache_key(self):
37 return (
38 "%s.%s.%s"
39 % (self.request.user.username,
40 self.revision,
41 self.request_lang))
42
43 @cached_property
44 def project_set(self):
45 user_projects = Project.accessible_by_user(self.request.user)
46 user_projects = (
47 Project.objects.for_user(self.request.user)
48 .filter(code__in=user_projects))
49 return ProjectSet(user_projects)
50
51 @property
52 def request_lang(self):
53 return get_language()
54
55 @persistent_property
56 def score_data(self):
57 return scores.get(ProjectSet)(
58 self.project_set).display(language=self.request_lang)
59
60 def get_context_data(self, **kwargs):
61 context = super(WelcomeView, self).get_context_data(**kwargs)
62 context.update(dict(score_data=self.score_data))
63 return context
64
65
66 class IndexView(View):
67
68 @property
69 def active_languages(self):
70 return Language.objects.filter(
71 translationproject__isnull=False,
72 translationproject__directory__obsolete=False)
73
74 @property
75 def all_languages(self):
76 return self.active_languages
77
78 @property
79 def languages(self):
80 return self.active_languages.filter(
81 translationproject__project__disabled=False)
82
83 def dispatch(self, request, *args, **kwargs):
84 if not request.user.is_authenticated:
85 ctx = {
86 'next': request.GET.get(REDIRECT_FIELD_NAME, '')}
87 return WelcomeView.as_view()(request, ctx)
88 lang = request.COOKIES.get(COOKIE_NAME, None)
89 if lang is None:
90 lang = get_lang_from_http_header(
91 request,
92 (self.all_languages
93 if request.user.is_superuser
94 else self.languages).values_list('code', flat=True))
95 if lang is not None and lang not in ('projects', ''):
96 url = reverse('pootle-language-browse', args=[lang])
97 else:
98 url = reverse('pootle-projects-browse')
99 # Preserve query strings
100 args = request.GET.urlencode()
101 qs = '?%s' % args if args else ''
102 redirect_url = '%s%s' % (url, qs)
103 return redirect(redirect_url)
104
105
106 class AboutView(TemplateView):
107 template_name = 'about.html'
108
109 def get_context_data(self, **kwargs):
110 from translate.__version__ import sver as toolkit_version
111 from pootle import __version__
112
113 return {
114 'pootle_version': __version__,
115 'toolkit_version': toolkit_version,
116 }
117
[end of pootle/apps/pootle_app/views/index/index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/pootle_app/views/index/index.py b/pootle/apps/pootle_app/views/index/index.py
--- a/pootle/apps/pootle_app/views/index/index.py
+++ b/pootle/apps/pootle_app/views/index/index.py
@@ -89,9 +89,9 @@
if lang is None:
lang = get_lang_from_http_header(
request,
- (self.all_languages
- if request.user.is_superuser
- else self.languages).values_list('code', flat=True))
+ dict((self.all_languages
+ if request.user.is_superuser
+ else self.languages).values_list('code', 'fullname')))
if lang is not None and lang not in ('projects', ''):
url = reverse('pootle-language-browse', args=[lang])
else:
|
{"golden_diff": "diff --git a/pootle/apps/pootle_app/views/index/index.py b/pootle/apps/pootle_app/views/index/index.py\n--- a/pootle/apps/pootle_app/views/index/index.py\n+++ b/pootle/apps/pootle_app/views/index/index.py\n@@ -89,9 +89,9 @@\n if lang is None:\n lang = get_lang_from_http_header(\n request,\n- (self.all_languages\n- if request.user.is_superuser\n- else self.languages).values_list('code', flat=True))\n+ dict((self.all_languages\n+ if request.user.is_superuser\n+ else self.languages).values_list('code', 'fullname')))\n if lang is not None and lang not in ('projects', ''):\n url = reverse('pootle-language-browse', args=[lang])\n else:\n", "issue": "AttributeError: 'QuerySet' object has no attribute 'keys'\nThe following appears when you first go to `/` and Pootle tries to redirect you to your language based on your Accept-Lang headers. Note this will set a cookie so you need to have cookies reset to replicate.\r\n\r\n```python\r\nAttributeError: 'QuerySet' object has no attribute 'keys'\r\n File \"pootle_app/views/index/index.py\", line 94, in dispatch\r\n else self.languages).values_list('code', flat=True))\r\n File \"pootle/i18n/override.py\", line 81, in get_lang_from_http_header\r\n for lang in supported.keys():\r\n\r\nAttributeError: 'QuerySet' object has no attribute 'keys'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.contrib.auth import REDIRECT_FIELD_NAME\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language\nfrom django.views.generic import TemplateView, View\n\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.delegate import revision, scores\nfrom pootle.i18n.override import get_lang_from_http_header\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project, ProjectSet\n\n\nCOOKIE_NAME = 'pootle-language'\n\n\nclass WelcomeView(TemplateView):\n ns = \"pootle.web.welcome\"\n template_name = \"welcome.html\"\n\n @property\n def revision(self):\n return revision.get(self.project_set.directory.__class__)(\n self.project_set.directory).get(key=\"stats\")\n\n @property\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n % (self.request.user.username,\n self.revision,\n self.request_lang))\n\n @cached_property\n def project_set(self):\n user_projects = Project.accessible_by_user(self.request.user)\n user_projects = (\n Project.objects.for_user(self.request.user)\n .filter(code__in=user_projects))\n return ProjectSet(user_projects)\n\n @property\n def request_lang(self):\n return get_language()\n\n @persistent_property\n def score_data(self):\n return scores.get(ProjectSet)(\n self.project_set).display(language=self.request_lang)\n\n def get_context_data(self, **kwargs):\n context = super(WelcomeView, self).get_context_data(**kwargs)\n context.update(dict(score_data=self.score_data))\n return context\n\n\nclass IndexView(View):\n\n @property\n def active_languages(self):\n return Language.objects.filter(\n translationproject__isnull=False,\n translationproject__directory__obsolete=False)\n\n @property\n def all_languages(self):\n return self.active_languages\n\n @property\n def languages(self):\n return 
self.active_languages.filter(\n translationproject__project__disabled=False)\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n ctx = {\n 'next': request.GET.get(REDIRECT_FIELD_NAME, '')}\n return WelcomeView.as_view()(request, ctx)\n lang = request.COOKIES.get(COOKIE_NAME, None)\n if lang is None:\n lang = get_lang_from_http_header(\n request,\n (self.all_languages\n if request.user.is_superuser\n else self.languages).values_list('code', flat=True))\n if lang is not None and lang not in ('projects', ''):\n url = reverse('pootle-language-browse', args=[lang])\n else:\n url = reverse('pootle-projects-browse')\n # Preserve query strings\n args = request.GET.urlencode()\n qs = '?%s' % args if args else ''\n redirect_url = '%s%s' % (url, qs)\n return redirect(redirect_url)\n\n\nclass AboutView(TemplateView):\n template_name = 'about.html'\n\n def get_context_data(self, **kwargs):\n from translate.__version__ import sver as toolkit_version\n from pootle import __version__\n\n return {\n 'pootle_version': __version__,\n 'toolkit_version': toolkit_version,\n }\n", "path": "pootle/apps/pootle_app/views/index/index.py"}]}
| 1,748 | 183 |
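The pootle fix above rests on a small Django detail: `values_list('code', flat=True)` yields a flat sequence of codes, which iterates fine but has no mapping interface, so the `.keys()` call inside `get_lang_from_http_header` fails, while `dict(values_list('code', 'fullname'))` restores it. A minimal sketch with the ORM results mocked as plain Python values so it runs without Django; the language codes and names are illustrative, not taken from the project:

```python
# The two result shapes from the diff above, mocked as plain Python so the
# snippet runs without Django; codes and names are illustrative values.
flat_result = ['af', 'ar', 'de']                        # values_list('code', flat=True)
pair_result = [('af', 'Afrikaans'), ('ar', 'Arabic')]   # values_list('code', 'fullname')

# The old call handed the flat sequence straight to get_lang_from_http_header(),
# which uses supported.keys() -> AttributeError, exactly as in the traceback.
# Wrapping the two-field form in dict() restores the mapping interface it expects.
supported = dict(pair_result)
assert 'af' in supported.keys()
assert not hasattr(flat_result, 'keys')
```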
gh_patches_debug_55319
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-2906
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pip installing sunpy dev fails
With `ci-helpers` I run into this issue for both travis/linux and appveyor:
Please have a look and open a PR in `ci-helpers` with the suggested fix for the case of `SUNPY_VERSION='dev'`
```
+++++python -m pip install git+https://github.com/sunpy/sunpy.git#egg=sunpy --upgrade --no-deps
Collecting sunpy from git+https://github.com/sunpy/sunpy.git#egg=sunpy
Cloning https://github.com/sunpy/sunpy.git to /tmp/pip-install-uowizwdl/sunpy
Installing build dependencies ... done
Getting requirements to build wheel ... error
Complete output from command /home/travis/miniconda/envs/test/bin/python /home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py get_requires_for_build_wheel /tmp/tmp6h0521mp:
Traceback (most recent call last):
File "/home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py", line 207, in <module>
main()
File "/home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py", line 197, in main
json_out['return_val'] = hook(**hook_input['kwargs'])
File "/home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py", line 54, in get_requires_for_build_wheel
return hook(config_settings)
File "/tmp/pip-build-env-3jro2te9/overlay/lib/python3.6/site-packages/setuptools/build_meta.py", line 115, in get_requires_for_build_wheel
return _get_build_requires(config_settings, requirements=['wheel'])
File "/tmp/pip-build-env-3jro2te9/overlay/lib/python3.6/site-packages/setuptools/build_meta.py", line 101, in _get_build_requires
_run_setup()
File "/tmp/pip-build-env-3jro2te9/overlay/lib/python3.6/site-packages/setuptools/build_meta.py", line 85, in _run_setup
exec(compile(code, __file__, 'exec'), locals())
File "setup.py", line 46, in <module>
import ah_bootstrap # noqa
ModuleNotFoundError: No module named 'ah_bootstrap'
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # This file is based havily on the astropy version here:
3 # https://github.com/astropy/package-template/blob/master/setup.py
4 # Which is licensed under the astropy license, see licenses/ASTROPY.rst.
5
6 ################################################################################
7 ###### YOU SHOULD NOT HAVE TO EDIT THIS FILE, YOU SHOULD EDIT setup.cfg. #######
8 ################################################################################
9 # Note: This file needs to be Python 2 / <3.6 compatible, so that the nice
10 # "SunPy only supports Python 3.6+" error prints without syntax errors etc.
11
12 import os
13 import sys
14 import glob
15 import builtins # noqa
16 import itertools
17
18 try:
19 from configparser import ConfigParser
20 except ImportError:
21 from ConfigParser import ConfigParser
22
23 # Get some values from the setup.cfg
24 conf = ConfigParser()
25 conf.read(['setup.cfg'])
26 metadata = dict(conf.items('metadata'))
27
28 PACKAGENAME = metadata.get('package_name', 'sunpy')
29 DESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics')
30 AUTHOR = metadata.get('author', 'The SunPy Community')
31 AUTHOR_EMAIL = metadata.get('author_email', '')
32 LICENSE = metadata.get('license', 'unknown')
33 URL = metadata.get('url', 'https://sunpy.org')
34 __minimum_python_version__ = metadata.get("minimum_python_version", "3.6")
35
36 # Enforce Python version check - this is the same check as in __init__.py but
37 # this one has to happen before importing ah_bootstrap.
38 if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
39 sys.stderr.write("ERROR: SunPy requires Python {} or later\n".format(__minimum_python_version__))
40 sys.exit(1)
41
42 with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:
43 LONG_DESCRIPTION = f.read()
44
45 # Import ah_bootstrap after the python version validation
46 import ah_bootstrap # noqa
47 from setuptools import setup # noqa
48 from astropy_helpers.git_helpers import get_git_devstr # noqa
49 from astropy_helpers.setup_helpers import get_package_info # noqa
50 from astropy_helpers.setup_helpers import get_debug_option, register_commands
51 from astropy_helpers.version_helpers import generate_version_py # noqa
52
53 builtins._SUNPY_SETUP_ = True
54
55
56 # -- Read the Docs Setup -----------------------------------------------------
57
58 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
59
60 if on_rtd:
61 os.environ['HOME'] = '/home/docs/'
62 os.environ['SUNPY_CONFIGDIR'] = '/home/docs/'
63
64 # Store the package name in a built-in variable so it's easy
65 # to get from other parts of the setup infrastructure
66 # This is used by get_pkg_data in astropy amongst other things
67 builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
68
69 # VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440)
70 VERSION = metadata.get('version', '0.0.dev0')
71
72 # Indicates if this version is a release version
73 RELEASE = 'dev' not in VERSION
74
75 if not RELEASE:
76 VERSION += get_git_devstr(False)
77
78 # Populate the dict of setup command overrides; this should be done before
79 # invoking any other functionality from distutils since it can potentially
80 # modify distutils' behaviour.
81 cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
82
83 try:
84 from sunpy.tests.setup_command import SunPyTest
85 # Overwrite the Astropy Testing framework
86 cmdclassd['test'] = type('SunPyTest', (SunPyTest,),
87 {'package_name': 'sunpy'})
88
89 except Exception:
90 # Catch everything, if it doesn't work, we still want SunPy to install.
91 pass
92
93 # Freeze build information in version.py
94 generate_version_py(PACKAGENAME, VERSION, RELEASE,
95 get_debug_option(PACKAGENAME))
96
97 # Treat everything in scripts except README* as a script to be installed
98 scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
99 if not os.path.basename(fname).startswith('README')]
100
101
102 # Get configuration information from all of the various subpackages.
103 # See the docstring for setup_helpers.update_package_files for more
104 # details.
105 package_info = get_package_info()
106
107 # Add the project-global data
108 package_info['package_data'].setdefault(PACKAGENAME, [])
109 package_info['package_data'][PACKAGENAME].append('data/*')
110
111 # Define entry points for command-line scripts
112 entry_points = {'console_scripts': []}
113
114 if conf.has_section('entry_points'):
115 entry_point_list = conf.items('entry_points')
116 for entry_point in entry_point_list:
117 entry_points['console_scripts'].append('{0} = {1}'.format(
118 entry_point[0], entry_point[1]))
119
120 # Include all .c files, recursively, including those generated by
121 # Cython, since we can not do this in MANIFEST.in with a "dynamic"
122 # directory name.
123 c_files = []
124 for root, dirs, files in os.walk(PACKAGENAME):
125 for filename in files:
126 if filename.endswith('.c'):
127 c_files.append(
128 os.path.join(
129 os.path.relpath(root, PACKAGENAME), filename))
130 package_info['package_data'][PACKAGENAME].extend(c_files)
131
132
133 extra_tags = [m.strip() for m in metadata.get("extra_requires", "").split(',')]
134 if extra_tags:
135 extras_require = {tag: [m.strip() for m in metadata["{tag}_requires".format(tag=tag)].split(',')]
136 for tag in extra_tags}
137 extras_require['all'] = list(itertools.chain.from_iterable(extras_require.values()))
138 else:
139 extras_require = None
140
141 # Entry points
142 entry_points['asdf_extensions'] = [
143 'sunpy = sunpy.io.special.asdf.extension:SunpyExtension',
144 ]
145
146 setup(name=PACKAGENAME,
147 version=VERSION,
148 description=DESCRIPTION,
149 scripts=scripts,
150 setup_requires=[s.strip() for s in metadata.get("setup_requires", "").split(',')],
151 install_requires=[s.strip() for s in metadata['install_requires'].split(',')],
152 extras_require=extras_require,
153 tests_require=extras_require.get("all", ""),
154 author=AUTHOR,
155 author_email=AUTHOR_EMAIL,
156 license=LICENSE,
157 url=URL,
158 project_urls={'Funding': 'https://www.flipcause.com/widget/widget_home/MTgxMTU=',
159 'Source': 'https://github.com/sunpy/sunpy/',
160 'Tracker': 'https://github.com/sunpy/sunpy/issues'
161 },
162 long_description=LONG_DESCRIPTION,
163 long_description_content_type='text/x-rst',
164 cmdclass=cmdclassd,
165 zip_safe=False,
166 entry_points=entry_points,
167 python_requires='>={}'.format(__minimum_python_version__),
168 include_package_data=True,
169 **package_info
170 )
171
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,9 @@
import builtins # noqa
import itertools
+# Fix for https://github.com/pypa/pip/issues/6163
+sys.path.insert(0, os.path.dirname(__file__))
+
try:
from configparser import ConfigParser
except ImportError:
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,6 +15,9 @@\n import builtins # noqa\n import itertools\n \n+# Fix for https://github.com/pypa/pip/issues/6163\n+sys.path.insert(0, os.path.dirname(__file__))\n+\n try:\n from configparser import ConfigParser\n except ImportError:\n", "issue": "Pip installing sunpy dev fails\nWith `ci-helpers` I run into this issue for both travis/linux and appveyor:\r\n\r\nPlease have a look and open a PR in `ci-helpers` with the suggested fix for the case of `SUNPY_VERSION='dev'`\r\n\r\n```\r\n+++++python -m pip install git+https://github.com/sunpy/sunpy.git#egg=sunpy --upgrade --no-deps\r\nCollecting sunpy from git+https://github.com/sunpy/sunpy.git#egg=sunpy\r\n Cloning https://github.com/sunpy/sunpy.git to /tmp/pip-install-uowizwdl/sunpy\r\n Installing build dependencies ... done\r\n Getting requirements to build wheel ... error\r\n Complete output from command /home/travis/miniconda/envs/test/bin/python /home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py get_requires_for_build_wheel /tmp/tmp6h0521mp:\r\n Traceback (most recent call last):\r\n File \"/home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py\", line 207, in <module>\r\n main()\r\n File \"/home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py\", line 197, in main\r\n json_out['return_val'] = hook(**hook_input['kwargs'])\r\n File \"/home/travis/miniconda/envs/test/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py\", line 54, in get_requires_for_build_wheel\r\n return hook(config_settings)\r\n File \"/tmp/pip-build-env-3jro2te9/overlay/lib/python3.6/site-packages/setuptools/build_meta.py\", line 115, in get_requires_for_build_wheel\r\n return _get_build_requires(config_settings, requirements=['wheel'])\r\n File \"/tmp/pip-build-env-3jro2te9/overlay/lib/python3.6/site-packages/setuptools/build_meta.py\", line 101, in _get_build_requires\r\n _run_setup()\r\n File \"/tmp/pip-build-env-3jro2te9/overlay/lib/python3.6/site-packages/setuptools/build_meta.py\", line 85, in _run_setup\r\n exec(compile(code, __file__, 'exec'), locals())\r\n File \"setup.py\", line 46, in <module>\r\n import ah_bootstrap # noqa\r\n ModuleNotFoundError: No module named 'ah_bootstrap'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# This file is based havily on the astropy version here:\n# https://github.com/astropy/package-template/blob/master/setup.py\n# Which is licensed under the astropy license, see licenses/ASTROPY.rst.\n\n################################################################################\n###### YOU SHOULD NOT HAVE TO EDIT THIS FILE, YOU SHOULD EDIT setup.cfg. 
#######\n################################################################################\n# Note: This file needs to be Python 2 / <3.6 compatible, so that the nice\n# \"SunPy only supports Python 3.6+\" error prints without syntax errors etc.\n\nimport os\nimport sys\nimport glob\nimport builtins # noqa\nimport itertools\n\ntry:\n from configparser import ConfigParser\nexcept ImportError:\n from ConfigParser import ConfigParser\n\n# Get some values from the setup.cfg\nconf = ConfigParser()\nconf.read(['setup.cfg'])\nmetadata = dict(conf.items('metadata'))\n\nPACKAGENAME = metadata.get('package_name', 'sunpy')\nDESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics')\nAUTHOR = metadata.get('author', 'The SunPy Community')\nAUTHOR_EMAIL = metadata.get('author_email', '')\nLICENSE = metadata.get('license', 'unknown')\nURL = metadata.get('url', 'https://sunpy.org')\n__minimum_python_version__ = metadata.get(\"minimum_python_version\", \"3.6\")\n\n# Enforce Python version check - this is the same check as in __init__.py but\n# this one has to happen before importing ah_bootstrap.\nif sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):\n sys.stderr.write(\"ERROR: SunPy requires Python {} or later\\n\".format(__minimum_python_version__))\n sys.exit(1)\n\nwith open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\n# Import ah_bootstrap after the python version validation\nimport ah_bootstrap # noqa\nfrom setuptools import setup # noqa\nfrom astropy_helpers.git_helpers import get_git_devstr # noqa\nfrom astropy_helpers.setup_helpers import get_package_info # noqa\nfrom astropy_helpers.setup_helpers import get_debug_option, register_commands\nfrom astropy_helpers.version_helpers import generate_version_py # noqa\n\nbuiltins._SUNPY_SETUP_ = True\n\n\n# -- Read the Docs Setup -----------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif on_rtd:\n os.environ['HOME'] = '/home/docs/'\n os.environ['SUNPY_CONFIGDIR'] = '/home/docs/'\n\n# Store the package name in a built-in variable so it's easy\n# to get from other parts of the setup infrastructure\n# This is used by get_pkg_data in astropy amongst other things\nbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME\n\n# VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440)\nVERSION = metadata.get('version', '0.0.dev0')\n\n# Indicates if this version is a release version\nRELEASE = 'dev' not in VERSION\n\nif not RELEASE:\n VERSION += get_git_devstr(False)\n\n# Populate the dict of setup command overrides; this should be done before\n# invoking any other functionality from distutils since it can potentially\n# modify distutils' behaviour.\ncmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n\ntry:\n from sunpy.tests.setup_command import SunPyTest\n # Overwrite the Astropy Testing framework\n cmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n {'package_name': 'sunpy'})\n\nexcept Exception:\n # Catch everything, if it doesn't work, we still want SunPy to install.\n pass\n\n# Freeze build information in version.py\ngenerate_version_py(PACKAGENAME, VERSION, RELEASE,\n get_debug_option(PACKAGENAME))\n\n# Treat everything in scripts except README* as a script to be installed\nscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))\n if not os.path.basename(fname).startswith('README')]\n\n\n# Get configuration information from all of 
the various subpackages.\n# See the docstring for setup_helpers.update_package_files for more\n# details.\npackage_info = get_package_info()\n\n# Add the project-global data\npackage_info['package_data'].setdefault(PACKAGENAME, [])\npackage_info['package_data'][PACKAGENAME].append('data/*')\n\n# Define entry points for command-line scripts\nentry_points = {'console_scripts': []}\n\nif conf.has_section('entry_points'):\n entry_point_list = conf.items('entry_points')\n for entry_point in entry_point_list:\n entry_points['console_scripts'].append('{0} = {1}'.format(\n entry_point[0], entry_point[1]))\n\n# Include all .c files, recursively, including those generated by\n# Cython, since we can not do this in MANIFEST.in with a \"dynamic\"\n# directory name.\nc_files = []\nfor root, dirs, files in os.walk(PACKAGENAME):\n for filename in files:\n if filename.endswith('.c'):\n c_files.append(\n os.path.join(\n os.path.relpath(root, PACKAGENAME), filename))\npackage_info['package_data'][PACKAGENAME].extend(c_files)\n\n\nextra_tags = [m.strip() for m in metadata.get(\"extra_requires\", \"\").split(',')]\nif extra_tags:\n extras_require = {tag: [m.strip() for m in metadata[\"{tag}_requires\".format(tag=tag)].split(',')]\n for tag in extra_tags}\n extras_require['all'] = list(itertools.chain.from_iterable(extras_require.values()))\nelse:\n extras_require = None\n\n# Entry points\nentry_points['asdf_extensions'] = [\n 'sunpy = sunpy.io.special.asdf.extension:SunpyExtension',\n]\n\nsetup(name=PACKAGENAME,\n version=VERSION,\n description=DESCRIPTION,\n scripts=scripts,\n setup_requires=[s.strip() for s in metadata.get(\"setup_requires\", \"\").split(',')],\n install_requires=[s.strip() for s in metadata['install_requires'].split(',')],\n extras_require=extras_require,\n tests_require=extras_require.get(\"all\", \"\"),\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n project_urls={'Funding': 'https://www.flipcause.com/widget/widget_home/MTgxMTU=',\n 'Source': 'https://github.com/sunpy/sunpy/',\n 'Tracker': 'https://github.com/sunpy/sunpy/issues'\n },\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/x-rst',\n cmdclass=cmdclassd,\n zip_safe=False,\n entry_points=entry_points,\n python_requires='>={}'.format(__minimum_python_version__),\n include_package_data=True,\n **package_info\n )\n", "path": "setup.py"}]}
| 3,027 | 87 |
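The sunpy failure above is a PEP 517 build-isolation effect: pip unpacks the package into a temporary directory and runs setup.py without guaranteeing that its own directory is on `sys.path`, so the sibling module `ah_bootstrap.py` cannot be imported. A minimal sketch of the workaround the patch applies; the final import assumes an `ah_bootstrap.py` sitting next to the script, as in that repository:

```python
import os
import sys

# Make the directory containing this script importable again
# (workaround referenced in the diff above, pip issue #6163).
sys.path.insert(0, os.path.dirname(__file__))

import ah_bootstrap  # noqa: E402  (assumes ah_bootstrap.py sits next to setup.py)
```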
gh_patches_debug_35393
|
rasdani/github-patches
|
git_diff
|
celery__kombu-479
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unexpected keyword argument 'query' when using pymongo 3.0
```
File "/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/kombu/transport/virtual/__init__.py", line 487, in queue_bind
self._queue_bind(exchange, *meta)
File "/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/kombu/transport/mongodb.py", line 244, in _queue_bind
self.create_broadcast_cursor(exchange, routing_key, pattern, queue)
File "/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/kombu/transport/mongodb.py", line 302, in create_broadcast_cursor
tailable=True,
File "/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/pymongo/collection.py", line 929, in find
return Cursor(self, *args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'query'
```
https://github.com/celery/kombu/blob/8e6aed9fcf978b6c34108c0d37b720bd125f0352/kombu/transport/mongodb.py#L297-L300
query and tailable are invalid arguments with pymongo 3.0
</issue>
<code>
[start of kombu/transport/mongodb.py]
1 """
2 kombu.transport.mongodb
3 =======================
4
5 MongoDB transport.
6
7 :copyright: (c) 2010 - 2013 by Flavio Percoco Premoli.
8 :license: BSD, see LICENSE for more details.
9
10 """
11 from __future__ import absolute_import
12
13 import pymongo
14
15 from pymongo import errors
16 from pymongo import MongoClient, uri_parser
17
18 from kombu.five import Empty
19 from kombu.syn import _detect_environment
20 from kombu.utils.encoding import bytes_to_str
21 from kombu.utils.json import loads, dumps
22
23 from . import virtual
24
25 DEFAULT_HOST = '127.0.0.1'
26 DEFAULT_PORT = 27017
27
28 DEFAULT_MESSAGES_COLLECTION = 'messages'
29 DEFAULT_ROUTING_COLLECTION = 'messages.routing'
30 DEFAULT_BROADCAST_COLLECTION = 'messages.broadcast'
31
32
33 class BroadcastCursor(object):
34 """Cursor for broadcast queues."""
35
36 def __init__(self, cursor):
37 self._cursor = cursor
38
39 self.purge(rewind=False)
40
41 def get_size(self):
42 return self._cursor.count() - self._offset
43
44 def close(self):
45 self._cursor.close()
46
47 def purge(self, rewind=True):
48 if rewind:
49 self._cursor.rewind()
50
51 # Fast forward the cursor past old events
52 self._offset = self._cursor.count()
53 self._cursor = self._cursor.skip(self._offset)
54
55 def __iter__(self):
56 return self
57
58 def __next__(self):
59 while True:
60 try:
61 msg = next(self._cursor)
62 except pymongo.errors.OperationFailure as exc:
63 # In some cases tailed cursor can become invalid
64 # and have to be reinitalized
65 if 'not valid at server' in exc.message:
66 self.purge()
67
68 continue
69
70 raise
71 else:
72 break
73
74 self._offset += 1
75
76 return msg
77 next = __next__
78
79
80 class Channel(virtual.Channel):
81 _client = None
82 supports_fanout = True
83 _fanout_queues = {}
84
85 def __init__(self, *vargs, **kwargs):
86 super(Channel, self).__init__(*vargs, **kwargs)
87
88 self._broadcast_cursors = {}
89
90 # Evaluate connection
91 self._create_client()
92
93 def _new_queue(self, queue, **kwargs):
94 pass
95
96 def _get(self, queue):
97 if queue in self._fanout_queues:
98 try:
99 msg = next(self.get_broadcast_cursor(queue))
100 except StopIteration:
101 msg = None
102 else:
103 msg = self.get_messages().find_and_modify(
104 query={'queue': queue},
105 sort=[('priority', pymongo.ASCENDING),
106 ('_id', pymongo.ASCENDING)],
107 remove=True,
108 )
109
110 if msg is None:
111 raise Empty()
112
113 return loads(bytes_to_str(msg['payload']))
114
115 def _size(self, queue):
116 if queue in self._fanout_queues:
117 return self.get_broadcast_cursor(queue).get_size()
118
119 return self.get_messages().find({'queue': queue}).count()
120
121 def _put(self, queue, message, **kwargs):
122 self.get_messages().insert({
123 'payload': dumps(message),
124 'queue': queue,
125 'priority': self._get_message_priority(message, reverse=True),
126 })
127
128 def _purge(self, queue):
129 size = self._size(queue)
130
131 if queue in self._fanout_queues:
132 self.get_broadcaset_cursor(queue).purge()
133 else:
134 self.get_messages().remove({'queue': queue})
135
136 return size
137
138 def _parse_uri(self, scheme='mongodb://'):
139 # See mongodb uri documentation:
140 # http://docs.mongodb.org/manual/reference/connection-string/
141 client = self.connection.client
142 hostname = client.hostname
143
144 if not hostname.startswith(scheme):
145 hostname = scheme + hostname
146
147 if not hostname[len(scheme):]:
148 hostname += DEFAULT_HOST
149
150 if client.userid and '@' not in hostname:
151 head, tail = hostname.split('://')
152
153 credentials = client.userid
154 if client.password:
155 credentials += ':' + client.password
156
157 hostname = head + '://' + credentials + '@' + tail
158
159 port = client.port if client.port is not None else DEFAULT_PORT
160
161 parsed = uri_parser.parse_uri(hostname, port)
162
163 dbname = parsed['database'] or client.virtual_host
164
165 if dbname in ('/', None):
166 dbname = 'kombu_default'
167
168 options = {
169 'auto_start_request': True,
170 'ssl': client.ssl,
171 'connectTimeoutMS': (int(client.connect_timeout * 1000)
172 if client.connect_timeout else None),
173 }
174 options.update(client.transport_options)
175 options.update(parsed['options'])
176
177 return hostname, dbname, options
178
179 def _prepare_client_options(self, options):
180 if pymongo.version_tuple >= (3, ):
181 options.pop('auto_start_request', None)
182
183 def _open(self, scheme='mongodb://'):
184 hostname, dbname, options = self._parse_uri(scheme=scheme)
185
186 self._prepare_client_options(options)
187 mongoconn = MongoClient(
188 host=hostname, ssl=options['ssl'],
189 auto_start_request=options['auto_start_request'],
190 connectTimeoutMS=options['connectTimeoutMS'],
191 use_greenlets=_detect_environment() != 'default',
192 )
193 database = mongoconn[dbname]
194
195 version = mongoconn.server_info()['version']
196 if tuple(map(int, version.split('.')[:2])) < (1, 3):
197 raise NotImplementedError(
198 'Kombu requires MongoDB version 1.3+ (server is {0})'.format(
199 version))
200
201 self._create_broadcast(database, options)
202
203 self._client = database
204
205 def _create_broadcast(self, database, options):
206 '''Create capped collection for broadcast messages.'''
207 if DEFAULT_BROADCAST_COLLECTION in database.collection_names():
208 return
209
210 capsize = options.get('capped_queue_size') or 100000
211 database.create_collection(DEFAULT_BROADCAST_COLLECTION,
212 size=capsize, capped=True)
213
214 def _ensure_indexes(self):
215 '''Ensure indexes on collections.'''
216 self.get_messages().ensure_index(
217 [('queue', 1), ('priority', 1), ('_id', 1)], background=True,
218 )
219 self.get_broadcast().ensure_index([('queue', 1)])
220 self.get_routing().ensure_index([('queue', 1), ('exchange', 1)])
221
222 def get_table(self, exchange):
223 """Get table of bindings for ``exchange``."""
224 # TODO Store a more complete exchange metatable in the
225 # routing collection
226 localRoutes = frozenset(self.state.exchanges[exchange]['table'])
227 brokerRoutes = self.get_messages().routing.find(
228 {'exchange': exchange}
229 )
230
231 return localRoutes | frozenset((r['routing_key'],
232 r['pattern'],
233 r['queue']) for r in brokerRoutes)
234
235 def _put_fanout(self, exchange, message, routing_key, **kwargs):
236 """Deliver fanout message."""
237 self.get_broadcast().insert({'payload': dumps(message),
238 'queue': exchange})
239
240 def _queue_bind(self, exchange, routing_key, pattern, queue):
241 if self.typeof(exchange).type == 'fanout':
242 self.create_broadcast_cursor(exchange, routing_key, pattern, queue)
243 self._fanout_queues[queue] = exchange
244
245 meta = {'exchange': exchange,
246 'queue': queue,
247 'routing_key': routing_key,
248 'pattern': pattern}
249 self.get_routing().update(meta, meta, upsert=True)
250
251 def queue_delete(self, queue, **kwargs):
252 self.get_routing().remove({'queue': queue})
253
254 super(Channel, self).queue_delete(queue, **kwargs)
255
256 if queue in self._fanout_queues:
257 try:
258 cursor = self._broadcast_cursors.pop(queue)
259 except KeyError:
260 pass
261 else:
262 cursor.close()
263
264 self._fanout_queues.pop(queue)
265
266 def _create_client(self):
267 self._open()
268 self._ensure_indexes()
269
270 @property
271 def client(self):
272 if self._client is None:
273 self._create_client()
274 return self._client
275
276 def get_messages(self):
277 return self.client[DEFAULT_MESSAGES_COLLECTION]
278
279 def get_routing(self):
280 return self.client[DEFAULT_ROUTING_COLLECTION]
281
282 def get_broadcast(self):
283 return self.client[DEFAULT_BROADCAST_COLLECTION]
284
285 def get_broadcast_cursor(self, queue):
286 try:
287 return self._broadcast_cursors[queue]
288 except KeyError:
289 # Cursor may be absent when Channel created more than once.
290 # _fanout_queues is a class-level mutable attribute so it's
291 # shared over all Channel instances.
292 return self.create_broadcast_cursor(
293 self._fanout_queues[queue], None, None, queue,
294 )
295
296 def create_broadcast_cursor(self, exchange, routing_key, pattern, queue):
297 cursor = self.get_broadcast().find(
298 query={'queue': exchange},
299 sort=[('$natural', 1)],
300 tailable=True,
301 )
302 ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor)
303 return ret
304
305
306 class Transport(virtual.Transport):
307 Channel = Channel
308
309 can_parse_url = True
310 polling_interval = 1
311 default_port = DEFAULT_PORT
312 connection_errors = (
313 virtual.Transport.connection_errors + (errors.ConnectionFailure, )
314 )
315 channel_errors = (
316 virtual.Transport.channel_errors + (
317 errors.ConnectionFailure,
318 errors.OperationFailure)
319 )
320 driver_type = 'mongodb'
321 driver_name = 'pymongo'
322
323 implements = virtual.Transport.implements.extend(
324 exchange_types=frozenset(['direct', 'topic', 'fanout']),
325 )
326
327 def driver_version(self):
328 return pymongo.version
329
[end of kombu/transport/mongodb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kombu/transport/mongodb.py b/kombu/transport/mongodb.py
--- a/kombu/transport/mongodb.py
+++ b/kombu/transport/mongodb.py
@@ -14,6 +14,7 @@
from pymongo import errors
from pymongo import MongoClient, uri_parser
+from pymongo.cursor import CursorType
from kombu.five import Empty
from kombu.syn import _detect_environment
@@ -179,17 +180,23 @@
def _prepare_client_options(self, options):
if pymongo.version_tuple >= (3, ):
options.pop('auto_start_request', None)
+ return options
def _open(self, scheme='mongodb://'):
hostname, dbname, options = self._parse_uri(scheme=scheme)
- self._prepare_client_options(options)
- mongoconn = MongoClient(
- host=hostname, ssl=options['ssl'],
- auto_start_request=options['auto_start_request'],
- connectTimeoutMS=options['connectTimeoutMS'],
- use_greenlets=_detect_environment() != 'default',
- )
+ conf = self._prepare_client_options(options)
+ conf['host'] = hostname
+
+ env = _detect_environment()
+ if env == 'gevent':
+ from gevent import monkey
+ monkey.patch_all()
+ elif env == 'eventlet':
+ from eventlet import monkey_patch
+ monkey_patch()
+
+ mongoconn = MongoClient(**conf)
database = mongoconn[dbname]
version = mongoconn.server_info()['version']
@@ -294,11 +301,18 @@
)
def create_broadcast_cursor(self, exchange, routing_key, pattern, queue):
- cursor = self.get_broadcast().find(
- query={'queue': exchange},
- sort=[('$natural', 1)],
- tailable=True,
- )
+ if pymongo.version_tuple >= (3, ):
+ query = dict(filter={'queue': exchange},
+ sort=[('$natural', 1)],
+ cursor_type=CursorType.TAILABLE
+ )
+ else:
+ query = dict(query={'queue': exchange},
+ sort=[('$natural', 1)],
+ tailable=True
+ )
+
+ cursor = self.get_broadcast().find(**query)
ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor)
return ret
|
{"golden_diff": "diff --git a/kombu/transport/mongodb.py b/kombu/transport/mongodb.py\n--- a/kombu/transport/mongodb.py\n+++ b/kombu/transport/mongodb.py\n@@ -14,6 +14,7 @@\n \n from pymongo import errors\n from pymongo import MongoClient, uri_parser\n+from pymongo.cursor import CursorType\n \n from kombu.five import Empty\n from kombu.syn import _detect_environment\n@@ -179,17 +180,23 @@\n def _prepare_client_options(self, options):\n if pymongo.version_tuple >= (3, ):\n options.pop('auto_start_request', None)\n+ return options\n \n def _open(self, scheme='mongodb://'):\n hostname, dbname, options = self._parse_uri(scheme=scheme)\n \n- self._prepare_client_options(options)\n- mongoconn = MongoClient(\n- host=hostname, ssl=options['ssl'],\n- auto_start_request=options['auto_start_request'],\n- connectTimeoutMS=options['connectTimeoutMS'],\n- use_greenlets=_detect_environment() != 'default',\n- )\n+ conf = self._prepare_client_options(options)\n+ conf['host'] = hostname\n+\n+ env = _detect_environment()\n+ if env == 'gevent':\n+ from gevent import monkey\n+ monkey.patch_all()\n+ elif env == 'eventlet':\n+ from eventlet import monkey_patch\n+ monkey_patch()\n+\n+ mongoconn = MongoClient(**conf)\n database = mongoconn[dbname]\n \n version = mongoconn.server_info()['version']\n@@ -294,11 +301,18 @@\n )\n \n def create_broadcast_cursor(self, exchange, routing_key, pattern, queue):\n- cursor = self.get_broadcast().find(\n- query={'queue': exchange},\n- sort=[('$natural', 1)],\n- tailable=True,\n- )\n+ if pymongo.version_tuple >= (3, ):\n+ query = dict(filter={'queue': exchange},\n+ sort=[('$natural', 1)],\n+ cursor_type=CursorType.TAILABLE\n+ )\n+ else:\n+ query = dict(query={'queue': exchange},\n+ sort=[('$natural', 1)],\n+ tailable=True\n+ )\n+\n+ cursor = self.get_broadcast().find(**query)\n ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor)\n return ret\n", "issue": "unexpected keyword argument 'query' when using pymongo 3.0\n```\n File \"/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/kombu/transport/virtual/__init__.py\", line 487, in queue_bind\n self._queue_bind(exchange, *meta)\n File \"/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/kombu/transport/mongodb.py\", line 244, in _queue_bind\n self.create_broadcast_cursor(exchange, routing_key, pattern, queue)\n File \"/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/kombu/transport/mongodb.py\", line 302, in create_broadcast_cursor\n tailable=True,\n File \"/opt/ampli/apps/marvintaskmgr/venv/lib/python2.7/site-packages/pymongo/collection.py\", line 929, in find\n return Cursor(self, *args, **kwargs)\nTypeError: __init__() got an unexpected keyword argument 'query'\n```\n\nhttps://github.com/celery/kombu/blob/8e6aed9fcf978b6c34108c0d37b720bd125f0352/kombu/transport/mongodb.py#L297-L300\n\nquery and tailable are invalid arguments with pymongo 3.0\n\n", "before_files": [{"content": "\"\"\"\nkombu.transport.mongodb\n=======================\n\nMongoDB transport.\n\n:copyright: (c) 2010 - 2013 by Flavio Percoco Premoli.\n:license: BSD, see LICENSE for more details.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport pymongo\n\nfrom pymongo import errors\nfrom pymongo import MongoClient, uri_parser\n\nfrom kombu.five import Empty\nfrom kombu.syn import _detect_environment\nfrom kombu.utils.encoding import bytes_to_str\nfrom kombu.utils.json import loads, dumps\n\nfrom . 
import virtual\n\nDEFAULT_HOST = '127.0.0.1'\nDEFAULT_PORT = 27017\n\nDEFAULT_MESSAGES_COLLECTION = 'messages'\nDEFAULT_ROUTING_COLLECTION = 'messages.routing'\nDEFAULT_BROADCAST_COLLECTION = 'messages.broadcast'\n\n\nclass BroadcastCursor(object):\n \"\"\"Cursor for broadcast queues.\"\"\"\n\n def __init__(self, cursor):\n self._cursor = cursor\n\n self.purge(rewind=False)\n\n def get_size(self):\n return self._cursor.count() - self._offset\n\n def close(self):\n self._cursor.close()\n\n def purge(self, rewind=True):\n if rewind:\n self._cursor.rewind()\n\n # Fast forward the cursor past old events\n self._offset = self._cursor.count()\n self._cursor = self._cursor.skip(self._offset)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n while True:\n try:\n msg = next(self._cursor)\n except pymongo.errors.OperationFailure as exc:\n # In some cases tailed cursor can become invalid\n # and have to be reinitalized\n if 'not valid at server' in exc.message:\n self.purge()\n\n continue\n\n raise\n else:\n break\n\n self._offset += 1\n\n return msg\n next = __next__\n\n\nclass Channel(virtual.Channel):\n _client = None\n supports_fanout = True\n _fanout_queues = {}\n\n def __init__(self, *vargs, **kwargs):\n super(Channel, self).__init__(*vargs, **kwargs)\n\n self._broadcast_cursors = {}\n\n # Evaluate connection\n self._create_client()\n\n def _new_queue(self, queue, **kwargs):\n pass\n\n def _get(self, queue):\n if queue in self._fanout_queues:\n try:\n msg = next(self.get_broadcast_cursor(queue))\n except StopIteration:\n msg = None\n else:\n msg = self.get_messages().find_and_modify(\n query={'queue': queue},\n sort=[('priority', pymongo.ASCENDING),\n ('_id', pymongo.ASCENDING)],\n remove=True,\n )\n\n if msg is None:\n raise Empty()\n\n return loads(bytes_to_str(msg['payload']))\n\n def _size(self, queue):\n if queue in self._fanout_queues:\n return self.get_broadcast_cursor(queue).get_size()\n\n return self.get_messages().find({'queue': queue}).count()\n\n def _put(self, queue, message, **kwargs):\n self.get_messages().insert({\n 'payload': dumps(message),\n 'queue': queue,\n 'priority': self._get_message_priority(message, reverse=True),\n })\n\n def _purge(self, queue):\n size = self._size(queue)\n\n if queue in self._fanout_queues:\n self.get_broadcaset_cursor(queue).purge()\n else:\n self.get_messages().remove({'queue': queue})\n\n return size\n\n def _parse_uri(self, scheme='mongodb://'):\n # See mongodb uri documentation:\n # http://docs.mongodb.org/manual/reference/connection-string/\n client = self.connection.client\n hostname = client.hostname\n\n if not hostname.startswith(scheme):\n hostname = scheme + hostname\n\n if not hostname[len(scheme):]:\n hostname += DEFAULT_HOST\n\n if client.userid and '@' not in hostname:\n head, tail = hostname.split('://')\n\n credentials = client.userid\n if client.password:\n credentials += ':' + client.password\n\n hostname = head + '://' + credentials + '@' + tail\n\n port = client.port if client.port is not None else DEFAULT_PORT\n\n parsed = uri_parser.parse_uri(hostname, port)\n\n dbname = parsed['database'] or client.virtual_host\n\n if dbname in ('/', None):\n dbname = 'kombu_default'\n\n options = {\n 'auto_start_request': True,\n 'ssl': client.ssl,\n 'connectTimeoutMS': (int(client.connect_timeout * 1000)\n if client.connect_timeout else None),\n }\n options.update(client.transport_options)\n options.update(parsed['options'])\n\n return hostname, dbname, options\n\n def _prepare_client_options(self, options):\n if 
pymongo.version_tuple >= (3, ):\n options.pop('auto_start_request', None)\n\n def _open(self, scheme='mongodb://'):\n hostname, dbname, options = self._parse_uri(scheme=scheme)\n\n self._prepare_client_options(options)\n mongoconn = MongoClient(\n host=hostname, ssl=options['ssl'],\n auto_start_request=options['auto_start_request'],\n connectTimeoutMS=options['connectTimeoutMS'],\n use_greenlets=_detect_environment() != 'default',\n )\n database = mongoconn[dbname]\n\n version = mongoconn.server_info()['version']\n if tuple(map(int, version.split('.')[:2])) < (1, 3):\n raise NotImplementedError(\n 'Kombu requires MongoDB version 1.3+ (server is {0})'.format(\n version))\n\n self._create_broadcast(database, options)\n\n self._client = database\n\n def _create_broadcast(self, database, options):\n '''Create capped collection for broadcast messages.'''\n if DEFAULT_BROADCAST_COLLECTION in database.collection_names():\n return\n\n capsize = options.get('capped_queue_size') or 100000\n database.create_collection(DEFAULT_BROADCAST_COLLECTION,\n size=capsize, capped=True)\n\n def _ensure_indexes(self):\n '''Ensure indexes on collections.'''\n self.get_messages().ensure_index(\n [('queue', 1), ('priority', 1), ('_id', 1)], background=True,\n )\n self.get_broadcast().ensure_index([('queue', 1)])\n self.get_routing().ensure_index([('queue', 1), ('exchange', 1)])\n\n def get_table(self, exchange):\n \"\"\"Get table of bindings for ``exchange``.\"\"\"\n # TODO Store a more complete exchange metatable in the\n # routing collection\n localRoutes = frozenset(self.state.exchanges[exchange]['table'])\n brokerRoutes = self.get_messages().routing.find(\n {'exchange': exchange}\n )\n\n return localRoutes | frozenset((r['routing_key'],\n r['pattern'],\n r['queue']) for r in brokerRoutes)\n\n def _put_fanout(self, exchange, message, routing_key, **kwargs):\n \"\"\"Deliver fanout message.\"\"\"\n self.get_broadcast().insert({'payload': dumps(message),\n 'queue': exchange})\n\n def _queue_bind(self, exchange, routing_key, pattern, queue):\n if self.typeof(exchange).type == 'fanout':\n self.create_broadcast_cursor(exchange, routing_key, pattern, queue)\n self._fanout_queues[queue] = exchange\n\n meta = {'exchange': exchange,\n 'queue': queue,\n 'routing_key': routing_key,\n 'pattern': pattern}\n self.get_routing().update(meta, meta, upsert=True)\n\n def queue_delete(self, queue, **kwargs):\n self.get_routing().remove({'queue': queue})\n\n super(Channel, self).queue_delete(queue, **kwargs)\n\n if queue in self._fanout_queues:\n try:\n cursor = self._broadcast_cursors.pop(queue)\n except KeyError:\n pass\n else:\n cursor.close()\n\n self._fanout_queues.pop(queue)\n\n def _create_client(self):\n self._open()\n self._ensure_indexes()\n\n @property\n def client(self):\n if self._client is None:\n self._create_client()\n return self._client\n\n def get_messages(self):\n return self.client[DEFAULT_MESSAGES_COLLECTION]\n\n def get_routing(self):\n return self.client[DEFAULT_ROUTING_COLLECTION]\n\n def get_broadcast(self):\n return self.client[DEFAULT_BROADCAST_COLLECTION]\n\n def get_broadcast_cursor(self, queue):\n try:\n return self._broadcast_cursors[queue]\n except KeyError:\n # Cursor may be absent when Channel created more than once.\n # _fanout_queues is a class-level mutable attribute so it's\n # shared over all Channel instances.\n return self.create_broadcast_cursor(\n self._fanout_queues[queue], None, None, queue,\n )\n\n def create_broadcast_cursor(self, exchange, routing_key, pattern, queue):\n cursor = 
self.get_broadcast().find(\n query={'queue': exchange},\n sort=[('$natural', 1)],\n tailable=True,\n )\n ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor)\n return ret\n\n\nclass Transport(virtual.Transport):\n Channel = Channel\n\n can_parse_url = True\n polling_interval = 1\n default_port = DEFAULT_PORT\n connection_errors = (\n virtual.Transport.connection_errors + (errors.ConnectionFailure, )\n )\n channel_errors = (\n virtual.Transport.channel_errors + (\n errors.ConnectionFailure,\n errors.OperationFailure)\n )\n driver_type = 'mongodb'\n driver_name = 'pymongo'\n\n implements = virtual.Transport.implements.extend(\n exchange_types=frozenset(['direct', 'topic', 'fanout']),\n )\n\n def driver_version(self):\n return pymongo.version\n", "path": "kombu/transport/mongodb.py"}]}
| 3,876 | 527 |
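The kombu traceback above comes from an API change: pymongo 3.0 replaced `find()`'s `query` and `tailable` keywords with `filter` and `cursor_type`, so the old spelling raises `TypeError`. A sketch of the version branch the patch introduces; it assumes a reachable MongoDB, and the database, collection, and queue names are placeholders rather than values from a real deployment:

```python
# The API change behind the TypeError above, written as the same version branch
# the patch adds. Database, collection and queue names are placeholders.
import pymongo

client = pymongo.MongoClient()
coll = client['kombu_default']['messages.broadcast']  # capped collection, per the transport code

if pymongo.version_tuple >= (3,):
    from pymongo.cursor import CursorType  # CursorType only exists in pymongo >= 3.0
    cursor = coll.find(filter={'queue': 'celery'},
                       sort=[('$natural', 1)],
                       cursor_type=CursorType.TAILABLE)
else:
    # pymongo 2.x spelling; passing these keywords to 3.x raises the TypeError shown above.
    cursor = coll.find(query={'queue': 'celery'},
                       sort=[('$natural', 1)],
                       tailable=True)
```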
gh_patches_debug_9007
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-3156
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update matrix tests to use Python 3.10
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import print_function
3 from setuptools import setup, find_packages
4 import os
5 import stat
6 import sys
7
8 #VERSION = "2.1dev4"
9 VERSION = "3.7.1"
10
11 # Taken from kennethreitz/requests/setup.py
12 package_directory = os.path.realpath(os.path.dirname(__file__))
13
14
15 def get_file_contents(file_path):
16 """Get the context of the file using full path name."""
17 content = ""
18 try:
19 full_path = os.path.join(package_directory, file_path)
20 content = open(full_path, 'r').read()
21 except:
22 print("### could not open file {0!r}".format(file_path), file=sys.stderr)
23 return content
24
25
26 def get_file_list(file_path):
27 full_path = os.path.join(package_directory, file_path)
28 file_list = os.listdir(full_path)
29 # now we need to add the path to the files
30 return [file_path + f for f in file_list]
31
32
33 install_requires = ["beautifulsoup4[lxml]>=4.3.2",
34 "cbor2>=5.0.1",
35 "configobj>=5.0.6",
36 "croniter>=0.3.8",
37 "cryptography>=2.4.2",
38 "defusedxml>=0.4.1",
39 "Flask>=0.10.1,<2.0",
40 "Flask-Babel>=0.9",
41 "Flask-Migrate>=1.2.0,<3.0",
42 "Flask-Script>=2.0.5",
43 "Flask-SQLAlchemy>=2.0",
44 "Flask-Versioned>=0.9.4",
45 "future>=0.18.2;python_version<'3.0'",
46 "google-auth>=1.23.0",
47 "huey[redis]>=1.11.0",
48 "importlib_metadata>=2.1.1",
49 "ldap3>=2.6",
50 "netaddr>=0.7.12",
51 "passlib[bcrypt]>=1.7.0",
52 "argon2_cffi>=20.1.0",
53 "Pillow>=6.2.1",
54 "pydash>=4.7.4",
55 "PyJWT>=1.3.0",
56 "PyMySQL>=0.6.6",
57 "pyOpenSSL>=17.5",
58 "pyrad>=2.0",
59 "python-dateutil>=2.7.3",
60 "python-gnupg>=0.4.4",
61 "PyYAML>=5.1",
62 "qrcode>=6.1",
63 "requests>=2.7.0",
64 "smpplib>=2.0",
65 "SQLAlchemy>=1.3.0,<1.4.0",
66 "sqlsoup>=0.9.0"]
67
68
69 def get_man_pages(dir):
70 """
71 Get man pages in a directory.
72 :param dir:
73 :return: list of file names
74 """
75 files = os.listdir(dir)
76 r_files = []
77 for file in files:
78 if file.endswith(".1"):
79 r_files.append(dir + "/" + file)
80 return r_files
81
82
83 def get_scripts(dir):
84 """
85 Get files that are executable
86 :param dir:
87 :return: list of file names
88 """
89 files = os.listdir(dir)
90 r_files = []
91 for file in files:
92 if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
93 r_files.append(dir + "/" + file)
94 return r_files
95
96
97 setup(
98 name='privacyIDEA',
99 version=VERSION,
100 description='privacyIDEA: identity, multifactor authentication (OTP), '
101 'authorization, audit',
102 author='privacyidea.org',
103 license='AGPLv3',
104 author_email='[email protected]',
105 url='http://www.privacyidea.org',
106 keywords='OTP, two factor authentication, management, security',
107 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
108 packages=find_packages(),
109 scripts=["pi-manage"] + get_scripts("tools"),
110 extras_require={
111 'doc': ["Pallets-Sphinx-Themes>=1.2.3",
112 "Sphinx>=1.3.1",
113 "sphinxcontrib-httpdomain>=1.3.0",
114 "sphinxcontrib-plantuml>=0.18",
115 "sphinxcontrib-spelling>=7.0.0"],
116 'test': ["mock>=2.0.0",
117 "pytest>=3.6.0",
118 "pytest-cov>=2.5.1",
119 "responses>=0.9.0",
120 "testfixtures>=6.14.2"],
121 'postgres': ['psycopg2>=2.8.3'],
122 'hsm': ['PyKCS11>=1.5.10']
123 },
124 install_requires=install_requires,
125 include_package_data=True,
126 data_files=[('etc/privacyidea/',
127 ['deploy/apache/privacyideaapp.wsgi',
128 'deploy/privacyidea/dictionary']),
129 ('share/man/man1', get_man_pages("tools")),
130 ('lib/privacyidea/migrations',
131 ["migrations/alembic.ini",
132 "migrations/env.py",
133 "migrations/README",
134 "migrations/script.py.mako"]),
135 ('lib/privacyidea/migrations/versions',
136 get_file_list("migrations/versions/")),
137 ('lib/privacyidea/', ['requirements.txt'])
138 ],
139 classifiers=["Framework :: Flask",
140 "License :: OSI Approved :: "
141 "GNU Affero General Public License v3",
142 "Programming Language :: Python",
143 "Development Status :: 5 - Production/Stable",
144 "Topic :: Internet",
145 "Topic :: Security",
146 "Topic :: System ::"
147 " Systems Administration :: Authentication/Directory",
148 'Programming Language :: Python',
149 'Programming Language :: Python :: 2',
150 'Programming Language :: Python :: 2.7',
151 'Programming Language :: Python :: 3',
152 'Programming Language :: Python :: 3.5',
153 'Programming Language :: Python :: 3.6',
154 'Programming Language :: Python :: 3.7',
155 'Programming Language :: Python :: 3.8',
156 'Programming Language :: Python :: 3.9'
157 ],
158 zip_safe=False,
159 long_description=get_file_contents('README.rst')
160 )
161
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -153,7 +153,8 @@
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
- 'Programming Language :: Python :: 3.9'
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10'
],
zip_safe=False,
long_description=get_file_contents('README.rst')
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -153,7 +153,8 @@\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n- 'Programming Language :: Python :: 3.9'\n+ 'Programming Language :: Python :: 3.9',\n+ 'Programming Language :: Python :: 3.10'\n ],\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n", "issue": "Update matrix tests to use Python 3.10\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION = \"2.1dev4\"\nVERSION = \"3.7.1\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print(\"### could not open file {0!r}\".format(file_path), file=sys.stderr)\n return content\n\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [file_path + f for f in file_list]\n\n\ninstall_requires = [\"beautifulsoup4[lxml]>=4.3.2\",\n \"cbor2>=5.0.1\",\n \"configobj>=5.0.6\",\n \"croniter>=0.3.8\",\n \"cryptography>=2.4.2\",\n \"defusedxml>=0.4.1\",\n \"Flask>=0.10.1,<2.0\",\n \"Flask-Babel>=0.9\",\n \"Flask-Migrate>=1.2.0,<3.0\",\n \"Flask-Script>=2.0.5\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Versioned>=0.9.4\",\n \"future>=0.18.2;python_version<'3.0'\",\n \"google-auth>=1.23.0\",\n \"huey[redis]>=1.11.0\",\n \"importlib_metadata>=2.1.1\",\n \"ldap3>=2.6\",\n \"netaddr>=0.7.12\",\n \"passlib[bcrypt]>=1.7.0\",\n \"argon2_cffi>=20.1.0\",\n \"Pillow>=6.2.1\",\n \"pydash>=4.7.4\",\n \"PyJWT>=1.3.0\",\n \"PyMySQL>=0.6.6\",\n \"pyOpenSSL>=17.5\",\n \"pyrad>=2.0\",\n \"python-dateutil>=2.7.3\",\n \"python-gnupg>=0.4.4\",\n \"PyYAML>=5.1\",\n \"qrcode>=6.1\",\n \"requests>=2.7.0\",\n \"smpplib>=2.0\",\n \"SQLAlchemy>=1.3.0,<1.4.0\",\n \"sqlsoup>=0.9.0\"]\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'doc': [\"Pallets-Sphinx-Themes>=1.2.3\",\n \"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\",\n \"sphinxcontrib-plantuml>=0.18\",\n \"sphinxcontrib-spelling>=7.0.0\"],\n 'test': [\"mock>=2.0.0\",\n 
\"pytest>=3.6.0\",\n \"pytest-cov>=2.5.1\",\n \"responses>=0.9.0\",\n \"testfixtures>=6.14.2\"],\n 'postgres': ['psycopg2>=2.8.3'],\n 'hsm': ['PyKCS11>=1.5.10']\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\")),\n ('lib/privacyidea/', ['requirements.txt'])\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\",\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'\n ],\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}]}
| 2,344 | 128 |
gh_patches_debug_10221
|
rasdani/github-patches
|
git_diff
|
data-for-change__anyway-164
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move to postgres
That's the database of choice hasadna uses
<!---
@huboard:{"order":125.5,"milestone_order":81,"custom_state":""}
-->
</issue>
<code>
[start of config.py]
1 import os
2
3 #
4 # This is the configuration file of the application
5 #
6 # Please make sure you don't store here any secret information and use environment
7 # variables
8 #
9
10
11 SQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL')
12 SQLALCHEMY_POOL_RECYCLE = 60
13
14
15 SECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf'
16
17 FACEBOOK_KEY = "157028231131213"
18 FACEBOOK_SECRET = "0437ee70207dca46609219b990be0614"
19
[end of config.py]
[start of mysqlshell.py]
1 #!/usr/bin/env python
2 import sys
3 import os
4 import urlparse
5
6 def main():
7 DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL')
8
9 if not DATABASE_URI:
10 print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set'
11 sys.exit(1)
12
13 db = urlparse.urlparse(DATABASE_URI)
14 os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:])
15
16 if __name__ == '__main__':
17 main()
18
[end of mysqlshell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -8,7 +8,7 @@
#
-SQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL')
+SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_POOL_RECYCLE = 60
diff --git a/mysqlshell.py b/mysqlshell.py
deleted file mode 100755
--- a/mysqlshell.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env python
-import sys
-import os
-import urlparse
-
-def main():
- DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL')
-
- if not DATABASE_URI:
- print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set'
- sys.exit(1)
-
- db = urlparse.urlparse(DATABASE_URI)
- os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:])
-
-if __name__ == '__main__':
- main()
|
{"golden_diff": "diff --git a/config.py b/config.py\n--- a/config.py\n+++ b/config.py\n@@ -8,7 +8,7 @@\n #\n \n \n-SQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL')\n+SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')\n SQLALCHEMY_POOL_RECYCLE = 60\n \n \ndiff --git a/mysqlshell.py b/mysqlshell.py\ndeleted file mode 100755\n--- a/mysqlshell.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-#!/usr/bin/env python\n-import sys\n-import os\n-import urlparse\n-\n-def main():\n- DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL')\n-\n- if not DATABASE_URI:\n- print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set'\n- sys.exit(1)\n-\n- db = urlparse.urlparse(DATABASE_URI)\n- os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:])\n-\n-if __name__ == '__main__':\n- main()\n", "issue": "Move to postgres\nThat's the database of choice hasadna uses\n\n<!---\n@huboard:{\"order\":125.5,\"milestone_order\":81,\"custom_state\":\"\"}\n-->\n\n", "before_files": [{"content": "import os\n\n#\n# This is the configuration file of the application\n#\n# Please make sure you don't store here any secret information and use environment\n# variables\n#\n\n\nSQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL')\nSQLALCHEMY_POOL_RECYCLE = 60\n\n\nSECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf'\n\nFACEBOOK_KEY = \"157028231131213\"\nFACEBOOK_SECRET = \"0437ee70207dca46609219b990be0614\"\n", "path": "config.py"}, {"content": "#!/usr/bin/env python\nimport sys\nimport os\nimport urlparse\n\ndef main():\n DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL')\n\n if not DATABASE_URI:\n print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set'\n sys.exit(1)\n\n db = urlparse.urlparse(DATABASE_URI)\n os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:])\n\nif __name__ == '__main__':\n main()\n", "path": "mysqlshell.py"}]}
| 906 | 242 |
gh_patches_debug_21300
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5719
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: fine-tuning llama-2-7b-hf, the prepare-dataset step fails with: `bos_token` and `eos_token` should be the same with `conversation_template.seps`.
### Is there an existing issue for this bug?
- [X] I have searched the existing issues
### 🐛 Describe the bug
python prepare_sft_dataset.py --data_input_dirs /models/train-data-dir --tokenizer_dir /models/llama-2-13b-hf --data_output_dirs /models/train-data-dir/out --num_spliced_dataset_bins 1
File "/ColossalAI/applications/Colossal-LLaMA/colossal_llama/dataset/spliced_and_tokenized_dataset.py", line 90, in supervised_tokenize_sft
tokenizer.bos_token == conversation_template.seps[0] and tokenizer.eos_token == conversation_template.seps[1]
AssertionError: `bos_token` and `eos_token` should be the same with `conversation_template.seps`.
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/ColossalAI/applications/Colossal-LLaMA/prepare_sft_dataset.py", line 146, in <module>
main()
File "/ColossalAI/applications/Colossal-LLaMA/prepare_sft_dataset.py", line 102, in main
dataset = dataset.map(
File "/usr/local/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 602, in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 567, in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 3248, in map
for rank, done, content in iflatmap_unordered(
File "/usr/local/lib/python3.10/site-packages/datasets/utils/py_utils.py", line 718, in iflatmap_unordered
[async_result.get(timeout=0.05) for async_result in async_results]
File "/usr/local/lib/python3.10/site-packages/datasets/utils/py_utils.py", line 718, in <listcomp>
[async_result.get(timeout=0.05) for async_result in async_results]
File "/usr/local/lib/python3.10/site-packages/multiprocess/pool.py", line 774, in get
raise self._value
**AssertionError: `bos_token` and `eos_token` should be the same with `conversation_template.seps`.**
### Environment
------------ Environment ------------
Colossal-AI version: 0.3.7
PyTorch version: 2.1.0
System CUDA version: 11.0
CUDA version required by PyTorch: 11.8
tokenizer_config.json
"add_bos_token": true,
"add_eos_token": false,
"bos_token": {
"__type": "AddedToken",
"content": "<s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"clean_up_tokenization_spaces": false,
"eos_token": {
"__type": "AddedToken",
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
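For orientation before the code listing: the assertion in the traceback compares the tokenizer's special tokens against the conversation template's separator strings, and the patch further down resolves the mismatch by selecting the LLaMA-2 template (`LLaMA2_Conv`) when `--llama_version 2` is used. A minimal sketch of that check, with a hypothetical `Conversation` stand-in for the real template class, is:

```python
from dataclasses import dataclass

@dataclass
class Conversation:              # hypothetical stand-in for the real template class
    seps: tuple

llama2_conv = Conversation(seps=("<s>", "</s>"))   # matches a LLaMA-2 tokenizer

def check_template(tokenizer, template):
    # Reproduction of the failing consistency check: the template's separators
    # must literally equal the tokenizer's bos/eos tokens.
    assert (
        tokenizer.bos_token == template.seps[0]
        and tokenizer.eos_token == template.seps[1]
    ), "`bos_token` and `eos_token` should be the same with `conversation_template.seps`."
```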
<code>
[start of applications/Colossal-LLaMA/prepare_sft_dataset.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 """
4 Prepare sft dataset for fine-tuning
5 """
6
7 import argparse
8 import json
9 import math
10 import os
11 from multiprocessing import cpu_count
12
13 from colossal_llama.dataset.conversation import default_conversation
14 from colossal_llama.dataset.spliced_and_tokenized_dataset import supervised_tokenize_sft
15 from datasets import dataset_dict, load_dataset
16 from transformers import AddedToken, AutoTokenizer
17
18 from colossalai.logging import get_dist_logger
19
20 logger = get_dist_logger()
21
22
23 def main():
24 parser = argparse.ArgumentParser()
25 parser.add_argument(
26 "--data_input_dirs",
27 type=str,
28 required=True,
29 default=None,
30 help="Comma(i.e., ',') separated list of all data directories containing `.jsonl` data files.",
31 )
32 parser.add_argument(
33 "--tokenizer_dir", type=str, required=True, default=None, help="A directory containing the tokenizer"
34 )
35 parser.add_argument("--data_output_dirs", type=str, default="data_output_dirs", help="Data output directory")
36 parser.add_argument("--max_length", type=int, default=8192, help="Max length of each spliced tokenized sequence")
37 parser.add_argument("--num_spliced_dataset_bins", type=int, default=10, help="Number of spliced dataset bins")
38 parser.add_argument("--llama_version", type=int, default=3, help="LLaMA version")
39 args = parser.parse_args()
40
41 if args.num_spliced_dataset_bins >= 100000:
42 raise ValueError("Too many spliced divisions, must be smaller than 100000")
43
44 args.data_cache_dir = os.path.join(args.data_output_dirs, "cache")
45 args.data_jsonl_output_dir = os.path.join(args.data_output_dirs, "jsonl")
46 args.data_arrow_output_dir = os.path.join(args.data_output_dirs, "arrow")
47
48 if not os.path.exists(args.data_cache_dir):
49 os.makedirs(args.data_cache_dir)
50 if not os.path.exists(args.data_jsonl_output_dir):
51 os.makedirs(args.data_jsonl_output_dir)
52 if not os.path.exists(args.data_arrow_output_dir):
53 os.makedirs(args.data_arrow_output_dir)
54
55 # Prepare to all input datasets
56 input_data_paths = []
57 input_data_dirs = args.data_input_dirs.split(",")
58 for ds_dir in input_data_dirs:
59 ds_dir = os.path.abspath(ds_dir)
60 assert os.path.exists(ds_dir), f"Not find data dir {ds_dir}"
61 ds_files = [name for name in os.listdir(ds_dir) if name.endswith(".jsonl")]
62 ds_paths = [os.path.join(ds_dir, name) for name in ds_files]
63 input_data_paths.extend(ds_paths)
64
65 # Prepare to data splitting.
66 train_splits = []
67 split_interval = math.ceil(100 / args.num_spliced_dataset_bins)
68 for i in range(0, 100, split_interval):
69 start = i
70 end = i + split_interval
71 if end > 100:
72 end = 100
73 train_splits.append(f"train[{start}%:{end}%]")
74
75 # Prepare to the tokenizer.
76 tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
77
78 # Fix </s> split issue: https://github.com/huggingface/transformers/issues/23833
79 if args.llama_version == 2:
80 tokenizer.add_tokens(AddedToken("</s>", normalized=False, special=True), special_tokens=True)
81
82 tokenizer.add_bos_token = False
83 tokenizer.add_eos_token = False
84 if tokenizer.pad_token is None:
85 if tokenizer.unk_token is not None:
86 tokenizer.pad_token = tokenizer.unk_token
87 else:
88 tokenizer.pad_token = tokenizer.eos_token
89 tokenizer.unk_token = tokenizer.eos_token
90
91 list_dataset = load_dataset(
92 path="json",
93 data_files=input_data_paths,
94 cache_dir=os.path.join(args.data_cache_dir, "raw"),
95 keep_in_memory=False,
96 split=train_splits,
97 num_proc=cpu_count(),
98 )
99 for index, dataset in enumerate(list_dataset):
100 assert isinstance(dataset, dataset_dict.Dataset)
101 logger.info(f"Start to process part-{index}/{len(list_dataset)} of all original datasets.")
102 dataset = dataset.map(
103 function=supervised_tokenize_sft,
104 fn_kwargs={
105 "tokenizer": tokenizer,
106 "conversation_template": default_conversation,
107 "max_length": args.max_length,
108 },
109 keep_in_memory=False,
110 num_proc=min(len(dataset), cpu_count()),
111 )
112
113 dataset = dataset.filter(lambda data: data["labels"] is not None)
114 dataset = dataset.sort(column_names=("seq_category", "seq_length"), reverse=False, keep_in_memory=False)
115
116 # We don't concatenate data samples here.
117 spliced_dataset = dataset
118 # Save each jsonl spliced dataset.
119 output_index = "0" * (5 - len(str(index))) + str(index)
120 output_name = f"part-{output_index}"
121 output_jsonl_path = os.path.join(args.data_jsonl_output_dir, output_name + ".jsonl")
122 # st = time.time()
123 with open(file=output_jsonl_path, mode="w", encoding="utf-8") as fp_writer:
124 spliced_count = 0
125 for spliced_data_point in spliced_dataset:
126 if spliced_count % 500 == 0:
127 logger.info(f"processing {spliced_count} spliced data points for {fp_writer.name}")
128 spliced_count += 1
129 fp_writer.write(json.dumps(spliced_data_point, ensure_ascii=False) + "\n")
130
131 # Save each arrow spliced dataset
132 output_arrow_path = os.path.join(args.data_arrow_output_dir, output_name)
133 logger.info(f"Start to save {output_arrow_path}")
134 spliced_dataset = load_dataset(
135 path="json",
136 data_files=[output_jsonl_path],
137 cache_dir=os.path.join(args.data_cache_dir, "spliced_and_tokenized"),
138 keep_in_memory=False,
139 num_proc=cpu_count(),
140 split="train",
141 )
142 spliced_dataset.save_to_disk(dataset_path=output_arrow_path, num_proc=min(len(spliced_dataset), cpu_count()))
143
144
145 if __name__ == "__main__":
146 main()
147
[end of applications/Colossal-LLaMA/prepare_sft_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/applications/Colossal-LLaMA/prepare_sft_dataset.py b/applications/Colossal-LLaMA/prepare_sft_dataset.py
--- a/applications/Colossal-LLaMA/prepare_sft_dataset.py
+++ b/applications/Colossal-LLaMA/prepare_sft_dataset.py
@@ -10,7 +10,7 @@
import os
from multiprocessing import cpu_count
-from colossal_llama.dataset.conversation import default_conversation
+from colossal_llama.dataset.conversation import LLaMA2_Conv
from colossal_llama.dataset.spliced_and_tokenized_dataset import supervised_tokenize_sft
from datasets import dataset_dict, load_dataset
from transformers import AddedToken, AutoTokenizer
@@ -78,6 +78,7 @@
# Fix </s> split issue: https://github.com/huggingface/transformers/issues/23833
if args.llama_version == 2:
tokenizer.add_tokens(AddedToken("</s>", normalized=False, special=True), special_tokens=True)
+ default_conversation = LLaMA2_Conv
tokenizer.add_bos_token = False
tokenizer.add_eos_token = False
|
{"golden_diff": "diff --git a/applications/Colossal-LLaMA/prepare_sft_dataset.py b/applications/Colossal-LLaMA/prepare_sft_dataset.py\n--- a/applications/Colossal-LLaMA/prepare_sft_dataset.py\n+++ b/applications/Colossal-LLaMA/prepare_sft_dataset.py\n@@ -10,7 +10,7 @@\n import os\n from multiprocessing import cpu_count\n \n-from colossal_llama.dataset.conversation import default_conversation\n+from colossal_llama.dataset.conversation import LLaMA2_Conv\n from colossal_llama.dataset.spliced_and_tokenized_dataset import supervised_tokenize_sft\n from datasets import dataset_dict, load_dataset\n from transformers import AddedToken, AutoTokenizer\n@@ -78,6 +78,7 @@\n # Fix </s> split issue: https://github.com/huggingface/transformers/issues/23833\n if args.llama_version == 2:\n tokenizer.add_tokens(AddedToken(\"</s>\", normalized=False, special=True), special_tokens=True)\n+ default_conversation = LLaMA2_Conv\n \n tokenizer.add_bos_token = False\n tokenizer.add_eos_token = False\n", "issue": "[BUG]: fine train llama-2-7b-hf prepare data set error , `bos_token` and `eos_token` should be the same with `conversation_template.seps`.\n### Is there an existing issue for this bug?\n\n- [X] I have searched the existing issues\n\n### \ud83d\udc1b Describe the bug\n\n python prepare_sft_dataset.py --data_input_dirs /models/train-data-dir --tokenizer_dir /models/llama-2-13b-hf --data_output_dirs /models/train-data-dir/out --num_spliced_dataset_bins 1\r\n\r\n\r\n\r\n\r\n File \"/ColossalAI/applications/Colossal-LLaMA/colossal_llama/dataset/spliced_and_tokenized_dataset.py\", line 90, in supervised_tokenize_sft\r\n tokenizer.bos_token == conversation_template.seps[0] and tokenizer.eos_token == conversation_template.seps[1]\r\nAssertionError: `bos_token` and `eos_token` should be the same with `conversation_template.seps`.\r\n\"\"\"\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/ColossalAI/applications/Colossal-LLaMA/prepare_sft_dataset.py\", line 146, in <module>\r\n main()\r\n File \"/ColossalAI/applications/Colossal-LLaMA/prepare_sft_dataset.py\", line 102, in main\r\n dataset = dataset.map(\r\n File \"/usr/local/lib/python3.10/site-packages/datasets/arrow_dataset.py\", line 602, in wrapper\r\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\r\n File \"/usr/local/lib/python3.10/site-packages/datasets/arrow_dataset.py\", line 567, in wrapper\r\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\r\n File \"/usr/local/lib/python3.10/site-packages/datasets/arrow_dataset.py\", line 3248, in map\r\n for rank, done, content in iflatmap_unordered(\r\n File \"/usr/local/lib/python3.10/site-packages/datasets/utils/py_utils.py\", line 718, in iflatmap_unordered\r\n [async_result.get(timeout=0.05) for async_result in async_results]\r\n File \"/usr/local/lib/python3.10/site-packages/datasets/utils/py_utils.py\", line 718, in <listcomp>\r\n [async_result.get(timeout=0.05) for async_result in async_results]\r\n File \"/usr/local/lib/python3.10/site-packages/multiprocess/pool.py\", line 774, in get\r\n raise self._value\r\n**AssertionError: `bos_token` and `eos_token` should be the same with `conversation_template.seps`.**\n\n### Environment\n\n------------ Environment ------------\r\nColossal-AI version: 0.3.7\r\nPyTorch version: 2.1.0\r\nSystem CUDA version: 11.0\r\nCUDA version required by PyTorch: 11.8\r\n\r\ntokenizer_config.json \r\n\r\n \"add_bos_token\": true,\r\n \"add_eos_token\": 
false,\r\n \"bos_token\": {\r\n \"__type\": \"AddedToken\",\r\n \"content\": \"<s>\",\r\n \"lstrip\": false,\r\n \"normalized\": true,\r\n \"rstrip\": false,\r\n \"single_word\": false\r\n },\r\n \"clean_up_tokenization_spaces\": false,\r\n \"eos_token\": {\r\n \"__type\": \"AddedToken\",\r\n \"content\": \"</s>\",\r\n \"lstrip\": false,\r\n \"normalized\": true,\r\n \"rstrip\": false,\r\n \"single_word\": false\r\n },\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPrepare sft dataset for fine-tuning\n\"\"\"\n\nimport argparse\nimport json\nimport math\nimport os\nfrom multiprocessing import cpu_count\n\nfrom colossal_llama.dataset.conversation import default_conversation\nfrom colossal_llama.dataset.spliced_and_tokenized_dataset import supervised_tokenize_sft\nfrom datasets import dataset_dict, load_dataset\nfrom transformers import AddedToken, AutoTokenizer\n\nfrom colossalai.logging import get_dist_logger\n\nlogger = get_dist_logger()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_input_dirs\",\n type=str,\n required=True,\n default=None,\n help=\"Comma(i.e., ',') separated list of all data directories containing `.jsonl` data files.\",\n )\n parser.add_argument(\n \"--tokenizer_dir\", type=str, required=True, default=None, help=\"A directory containing the tokenizer\"\n )\n parser.add_argument(\"--data_output_dirs\", type=str, default=\"data_output_dirs\", help=\"Data output directory\")\n parser.add_argument(\"--max_length\", type=int, default=8192, help=\"Max length of each spliced tokenized sequence\")\n parser.add_argument(\"--num_spliced_dataset_bins\", type=int, default=10, help=\"Number of spliced dataset bins\")\n parser.add_argument(\"--llama_version\", type=int, default=3, help=\"LLaMA version\")\n args = parser.parse_args()\n\n if args.num_spliced_dataset_bins >= 100000:\n raise ValueError(\"Too many spliced divisions, must be smaller than 100000\")\n\n args.data_cache_dir = os.path.join(args.data_output_dirs, \"cache\")\n args.data_jsonl_output_dir = os.path.join(args.data_output_dirs, \"jsonl\")\n args.data_arrow_output_dir = os.path.join(args.data_output_dirs, \"arrow\")\n\n if not os.path.exists(args.data_cache_dir):\n os.makedirs(args.data_cache_dir)\n if not os.path.exists(args.data_jsonl_output_dir):\n os.makedirs(args.data_jsonl_output_dir)\n if not os.path.exists(args.data_arrow_output_dir):\n os.makedirs(args.data_arrow_output_dir)\n\n # Prepare to all input datasets\n input_data_paths = []\n input_data_dirs = args.data_input_dirs.split(\",\")\n for ds_dir in input_data_dirs:\n ds_dir = os.path.abspath(ds_dir)\n assert os.path.exists(ds_dir), f\"Not find data dir {ds_dir}\"\n ds_files = [name for name in os.listdir(ds_dir) if name.endswith(\".jsonl\")]\n ds_paths = [os.path.join(ds_dir, name) for name in ds_files]\n input_data_paths.extend(ds_paths)\n\n # Prepare to data splitting.\n train_splits = []\n split_interval = math.ceil(100 / args.num_spliced_dataset_bins)\n for i in range(0, 100, split_interval):\n start = i\n end = i + split_interval\n if end > 100:\n end = 100\n train_splits.append(f\"train[{start}%:{end}%]\")\n\n # Prepare to the tokenizer.\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)\n\n # Fix </s> split issue: https://github.com/huggingface/transformers/issues/23833\n if args.llama_version == 2:\n tokenizer.add_tokens(AddedToken(\"</s>\", normalized=False, special=True), 
special_tokens=True)\n\n tokenizer.add_bos_token = False\n tokenizer.add_eos_token = False\n if tokenizer.pad_token is None:\n if tokenizer.unk_token is not None:\n tokenizer.pad_token = tokenizer.unk_token\n else:\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.unk_token = tokenizer.eos_token\n\n list_dataset = load_dataset(\n path=\"json\",\n data_files=input_data_paths,\n cache_dir=os.path.join(args.data_cache_dir, \"raw\"),\n keep_in_memory=False,\n split=train_splits,\n num_proc=cpu_count(),\n )\n for index, dataset in enumerate(list_dataset):\n assert isinstance(dataset, dataset_dict.Dataset)\n logger.info(f\"Start to process part-{index}/{len(list_dataset)} of all original datasets.\")\n dataset = dataset.map(\n function=supervised_tokenize_sft,\n fn_kwargs={\n \"tokenizer\": tokenizer,\n \"conversation_template\": default_conversation,\n \"max_length\": args.max_length,\n },\n keep_in_memory=False,\n num_proc=min(len(dataset), cpu_count()),\n )\n\n dataset = dataset.filter(lambda data: data[\"labels\"] is not None)\n dataset = dataset.sort(column_names=(\"seq_category\", \"seq_length\"), reverse=False, keep_in_memory=False)\n\n # We don't concatenate data samples here.\n spliced_dataset = dataset\n # Save each jsonl spliced dataset.\n output_index = \"0\" * (5 - len(str(index))) + str(index)\n output_name = f\"part-{output_index}\"\n output_jsonl_path = os.path.join(args.data_jsonl_output_dir, output_name + \".jsonl\")\n # st = time.time()\n with open(file=output_jsonl_path, mode=\"w\", encoding=\"utf-8\") as fp_writer:\n spliced_count = 0\n for spliced_data_point in spliced_dataset:\n if spliced_count % 500 == 0:\n logger.info(f\"processing {spliced_count} spliced data points for {fp_writer.name}\")\n spliced_count += 1\n fp_writer.write(json.dumps(spliced_data_point, ensure_ascii=False) + \"\\n\")\n\n # Save each arrow spliced dataset\n output_arrow_path = os.path.join(args.data_arrow_output_dir, output_name)\n logger.info(f\"Start to save {output_arrow_path}\")\n spliced_dataset = load_dataset(\n path=\"json\",\n data_files=[output_jsonl_path],\n cache_dir=os.path.join(args.data_cache_dir, \"spliced_and_tokenized\"),\n keep_in_memory=False,\n num_proc=cpu_count(),\n split=\"train\",\n )\n spliced_dataset.save_to_disk(dataset_path=output_arrow_path, num_proc=min(len(spliced_dataset), cpu_count()))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "applications/Colossal-LLaMA/prepare_sft_dataset.py"}]}
| 3,048 | 256 |
gh_patches_debug_38347
|
rasdani/github-patches
|
git_diff
|
NVIDIA-Merlin__NVTabular-568
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fill Missing Op option to create a new binary column indicating the value was replaced.
The Fill Missing op should have the option to create a new binary column indicating whether each value was filled, for continuous variables.
This is a common feature when dealing with missing values in categoricals.
</issue>
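In code terms the request is the usual "fill plus indicator" pattern, which is also what the patch below adds behind an `add_binary_cols` flag. A minimal sketch outside the operator API (helper name hypothetical; pandas shown, cudf mirrors these calls):

```python
import pandas as pd   # cudf exposes the same isna()/fillna() API

def fill_with_indicator(df, columns, fill_val=0):
    # Hypothetical helper illustrating the requested behaviour: for every column,
    # record where a value was missing, then fill the gap with a constant.
    for col in columns:
        df[f"{col}_filled"] = df[col].isna()
        df[col] = df[col].fillna(fill_val)
    return df

df = fill_with_indicator(pd.DataFrame({"cont1": [1.0, None, 3.0]}), ["cont1"])
# df now has a boolean "cont1_filled" column marking the replaced cell
```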
<code>
[start of nvtabular/ops/fill.py]
1 #
2 # Copyright (c) 2020, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 import cudf
17 import dask_cudf
18 from nvtx import annotate
19
20 from .operator import ColumnNames, Operator
21 from .stat_operator import StatOperator
22
23
24 class FillMissing(Operator):
25 """
26 This operation replaces missing values with a constant pre-defined value
27
28 Example usage::
29
30 # Use FillMissing to define a workflow for continuous columns and specify the fill value
31 # Default is 0
32 cont_features = ['cont1', 'cont2', 'cont3'] >> ops.FillMissing() >> ...
33 processor = nvtabular.Workflow(cont_features)
34
35 Parameters
36 -----------
37 fill_val : float, default 0
38 The constant value to replace missing values with.
39 """
40
41 def __init__(self, fill_val=0):
42 super().__init__()
43 self.fill_val = fill_val
44
45 @annotate("FillMissing_op", color="darkgreen", domain="nvt_python")
46 def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame:
47 return gdf[columns].fillna(self.fill_val)
48
49 transform.__doc__ = Operator.transform.__doc__
50
51
52 class FillMedian(StatOperator):
53 """
54 This operation replaces missing values with the median value for the column.
55
56 Example usage::
57
58 # Initialize the workflow
59 proc = nvt.Workflow(
60 cat_names=CATEGORICAL_COLUMNS,
61 cont_names=CONTINUOUS_COLUMNS,
62 label_name=LABEL_COLUMNS
63 )
64
65 # Add FillMedian to the workflow for continuous columns
66 proc.add_cont_feature(nvt.ops.FillMedian())
67 """
68
69 def __init__(self):
70 super().__init__()
71 self.medians = {}
72
73 @annotate("FillMedian_transform", color="darkgreen", domain="nvt_python")
74 def transform(self, columns: ColumnNames, gdf: cudf.DataFrame) -> cudf.DataFrame:
75 if not self.medians:
76 raise RuntimeError("need to call 'fit' before running transform")
77
78 for col in columns:
79 gdf[col] = gdf[col].fillna(self.medians[col])
80 return gdf
81
82 @annotate("FillMedian_fit", color="green", domain="nvt_python")
83 def fit(self, columns: ColumnNames, ddf: dask_cudf.DataFrame):
84 # TODO: Use `method="tidigest"` when crick supports device
85 dask_stats = ddf[columns].quantile(q=0.5, method="dask")
86 return dask_stats
87
88 @annotate("FillMedian_finalize", color="green", domain="nvt_python")
89 def fit_finalize(self, dask_stats):
90 for col in dask_stats.index.values_host:
91 self.medians[col] = float(dask_stats[col])
92
93 transform.__doc__ = Operator.transform.__doc__
94 fit.__doc__ = StatOperator.fit.__doc__
95 fit_finalize.__doc__ = StatOperator.fit_finalize.__doc__
96
97 def clear(self):
98 self.medians = {}
99
[end of nvtabular/ops/fill.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nvtabular/ops/fill.py b/nvtabular/ops/fill.py
--- a/nvtabular/ops/fill.py
+++ b/nvtabular/ops/fill.py
@@ -36,18 +36,34 @@
-----------
fill_val : float, default 0
The constant value to replace missing values with.
+ add_binary_cols : boolean, default False
+ When True, adds binary columns that indicate whether cells in each column were filled
"""
- def __init__(self, fill_val=0):
+ def __init__(self, fill_val=0, add_binary_cols=False):
super().__init__()
self.fill_val = fill_val
+ self.add_binary_cols = add_binary_cols
@annotate("FillMissing_op", color="darkgreen", domain="nvt_python")
def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame:
- return gdf[columns].fillna(self.fill_val)
+ if self.add_binary_cols:
+ for col in columns:
+ gdf[f"{col}_filled"] = gdf[col].isna()
+ gdf[col] = gdf[col].fillna(self.fill_val)
+ else:
+ gdf[columns] = gdf[columns].fillna(self.fill_val)
+
+ return gdf
transform.__doc__ = Operator.transform.__doc__
+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:
+ output_cols = columns[:]
+ if self.add_binary_cols:
+ output_cols.extend([f"{col}_filled" for col in columns])
+ return output_cols
+
class FillMedian(StatOperator):
"""
@@ -64,10 +80,16 @@
# Add FillMedian to the workflow for continuous columns
proc.add_cont_feature(nvt.ops.FillMedian())
+
+ Parameters
+ -----------
+ add_binary_cols : boolean, default False
+ When True, adds binary columns that indicate whether cells in each column were filled
"""
- def __init__(self):
+ def __init__(self, add_binary_cols=False):
super().__init__()
+ self.add_binary_cols = add_binary_cols
self.medians = {}
@annotate("FillMedian_transform", color="darkgreen", domain="nvt_python")
@@ -76,6 +98,8 @@
raise RuntimeError("need to call 'fit' before running transform")
for col in columns:
+ if self.add_binary_cols:
+ gdf[f"{col}_filled"] = gdf[col].isna()
gdf[col] = gdf[col].fillna(self.medians[col])
return gdf
@@ -96,3 +120,9 @@
def clear(self):
self.medians = {}
+
+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:
+ output_cols = columns[:]
+ if self.add_binary_cols:
+ output_cols.extend([f"{col}_filled" for col in columns])
+ return output_cols
|
{"golden_diff": "diff --git a/nvtabular/ops/fill.py b/nvtabular/ops/fill.py\n--- a/nvtabular/ops/fill.py\n+++ b/nvtabular/ops/fill.py\n@@ -36,18 +36,34 @@\n -----------\n fill_val : float, default 0\n The constant value to replace missing values with.\n+ add_binary_cols : boolean, default False\n+ When True, adds binary columns that indicate whether cells in each column were filled\n \"\"\"\n \n- def __init__(self, fill_val=0):\n+ def __init__(self, fill_val=0, add_binary_cols=False):\n super().__init__()\n self.fill_val = fill_val\n+ self.add_binary_cols = add_binary_cols\n \n @annotate(\"FillMissing_op\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame:\n- return gdf[columns].fillna(self.fill_val)\n+ if self.add_binary_cols:\n+ for col in columns:\n+ gdf[f\"{col}_filled\"] = gdf[col].isna()\n+ gdf[col] = gdf[col].fillna(self.fill_val)\n+ else:\n+ gdf[columns] = gdf[columns].fillna(self.fill_val)\n+\n+ return gdf\n \n transform.__doc__ = Operator.transform.__doc__\n \n+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:\n+ output_cols = columns[:]\n+ if self.add_binary_cols:\n+ output_cols.extend([f\"{col}_filled\" for col in columns])\n+ return output_cols\n+\n \n class FillMedian(StatOperator):\n \"\"\"\n@@ -64,10 +80,16 @@\n \n # Add FillMedian to the workflow for continuous columns\n proc.add_cont_feature(nvt.ops.FillMedian())\n+\n+ Parameters\n+ -----------\n+ add_binary_cols : boolean, default False\n+ When True, adds binary columns that indicate whether cells in each column were filled\n \"\"\"\n \n- def __init__(self):\n+ def __init__(self, add_binary_cols=False):\n super().__init__()\n+ self.add_binary_cols = add_binary_cols\n self.medians = {}\n \n @annotate(\"FillMedian_transform\", color=\"darkgreen\", domain=\"nvt_python\")\n@@ -76,6 +98,8 @@\n raise RuntimeError(\"need to call 'fit' before running transform\")\n \n for col in columns:\n+ if self.add_binary_cols:\n+ gdf[f\"{col}_filled\"] = gdf[col].isna()\n gdf[col] = gdf[col].fillna(self.medians[col])\n return gdf\n \n@@ -96,3 +120,9 @@\n \n def clear(self):\n self.medians = {}\n+\n+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:\n+ output_cols = columns[:]\n+ if self.add_binary_cols:\n+ output_cols.extend([f\"{col}_filled\" for col in columns])\n+ return output_cols\n", "issue": "Fill Missing Op option to create a new binary column indicating the value was replaced.\nFill Missing Op should have the option to create a new binary column indicating whether the column was filled or not for continuous variables.\r\n\r\nThis is a common feature used when dealing with missing values of categoricals.\n", "before_files": [{"content": "#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport cudf\nimport dask_cudf\nfrom nvtx import annotate\n\nfrom .operator import ColumnNames, Operator\nfrom .stat_operator import StatOperator\n\n\nclass FillMissing(Operator):\n \"\"\"\n This 
operation replaces missing values with a constant pre-defined value\n\n Example usage::\n\n # Use FillMissing to define a workflow for continuous columns and specify the fill value\n # Default is 0\n cont_features = ['cont1', 'cont2', 'cont3'] >> ops.FillMissing() >> ...\n processor = nvtabular.Workflow(cont_features)\n\n Parameters\n -----------\n fill_val : float, default 0\n The constant value to replace missing values with.\n \"\"\"\n\n def __init__(self, fill_val=0):\n super().__init__()\n self.fill_val = fill_val\n\n @annotate(\"FillMissing_op\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame:\n return gdf[columns].fillna(self.fill_val)\n\n transform.__doc__ = Operator.transform.__doc__\n\n\nclass FillMedian(StatOperator):\n \"\"\"\n This operation replaces missing values with the median value for the column.\n\n Example usage::\n\n # Initialize the workflow\n proc = nvt.Workflow(\n cat_names=CATEGORICAL_COLUMNS,\n cont_names=CONTINUOUS_COLUMNS,\n label_name=LABEL_COLUMNS\n )\n\n # Add FillMedian to the workflow for continuous columns\n proc.add_cont_feature(nvt.ops.FillMedian())\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.medians = {}\n\n @annotate(\"FillMedian_transform\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, columns: ColumnNames, gdf: cudf.DataFrame) -> cudf.DataFrame:\n if not self.medians:\n raise RuntimeError(\"need to call 'fit' before running transform\")\n\n for col in columns:\n gdf[col] = gdf[col].fillna(self.medians[col])\n return gdf\n\n @annotate(\"FillMedian_fit\", color=\"green\", domain=\"nvt_python\")\n def fit(self, columns: ColumnNames, ddf: dask_cudf.DataFrame):\n # TODO: Use `method=\"tidigest\"` when crick supports device\n dask_stats = ddf[columns].quantile(q=0.5, method=\"dask\")\n return dask_stats\n\n @annotate(\"FillMedian_finalize\", color=\"green\", domain=\"nvt_python\")\n def fit_finalize(self, dask_stats):\n for col in dask_stats.index.values_host:\n self.medians[col] = float(dask_stats[col])\n\n transform.__doc__ = Operator.transform.__doc__\n fit.__doc__ = StatOperator.fit.__doc__\n fit_finalize.__doc__ = StatOperator.fit_finalize.__doc__\n\n def clear(self):\n self.medians = {}\n", "path": "nvtabular/ops/fill.py"}]}
| 1,559 | 682 |
gh_patches_debug_22678
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-1153
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Low fps with high-frequency mouse
If the mouse frequency is set to 1000 Hz (as on specialized gaming mice), the rendering speed falls dramatically for vispy widgets that use the scene graph.
Moving a 125Hz mouse around: 80-120 fps
Moving a 1000Hz mouse around: 5-20 fps
Code:
``` python
import numpy as np
from vispy import app, scene
from vispy.visuals import MarkersVisual
app.use_app("PyQt4")
canvas = scene.SceneCanvas(show=True)
canvas.measure_fps()
view = canvas.central_widget.add_view()
view.camera = 'panzoom'
view.camera.rect = (0, 0, 800, 800)
Markers = scene.visuals.create_visual_node(MarkersVisual)
vis = Markers()
vis.set_data(np.array([[100, 100, 0], [400, 100, 0], [400, 400, 0], [100, 400, 0]], dtype=np.float32))
view.add(vis)
app.run()
```
</issue>
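The symptom is that every mouse-move event triggers scene-graph work, so a 1000 Hz device schedules roughly eight times the per-second work of a 125 Hz one. The patch below throttles move events to at most one per 10 ms; a standalone sketch of the same idea, with a hypothetical helper name, is:

```python
from timeit import default_timer

class MoveThrottler:
    """Hypothetical helper: drop mouse-move events that arrive faster than
    the scene graph can usefully redraw (here, more than one per 10 ms)."""

    def __init__(self, min_interval=0.01):
        self.min_interval = min_interval
        self._last = 0.0

    def accept(self):
        now = default_timer()
        if now - self._last < self.min_interval:
            return False       # discard this move event
        self._last = now
        return True            # let the event trigger scene updates
```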
<code>
[start of vispy/app/base.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2015, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from ..util import SimpleBunch
6 import time
7
8
9 class BaseApplicationBackend(object):
10 """BaseApplicationBackend()
11
12 Abstract class that provides an interface between backends and Application.
13 Each backend must implement a subclass of ApplicationBackend, and
14 implement all its _vispy_xxx methods.
15 """
16
17 def _vispy_get_backend_name(self):
18 raise NotImplementedError()
19
20 def _vispy_process_events(self):
21 raise NotImplementedError()
22
23 def _vispy_run(self):
24 raise NotImplementedError()
25
26 def _vispy_reuse(self):
27 # Does nothing by default.
28 pass
29
30 def _vispy_quit(self):
31 raise NotImplementedError()
32
33 def _vispy_get_native_app(self):
34 # Should return the native application object
35 return self
36
37 # is called by inputhook.py for pauses
38 # to remove CPU stress
39 # this is virtual so that some backends which have specialize
40 # functionality to deal with user input / latency can use those methods
41 def _vispy_sleep(self, duration_sec):
42 time.sleep(duration_sec)
43
44
45 class BaseCanvasBackend(object):
46 """BaseCanvasBackend(vispy_canvas, capability, context_type)
47
48 Abstract class that provides an interface between backends and Canvas.
49 Each backend must implement a subclass of CanvasBackend, and
50 implement all its _vispy_xxx methods. Also, also a backend must
51 make sure to generate the following events: 'initialize', 'resize',
52 'draw', 'mouse_press', 'mouse_release', 'mouse_move',
53 'mouse_wheel', 'key_press', 'key_release'. When a backend detects
54 that the canvas should be closed, the backend should call
55 'self._vispy_canvas.close', because the close event is handled within
56 the canvas itself.
57 """
58
59 def __init__(self, vispy_canvas):
60 from .canvas import Canvas # Avoid circular import
61 assert isinstance(vispy_canvas, Canvas)
62 self._vispy_canvas = vispy_canvas
63
64 # We set the _backend attribute of the vispy_canvas to self,
65 # because at the end of the __init__ of the CanvasBackend
66 # implementation there might be a call to show or draw. By
67 # setting it here, we ensure that the Canvas is "ready to go".
68 vispy_canvas._backend = self
69
70 # Data used in the construction of new mouse events
71 self._vispy_mouse_data = {
72 'buttons': [],
73 'press_event': None,
74 'last_event': None,
75 'last_mouse_press': None,
76 }
77
78 def _process_backend_kwargs(self, kwargs):
79 """ Simple utility to retrieve kwargs in predetermined order.
80 Also checks whether the values of the backend arguments do not
81 violate the backend capabilities.
82 """
83 # Verify given argument with capability of the backend
84 app = self._vispy_canvas.app
85 capability = app.backend_module.capability
86 if kwargs['context'].shared.name: # name already assigned: shared
87 if not capability['context']:
88 raise RuntimeError('Cannot share context with this backend')
89 for key in [key for (key, val) in capability.items() if not val]:
90 if key in ['context', 'multi_window', 'scroll']:
91 continue
92 invert = key in ['resizable', 'decorate']
93 if bool(kwargs[key]) - invert:
94 raise RuntimeError('Config %s is not supported by backend %s'
95 % (key, app.backend_name))
96
97 # Return items in sequence
98 out = SimpleBunch()
99 keys = ['title', 'size', 'position', 'show', 'vsync', 'resizable',
100 'decorate', 'fullscreen', 'parent', 'context', 'always_on_top',
101 ]
102 for key in keys:
103 out[key] = kwargs[key]
104 return out
105
106 def _vispy_set_current(self):
107 # Make this the current context
108 raise NotImplementedError()
109
110 def _vispy_swap_buffers(self):
111 # Swap front and back buffer
112 raise NotImplementedError()
113
114 def _vispy_set_title(self, title):
115 # Set the window title. Has no effect for widgets
116 raise NotImplementedError()
117
118 def _vispy_set_size(self, w, h):
119 # Set size of the widget or window
120 raise NotImplementedError()
121
122 def _vispy_set_position(self, x, y):
123 # Set location of the widget or window. May have no effect for widgets
124 raise NotImplementedError()
125
126 def _vispy_set_visible(self, visible):
127 # Show or hide the window or widget
128 raise NotImplementedError()
129
130 def _vispy_set_fullscreen(self, fullscreen):
131 # Set fullscreen mode
132 raise NotImplementedError()
133
134 def _vispy_update(self):
135 # Invoke a redraw
136 raise NotImplementedError()
137
138 def _vispy_close(self):
139 # Force the window or widget to shut down
140 raise NotImplementedError()
141
142 def _vispy_get_size(self):
143 # Should return widget size
144 raise NotImplementedError()
145
146 def _vispy_get_physical_size(self):
147 # Should return physical widget size (actual number of screen pixels).
148 # This may differ from _vispy_get_size on backends that expose HiDPI
149 # screens. If not overriden, return the logical sizeself.
150 return self._vispy_get_size()
151
152 def _vispy_get_position(self):
153 # Should return widget position
154 raise NotImplementedError()
155
156 def _vispy_get_fullscreen(self):
157 # Should return bool for fullscreen status
158 raise NotImplementedError()
159
160 def _vispy_get_geometry(self):
161 # Should return widget (x, y, w, h)
162 x, y = self._vispy_get_position()
163 w, h = self._vispy_get_size()
164 return x, y, w, h
165
166 def _vispy_get_native_canvas(self):
167 # Should return the native widget object
168 # Most backends would not need to implement this
169 return self
170
171 def _vispy_mouse_press(self, **kwargs):
172 # default method for delivering mouse press events to the canvas
173 kwargs.update(self._vispy_mouse_data)
174 ev = self._vispy_canvas.events.mouse_press(**kwargs)
175 if self._vispy_mouse_data['press_event'] is None:
176 self._vispy_mouse_data['press_event'] = ev
177
178 self._vispy_mouse_data['buttons'].append(ev.button)
179 self._vispy_mouse_data['last_event'] = ev
180
181 if not getattr(self, '_double_click_supported', False):
182 # double-click events are not supported by this backend, so we
183 # detect them manually
184 self._vispy_detect_double_click(ev)
185
186 return ev
187
188 def _vispy_mouse_move(self, **kwargs):
189 # default method for delivering mouse move events to the canvas
190 kwargs.update(self._vispy_mouse_data)
191
192 # Break the chain of prior mouse events if no buttons are pressed
193 # (this means that during a mouse drag, we have full access to every
194 # move event generated since the drag started)
195 if self._vispy_mouse_data['press_event'] is None:
196 last_event = self._vispy_mouse_data['last_event']
197 if last_event is not None:
198 last_event._forget_last_event()
199 else:
200 kwargs['button'] = self._vispy_mouse_data['press_event'].button
201
202 ev = self._vispy_canvas.events.mouse_move(**kwargs)
203 self._vispy_mouse_data['last_event'] = ev
204 return ev
205
206 def _vispy_mouse_release(self, **kwargs):
207 # default method for delivering mouse release events to the canvas
208 kwargs.update(self._vispy_mouse_data)
209
210 ev = self._vispy_canvas.events.mouse_release(**kwargs)
211 if (self._vispy_mouse_data['press_event']
212 and self._vispy_mouse_data['press_event'].button == ev.button):
213 self._vispy_mouse_data['press_event'] = None
214
215 if ev.button in self._vispy_mouse_data['buttons']:
216 self._vispy_mouse_data['buttons'].remove(ev.button)
217 self._vispy_mouse_data['last_event'] = ev
218
219 return ev
220
221 def _vispy_mouse_double_click(self, **kwargs):
222 # default method for delivering double-click events to the canvas
223 kwargs.update(self._vispy_mouse_data)
224
225 ev = self._vispy_canvas.events.mouse_double_click(**kwargs)
226 self._vispy_mouse_data['last_event'] = ev
227 return ev
228
229 def _vispy_detect_double_click(self, ev, **kwargs):
230 # Called on every mouse_press or mouse_release, and calls
231 # _vispy_mouse_double_click if a double-click is calculated.
232 # Should be overridden with an empty function on backends which
233 # natively support double-clicking.
234
235 dt_max = 0.3 # time in seconds for a double-click detection
236
237 lastev = self._vispy_mouse_data['last_mouse_press']
238
239 if lastev is None:
240 self._vispy_mouse_data['last_mouse_press'] = ev
241 return
242
243 assert lastev.type == 'mouse_press'
244 assert ev.type == 'mouse_press'
245
246 # For a double-click to be detected, the button should be the same,
247 # the position should be the same, and the two mouse-presses should
248 # be within dt_max.
249 if ((ev.time - lastev.time <= dt_max) &
250 (lastev.pos[0] - ev.pos[0] == 0) &
251 (lastev.pos[1] - ev.pos[1] == 0) &
252 (lastev.button == ev.button)):
253 self._vispy_mouse_double_click(**kwargs)
254
255 self._vispy_mouse_data['last_mouse_press'] = ev
256
257
258 class BaseTimerBackend(object):
259 """BaseTimerBackend(vispy_timer)
260
261 Abstract class that provides an interface between backends and Timer.
262 Each backend must implement a subclass of TimerBackend, and
263 implement all its _vispy_xxx methods.
264 """
265
266 def __init__(self, vispy_timer):
267 self._vispy_timer = vispy_timer
268
269 def _vispy_start(self, interval):
270 raise NotImplementedError
271
272 def _vispy_stop(self):
273 raise NotImplementedError
274
275 def _vispy_get_native_timer(self):
276 # Should return the native timer object
277 # Most backends would not need to implement this
278 return self
279
[end of vispy/app/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vispy/app/base.py b/vispy/app/base.py
--- a/vispy/app/base.py
+++ b/vispy/app/base.py
@@ -4,6 +4,7 @@
from ..util import SimpleBunch
import time
+from timeit import default_timer
class BaseApplicationBackend(object):
@@ -60,6 +61,7 @@
from .canvas import Canvas # Avoid circular import
assert isinstance(vispy_canvas, Canvas)
self._vispy_canvas = vispy_canvas
+ self._last_time = 0
# We set the _backend attribute of the vispy_canvas to self,
# because at the end of the __init__ of the CanvasBackend
@@ -186,6 +188,10 @@
return ev
def _vispy_mouse_move(self, **kwargs):
+ if default_timer() - self._last_time < .01:
+ return
+ self._last_time = default_timer()
+
# default method for delivering mouse move events to the canvas
kwargs.update(self._vispy_mouse_data)
|
{"golden_diff": "diff --git a/vispy/app/base.py b/vispy/app/base.py\n--- a/vispy/app/base.py\n+++ b/vispy/app/base.py\n@@ -4,6 +4,7 @@\n \n from ..util import SimpleBunch\n import time\n+from timeit import default_timer\n \n \n class BaseApplicationBackend(object):\n@@ -60,6 +61,7 @@\n from .canvas import Canvas # Avoid circular import\n assert isinstance(vispy_canvas, Canvas)\n self._vispy_canvas = vispy_canvas\n+ self._last_time = 0\n \n # We set the _backend attribute of the vispy_canvas to self,\n # because at the end of the __init__ of the CanvasBackend\n@@ -186,6 +188,10 @@\n return ev\n \n def _vispy_mouse_move(self, **kwargs):\n+ if default_timer() - self._last_time < .01:\n+ return\n+ self._last_time = default_timer()\n+\n # default method for delivering mouse move events to the canvas\n kwargs.update(self._vispy_mouse_data)\n", "issue": "Low fps with high-frequency mouse\nIf mouse frequency is set to 1000Hz (for a specialized gaming mouses), the rendering speed falls dramatically for vispy-widgets that use scene graph.\n\nMoving a 125Hz mouse around: 80-120 fps\nMoving a 1000Hz mouse around: 5-20 fps\n\nCode:\n\n``` python\nimport numpy as np\nfrom vispy import app, scene\nfrom vispy.visuals import MarkersVisual\n\napp.use_app(\"PyQt4\")\ncanvas = scene.SceneCanvas(show=True)\ncanvas.measure_fps()\n\nview = canvas.central_widget.add_view()\nview.camera = 'panzoom'\nview.camera.rect = (0, 0, 800, 800)\n\nMarkers = scene.visuals.create_visual_node(MarkersVisual)\nvis = Markers()\nvis.set_data(np.array([[100, 100, 0], [400, 100, 0], [400, 400, 0], [100, 400, 0]], dtype=np.float32))\nview.add(vis)\napp.run()\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom ..util import SimpleBunch\nimport time\n\n\nclass BaseApplicationBackend(object):\n \"\"\"BaseApplicationBackend()\n\n Abstract class that provides an interface between backends and Application.\n Each backend must implement a subclass of ApplicationBackend, and\n implement all its _vispy_xxx methods.\n \"\"\"\n\n def _vispy_get_backend_name(self):\n raise NotImplementedError()\n\n def _vispy_process_events(self):\n raise NotImplementedError()\n\n def _vispy_run(self):\n raise NotImplementedError()\n\n def _vispy_reuse(self):\n # Does nothing by default.\n pass\n\n def _vispy_quit(self):\n raise NotImplementedError()\n\n def _vispy_get_native_app(self):\n # Should return the native application object\n return self\n\n # is called by inputhook.py for pauses\n # to remove CPU stress\n # this is virtual so that some backends which have specialize\n # functionality to deal with user input / latency can use those methods\n def _vispy_sleep(self, duration_sec):\n time.sleep(duration_sec)\n\n\nclass BaseCanvasBackend(object):\n \"\"\"BaseCanvasBackend(vispy_canvas, capability, context_type)\n\n Abstract class that provides an interface between backends and Canvas.\n Each backend must implement a subclass of CanvasBackend, and\n implement all its _vispy_xxx methods. Also, also a backend must\n make sure to generate the following events: 'initialize', 'resize',\n 'draw', 'mouse_press', 'mouse_release', 'mouse_move',\n 'mouse_wheel', 'key_press', 'key_release'. 
When a backend detects\n that the canvas should be closed, the backend should call\n 'self._vispy_canvas.close', because the close event is handled within\n the canvas itself.\n \"\"\"\n\n def __init__(self, vispy_canvas):\n from .canvas import Canvas # Avoid circular import\n assert isinstance(vispy_canvas, Canvas)\n self._vispy_canvas = vispy_canvas\n\n # We set the _backend attribute of the vispy_canvas to self,\n # because at the end of the __init__ of the CanvasBackend\n # implementation there might be a call to show or draw. By\n # setting it here, we ensure that the Canvas is \"ready to go\".\n vispy_canvas._backend = self\n\n # Data used in the construction of new mouse events\n self._vispy_mouse_data = {\n 'buttons': [],\n 'press_event': None,\n 'last_event': None,\n 'last_mouse_press': None,\n }\n\n def _process_backend_kwargs(self, kwargs):\n \"\"\" Simple utility to retrieve kwargs in predetermined order.\n Also checks whether the values of the backend arguments do not\n violate the backend capabilities.\n \"\"\"\n # Verify given argument with capability of the backend\n app = self._vispy_canvas.app\n capability = app.backend_module.capability\n if kwargs['context'].shared.name: # name already assigned: shared\n if not capability['context']:\n raise RuntimeError('Cannot share context with this backend')\n for key in [key for (key, val) in capability.items() if not val]:\n if key in ['context', 'multi_window', 'scroll']:\n continue\n invert = key in ['resizable', 'decorate']\n if bool(kwargs[key]) - invert:\n raise RuntimeError('Config %s is not supported by backend %s'\n % (key, app.backend_name))\n\n # Return items in sequence\n out = SimpleBunch()\n keys = ['title', 'size', 'position', 'show', 'vsync', 'resizable',\n 'decorate', 'fullscreen', 'parent', 'context', 'always_on_top',\n ]\n for key in keys:\n out[key] = kwargs[key]\n return out\n\n def _vispy_set_current(self):\n # Make this the current context\n raise NotImplementedError()\n\n def _vispy_swap_buffers(self):\n # Swap front and back buffer\n raise NotImplementedError()\n\n def _vispy_set_title(self, title):\n # Set the window title. Has no effect for widgets\n raise NotImplementedError()\n\n def _vispy_set_size(self, w, h):\n # Set size of the widget or window\n raise NotImplementedError()\n\n def _vispy_set_position(self, x, y):\n # Set location of the widget or window. May have no effect for widgets\n raise NotImplementedError()\n\n def _vispy_set_visible(self, visible):\n # Show or hide the window or widget\n raise NotImplementedError()\n\n def _vispy_set_fullscreen(self, fullscreen):\n # Set fullscreen mode\n raise NotImplementedError()\n\n def _vispy_update(self):\n # Invoke a redraw\n raise NotImplementedError()\n\n def _vispy_close(self):\n # Force the window or widget to shut down\n raise NotImplementedError()\n\n def _vispy_get_size(self):\n # Should return widget size\n raise NotImplementedError()\n\n def _vispy_get_physical_size(self):\n # Should return physical widget size (actual number of screen pixels).\n # This may differ from _vispy_get_size on backends that expose HiDPI\n # screens. 
If not overriden, return the logical sizeself.\n return self._vispy_get_size()\n\n def _vispy_get_position(self):\n # Should return widget position\n raise NotImplementedError()\n\n def _vispy_get_fullscreen(self):\n # Should return bool for fullscreen status\n raise NotImplementedError()\n\n def _vispy_get_geometry(self):\n # Should return widget (x, y, w, h)\n x, y = self._vispy_get_position()\n w, h = self._vispy_get_size()\n return x, y, w, h\n\n def _vispy_get_native_canvas(self):\n # Should return the native widget object\n # Most backends would not need to implement this\n return self\n\n def _vispy_mouse_press(self, **kwargs):\n # default method for delivering mouse press events to the canvas\n kwargs.update(self._vispy_mouse_data)\n ev = self._vispy_canvas.events.mouse_press(**kwargs)\n if self._vispy_mouse_data['press_event'] is None:\n self._vispy_mouse_data['press_event'] = ev\n\n self._vispy_mouse_data['buttons'].append(ev.button)\n self._vispy_mouse_data['last_event'] = ev\n\n if not getattr(self, '_double_click_supported', False):\n # double-click events are not supported by this backend, so we\n # detect them manually\n self._vispy_detect_double_click(ev)\n\n return ev\n\n def _vispy_mouse_move(self, **kwargs):\n # default method for delivering mouse move events to the canvas\n kwargs.update(self._vispy_mouse_data)\n\n # Break the chain of prior mouse events if no buttons are pressed\n # (this means that during a mouse drag, we have full access to every\n # move event generated since the drag started)\n if self._vispy_mouse_data['press_event'] is None:\n last_event = self._vispy_mouse_data['last_event']\n if last_event is not None:\n last_event._forget_last_event()\n else:\n kwargs['button'] = self._vispy_mouse_data['press_event'].button\n\n ev = self._vispy_canvas.events.mouse_move(**kwargs)\n self._vispy_mouse_data['last_event'] = ev\n return ev\n\n def _vispy_mouse_release(self, **kwargs):\n # default method for delivering mouse release events to the canvas\n kwargs.update(self._vispy_mouse_data)\n\n ev = self._vispy_canvas.events.mouse_release(**kwargs)\n if (self._vispy_mouse_data['press_event']\n and self._vispy_mouse_data['press_event'].button == ev.button):\n self._vispy_mouse_data['press_event'] = None\n\n if ev.button in self._vispy_mouse_data['buttons']:\n self._vispy_mouse_data['buttons'].remove(ev.button)\n self._vispy_mouse_data['last_event'] = ev\n\n return ev\n\n def _vispy_mouse_double_click(self, **kwargs):\n # default method for delivering double-click events to the canvas\n kwargs.update(self._vispy_mouse_data)\n\n ev = self._vispy_canvas.events.mouse_double_click(**kwargs)\n self._vispy_mouse_data['last_event'] = ev\n return ev\n\n def _vispy_detect_double_click(self, ev, **kwargs):\n # Called on every mouse_press or mouse_release, and calls\n # _vispy_mouse_double_click if a double-click is calculated.\n # Should be overridden with an empty function on backends which\n # natively support double-clicking.\n\n dt_max = 0.3 # time in seconds for a double-click detection\n\n lastev = self._vispy_mouse_data['last_mouse_press']\n\n if lastev is None:\n self._vispy_mouse_data['last_mouse_press'] = ev\n return\n\n assert lastev.type == 'mouse_press'\n assert ev.type == 'mouse_press'\n\n # For a double-click to be detected, the button should be the same,\n # the position should be the same, and the two mouse-presses should\n # be within dt_max.\n if ((ev.time - lastev.time <= dt_max) &\n (lastev.pos[0] - ev.pos[0] == 0) &\n (lastev.pos[1] - ev.pos[1] == 0) &\n 
(lastev.button == ev.button)):\n self._vispy_mouse_double_click(**kwargs)\n\n self._vispy_mouse_data['last_mouse_press'] = ev\n\n\nclass BaseTimerBackend(object):\n \"\"\"BaseTimerBackend(vispy_timer)\n\n Abstract class that provides an interface between backends and Timer.\n Each backend must implement a subclass of TimerBackend, and\n implement all its _vispy_xxx methods.\n \"\"\"\n\n def __init__(self, vispy_timer):\n self._vispy_timer = vispy_timer\n\n def _vispy_start(self, interval):\n raise NotImplementedError\n\n def _vispy_stop(self):\n raise NotImplementedError\n\n def _vispy_get_native_timer(self):\n # Should return the native timer object\n # Most backends would not need to implement this\n return self\n", "path": "vispy/app/base.py"}]}
| 3,807 | 244 |
gh_patches_debug_28183
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-1839
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Datastore plugin fails to start up sometimes
We are running CKAN using gunicorn, and gunicorn runs multiple threads.
At start time, the datastore plugin checks for database write permission by creating a table named _foo and then removing it immediately afterwards.
However, since multiple threads start at the same time, a second thread can try to create the table while it has not yet been removed (a race condition).
A solution would be to change the table name to something random, or derived from the current time.
Please find a solution, as this is another "untouchable" piece of CKAN.
</issue>
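For illustration, here is a minimal sketch of the randomised-name idea the reporter suggests (the helper name and the use of Python's uuid module are assumptions made for this sketch, not part of the CKAN codebase; the actual fix in the patch further below builds the name from random letters instead):

    import uuid

    def make_probe_table_name():
        # A name that is unique per permission check, so concurrent
        # gunicorn workers never race on the same probe table at startup.
        return '_probe_' + uuid.uuid4().hex

The point is simply to replace the fixed '_foo' probe table with a name that cannot collide across workers.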
<code>
[start of ckanext/datastore/plugin.py]
1 import sys
2 import logging
3
4 import ckan.plugins as p
5 import ckanext.datastore.logic.action as action
6 import ckanext.datastore.logic.auth as auth
7 import ckanext.datastore.db as db
8 import ckan.logic as logic
9 import ckan.model as model
10
11 log = logging.getLogger(__name__)
12 _get_or_bust = logic.get_or_bust
13
14 DEFAULT_FORMATS = []
15
16
17 class DatastoreException(Exception):
18 pass
19
20
21 class DatastorePlugin(p.SingletonPlugin):
22 p.implements(p.IConfigurable, inherit=True)
23 p.implements(p.IActions)
24 p.implements(p.IAuthFunctions)
25 p.implements(p.IResourceUrlChange)
26 p.implements(p.IDomainObjectModification, inherit=True)
27 p.implements(p.IRoutes, inherit=True)
28 p.implements(p.IResourceController, inherit=True)
29
30 legacy_mode = False
31 resource_show_action = None
32
33 def configure(self, config):
34 self.config = config
35 # check for ckan.datastore.write_url and ckan.datastore.read_url
36 if (not 'ckan.datastore.write_url' in config):
37 error_msg = 'ckan.datastore.write_url not found in config'
38 raise DatastoreException(error_msg)
39
40 # Legacy mode means that we have no read url. Consequently sql search is not
41 # available and permissions do not have to be changed. In legacy mode, the
42 # datastore runs on PG prior to 9.0 (for example 8.4).
43 self.legacy_mode = 'ckan.datastore.read_url' not in self.config
44
45 datapusher_formats = config.get('datapusher.formats', '').split()
46 self.datapusher_formats = datapusher_formats or DEFAULT_FORMATS
47
48 # Check whether we are running one of the paster commands which means
49 # that we should ignore the following tests.
50 if sys.argv[0].split('/')[-1] == 'paster' and 'datastore' in sys.argv[1:]:
51 log.warn('Omitting permission checks because you are '
52 'running paster commands.')
53 return
54
55 self.ckan_url = self.config['sqlalchemy.url']
56 self.write_url = self.config['ckan.datastore.write_url']
57 if self.legacy_mode:
58 self.read_url = self.write_url
59 log.warn('Legacy mode active. '
60 'The sql search will not be available.')
61 else:
62 self.read_url = self.config['ckan.datastore.read_url']
63
64 self.read_engine = db._get_engine(
65 {'connection_url': self.read_url})
66 if not model.engine_is_pg(self.read_engine):
67 log.warn('We detected that you do not use a PostgreSQL '
68 'database. The DataStore will NOT work and DataStore '
69 'tests will be skipped.')
70 return
71
72 if self._is_read_only_database():
73 log.warn('We detected that CKAN is running on a read '
74 'only database. Permission checks and the creation '
75 'of _table_metadata are skipped.')
76 else:
77 self._check_urls_and_permissions()
78 self._create_alias_table()
79
80
81 def notify(self, entity, operation=None):
82 if not isinstance(entity, model.Package) or self.legacy_mode:
83 return
84 # if a resource is new, it cannot have a datastore resource, yet
85 if operation == model.domain_object.DomainObjectOperation.changed:
86 context = {'model': model, 'ignore_auth': True}
87 if entity.private:
88 func = p.toolkit.get_action('datastore_make_private')
89 else:
90 func = p.toolkit.get_action('datastore_make_public')
91 for resource in entity.resources:
92 try:
93 func(context, {
94 'connection_url': self.write_url,
95 'resource_id': resource.id})
96 except p.toolkit.ObjectNotFound:
97 pass
98
99 def _log_or_raise(self, message):
100 if self.config.get('debug'):
101 log.critical(message)
102 else:
103 raise DatastoreException(message)
104
105 def _check_urls_and_permissions(self):
106 # Make sure that the right permissions are set
107 # so that no harmful queries can be made
108
109 if self._same_ckan_and_datastore_db():
110 self._log_or_raise('CKAN and DataStore database '
111 'cannot be the same.')
112
113 # in legacy mode, the read and write url are the same (both write url)
114 # consequently the same url check and and write privilege check
115 # don't make sense
116 if not self.legacy_mode:
117 if self._same_read_and_write_url():
118 self._log_or_raise('The write and read-only database '
119 'connection urls are the same.')
120
121 if not self._read_connection_has_correct_privileges():
122 self._log_or_raise('The read-only user has write privileges.')
123
124 def _is_read_only_database(self):
125 ''' Returns True if no connection has CREATE privileges on the public
126 schema. This is the case if replication is enabled.'''
127 for url in [self.ckan_url, self.write_url, self.read_url]:
128 connection = db._get_engine({'connection_url': url}).connect()
129 try:
130 sql = u"SELECT has_schema_privilege('public', 'CREATE')"
131 is_writable = connection.execute(sql).first()[0]
132 finally:
133 connection.close()
134 if is_writable:
135 return False
136 return True
137
138 def _same_ckan_and_datastore_db(self):
139 '''Returns True if the CKAN and DataStore db are the same'''
140 return self._get_db_from_url(self.ckan_url) == self._get_db_from_url(self.read_url)
141
142 def _get_db_from_url(self, url):
143 return url[url.rindex("@"):]
144
145 def _same_read_and_write_url(self):
146 return self.write_url == self.read_url
147
148 def _read_connection_has_correct_privileges(self):
149 ''' Returns True if the right permissions are set for the read
150 only user. A table is created by the write user to test the
151 read only user.
152 '''
153 write_connection = db._get_engine(
154 {'connection_url': self.write_url}).connect()
155 read_connection = db._get_engine(
156 {'connection_url': self.read_url}).connect()
157
158 drop_foo_sql = u'DROP TABLE IF EXISTS _foo'
159
160 write_connection.execute(drop_foo_sql)
161
162 try:
163 try:
164 write_connection.execute(u'CREATE TABLE _foo ()')
165 for privilege in ['INSERT', 'UPDATE', 'DELETE']:
166 test_privilege_sql = u"SELECT has_table_privilege('_foo', '{privilege}')"
167 sql = test_privilege_sql.format(privilege=privilege)
168 have_privilege = read_connection.execute(sql).first()[0]
169 if have_privilege:
170 return False
171 finally:
172 write_connection.execute(drop_foo_sql)
173 finally:
174 write_connection.close()
175 read_connection.close()
176 return True
177
178 def _create_alias_table(self):
179 mapping_sql = '''
180 SELECT DISTINCT
181 substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS "_id",
182 dependee.relname AS name,
183 dependee.oid AS oid,
184 dependent.relname AS alias_of
185 -- dependent.oid AS oid
186 FROM
187 pg_class AS dependee
188 LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid
189 LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid
190 LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid
191 WHERE
192 (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND
193 (dependee.relname IN (SELECT tablename FROM pg_catalog.pg_tables)
194 OR dependee.relname IN (SELECT viewname FROM pg_catalog.pg_views)) AND
195 dependee.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname='public')
196 ORDER BY dependee.oid DESC;
197 '''
198 create_alias_table_sql = u'CREATE OR REPLACE VIEW "_table_metadata" AS {0}'.format(mapping_sql)
199 try:
200 connection = db._get_engine(
201 {'connection_url': self.write_url}).connect()
202 connection.execute(create_alias_table_sql)
203 finally:
204 connection.close()
205
206 def get_actions(self):
207 actions = {'datastore_create': action.datastore_create,
208 'datastore_upsert': action.datastore_upsert,
209 'datastore_delete': action.datastore_delete,
210 'datastore_search': action.datastore_search,
211 }
212 if not self.legacy_mode:
213 actions.update({
214 'datastore_search_sql': action.datastore_search_sql,
215 'datastore_make_private': action.datastore_make_private,
216 'datastore_make_public': action.datastore_make_public})
217 return actions
218
219 def get_auth_functions(self):
220 return {'datastore_create': auth.datastore_create,
221 'datastore_upsert': auth.datastore_upsert,
222 'datastore_delete': auth.datastore_delete,
223 'datastore_search': auth.datastore_search,
224 'datastore_search_sql': auth.datastore_search_sql,
225 'datastore_change_permissions': auth.datastore_change_permissions}
226
227 def before_map(self, m):
228 m.connect('/datastore/dump/{resource_id}',
229 controller='ckanext.datastore.controller:DatastoreController',
230 action='dump')
231 return m
232
233 def before_show(self, resource_dict):
234 # Modify the resource url of datastore resources so that
235 # they link to the datastore dumps.
236 if resource_dict.get('url_type') == 'datastore':
237 resource_dict['url'] = p.toolkit.url_for(
238 controller='ckanext.datastore.controller:DatastoreController',
239 action='dump', resource_id=resource_dict['id'])
240
241 try:
242 connection = self.read_engine.connect()
243 result = connection.execute(
244 'SELECT 1 FROM "_table_metadata" WHERE name = %s AND alias_of IS NULL',
245 resource_dict['id']
246 ).fetchone()
247 if result:
248 resource_dict['datastore_active'] = True
249 else:
250 resource_dict['datastore_active'] = False
251 finally:
252 connection.close()
253 return resource_dict
254
[end of ckanext/datastore/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -1,6 +1,9 @@
import sys
import logging
+import string
+import random
+
import ckan.plugins as p
import ckanext.datastore.logic.action as action
import ckanext.datastore.logic.auth as auth
@@ -155,15 +158,19 @@
read_connection = db._get_engine(
{'connection_url': self.read_url}).connect()
- drop_foo_sql = u'DROP TABLE IF EXISTS _foo'
+ # create a nice random table name
+ chars = string.uppercase + string.lowercase
+ table_name = '_'.join(random.choice(chars) for _ in range(9))
+
+ drop_foo_sql = u'DROP TABLE IF EXISTS ' + table_name
write_connection.execute(drop_foo_sql)
try:
try:
- write_connection.execute(u'CREATE TABLE _foo ()')
+ write_connection.execute(u'CREATE TEMP TABLE ' + table_name + ' ()')
for privilege in ['INSERT', 'UPDATE', 'DELETE']:
- test_privilege_sql = u"SELECT has_table_privilege('_foo', '{privilege}')"
+ test_privilege_sql = u"SELECT has_table_privilege('" + table_name + "', '{privilege}')"
sql = test_privilege_sql.format(privilege=privilege)
have_privilege = read_connection.execute(sql).first()[0]
if have_privilege:
|
{"golden_diff": "diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py\n--- a/ckanext/datastore/plugin.py\n+++ b/ckanext/datastore/plugin.py\n@@ -1,6 +1,9 @@\n import sys\n import logging\n \n+import string\n+import random\n+\n import ckan.plugins as p\n import ckanext.datastore.logic.action as action\n import ckanext.datastore.logic.auth as auth\n@@ -155,15 +158,19 @@\n read_connection = db._get_engine(\n {'connection_url': self.read_url}).connect()\n \n- drop_foo_sql = u'DROP TABLE IF EXISTS _foo'\n+ # create a nice random table name\n+ chars = string.uppercase + string.lowercase\n+ table_name = '_'.join(random.choice(chars) for _ in range(9))\n+ \n+ drop_foo_sql = u'DROP TABLE IF EXISTS ' + table_name\n \n write_connection.execute(drop_foo_sql)\n \n try:\n try:\n- write_connection.execute(u'CREATE TABLE _foo ()')\n+ write_connection.execute(u'CREATE TEMP TABLE ' + table_name + ' ()')\n for privilege in ['INSERT', 'UPDATE', 'DELETE']:\n- test_privilege_sql = u\"SELECT has_table_privilege('_foo', '{privilege}')\"\n+ test_privilege_sql = u\"SELECT has_table_privilege('\" + table_name + \"', '{privilege}')\"\n sql = test_privilege_sql.format(privilege=privilege)\n have_privilege = read_connection.execute(sql).first()[0]\n if have_privilege:\n", "issue": "Datastore plugin fails to startup sometimes\nWe are running ckan using gunicorn.\nGunicorn run multiple threads.\n\nAt start time, datastore plugin checks the existence of db write permission by creating a table named _foo and removes it immediately afterwards. \nHowever, since we are starting multiple threads, it just happens that a second thread will try to create the table while it has not been removed yet (race condition? :) ).\nA solution would be to change the table name with something random / depending on the time.\n\nPlease find solutions, as this is another \"untouchable\" piece from ckan.\n\n", "before_files": [{"content": "import sys\nimport logging\n\nimport ckan.plugins as p\nimport ckanext.datastore.logic.action as action\nimport ckanext.datastore.logic.auth as auth\nimport ckanext.datastore.db as db\nimport ckan.logic as logic\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\n_get_or_bust = logic.get_or_bust\n\nDEFAULT_FORMATS = []\n\n\nclass DatastoreException(Exception):\n pass\n\n\nclass DatastorePlugin(p.SingletonPlugin):\n p.implements(p.IConfigurable, inherit=True)\n p.implements(p.IActions)\n p.implements(p.IAuthFunctions)\n p.implements(p.IResourceUrlChange)\n p.implements(p.IDomainObjectModification, inherit=True)\n p.implements(p.IRoutes, inherit=True)\n p.implements(p.IResourceController, inherit=True)\n\n legacy_mode = False\n resource_show_action = None\n\n def configure(self, config):\n self.config = config\n # check for ckan.datastore.write_url and ckan.datastore.read_url\n if (not 'ckan.datastore.write_url' in config):\n error_msg = 'ckan.datastore.write_url not found in config'\n raise DatastoreException(error_msg)\n\n # Legacy mode means that we have no read url. Consequently sql search is not\n # available and permissions do not have to be changed. 
In legacy mode, the\n # datastore runs on PG prior to 9.0 (for example 8.4).\n self.legacy_mode = 'ckan.datastore.read_url' not in self.config\n\n datapusher_formats = config.get('datapusher.formats', '').split()\n self.datapusher_formats = datapusher_formats or DEFAULT_FORMATS\n\n # Check whether we are running one of the paster commands which means\n # that we should ignore the following tests.\n if sys.argv[0].split('/')[-1] == 'paster' and 'datastore' in sys.argv[1:]:\n log.warn('Omitting permission checks because you are '\n 'running paster commands.')\n return\n\n self.ckan_url = self.config['sqlalchemy.url']\n self.write_url = self.config['ckan.datastore.write_url']\n if self.legacy_mode:\n self.read_url = self.write_url\n log.warn('Legacy mode active. '\n 'The sql search will not be available.')\n else:\n self.read_url = self.config['ckan.datastore.read_url']\n\n self.read_engine = db._get_engine(\n {'connection_url': self.read_url})\n if not model.engine_is_pg(self.read_engine):\n log.warn('We detected that you do not use a PostgreSQL '\n 'database. The DataStore will NOT work and DataStore '\n 'tests will be skipped.')\n return\n\n if self._is_read_only_database():\n log.warn('We detected that CKAN is running on a read '\n 'only database. Permission checks and the creation '\n 'of _table_metadata are skipped.')\n else:\n self._check_urls_and_permissions()\n self._create_alias_table()\n\n\n def notify(self, entity, operation=None):\n if not isinstance(entity, model.Package) or self.legacy_mode:\n return\n # if a resource is new, it cannot have a datastore resource, yet\n if operation == model.domain_object.DomainObjectOperation.changed:\n context = {'model': model, 'ignore_auth': True}\n if entity.private:\n func = p.toolkit.get_action('datastore_make_private')\n else:\n func = p.toolkit.get_action('datastore_make_public')\n for resource in entity.resources:\n try:\n func(context, {\n 'connection_url': self.write_url,\n 'resource_id': resource.id})\n except p.toolkit.ObjectNotFound:\n pass\n\n def _log_or_raise(self, message):\n if self.config.get('debug'):\n log.critical(message)\n else:\n raise DatastoreException(message)\n\n def _check_urls_and_permissions(self):\n # Make sure that the right permissions are set\n # so that no harmful queries can be made\n\n if self._same_ckan_and_datastore_db():\n self._log_or_raise('CKAN and DataStore database '\n 'cannot be the same.')\n\n # in legacy mode, the read and write url are the same (both write url)\n # consequently the same url check and and write privilege check\n # don't make sense\n if not self.legacy_mode:\n if self._same_read_and_write_url():\n self._log_or_raise('The write and read-only database '\n 'connection urls are the same.')\n\n if not self._read_connection_has_correct_privileges():\n self._log_or_raise('The read-only user has write privileges.')\n\n def _is_read_only_database(self):\n ''' Returns True if no connection has CREATE privileges on the public\n schema. 
This is the case if replication is enabled.'''\n for url in [self.ckan_url, self.write_url, self.read_url]:\n connection = db._get_engine({'connection_url': url}).connect()\n try:\n sql = u\"SELECT has_schema_privilege('public', 'CREATE')\"\n is_writable = connection.execute(sql).first()[0]\n finally:\n connection.close()\n if is_writable:\n return False\n return True\n\n def _same_ckan_and_datastore_db(self):\n '''Returns True if the CKAN and DataStore db are the same'''\n return self._get_db_from_url(self.ckan_url) == self._get_db_from_url(self.read_url)\n\n def _get_db_from_url(self, url):\n return url[url.rindex(\"@\"):]\n\n def _same_read_and_write_url(self):\n return self.write_url == self.read_url\n\n def _read_connection_has_correct_privileges(self):\n ''' Returns True if the right permissions are set for the read\n only user. A table is created by the write user to test the\n read only user.\n '''\n write_connection = db._get_engine(\n {'connection_url': self.write_url}).connect()\n read_connection = db._get_engine(\n {'connection_url': self.read_url}).connect()\n\n drop_foo_sql = u'DROP TABLE IF EXISTS _foo'\n\n write_connection.execute(drop_foo_sql)\n\n try:\n try:\n write_connection.execute(u'CREATE TABLE _foo ()')\n for privilege in ['INSERT', 'UPDATE', 'DELETE']:\n test_privilege_sql = u\"SELECT has_table_privilege('_foo', '{privilege}')\"\n sql = test_privilege_sql.format(privilege=privilege)\n have_privilege = read_connection.execute(sql).first()[0]\n if have_privilege:\n return False\n finally:\n write_connection.execute(drop_foo_sql)\n finally:\n write_connection.close()\n read_connection.close()\n return True\n\n def _create_alias_table(self):\n mapping_sql = '''\n SELECT DISTINCT\n substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS \"_id\",\n dependee.relname AS name,\n dependee.oid AS oid,\n dependent.relname AS alias_of\n -- dependent.oid AS oid\n FROM\n pg_class AS dependee\n LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid\n LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid\n LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid\n WHERE\n (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND\n (dependee.relname IN (SELECT tablename FROM pg_catalog.pg_tables)\n OR dependee.relname IN (SELECT viewname FROM pg_catalog.pg_views)) AND\n dependee.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname='public')\n ORDER BY dependee.oid DESC;\n '''\n create_alias_table_sql = u'CREATE OR REPLACE VIEW \"_table_metadata\" AS {0}'.format(mapping_sql)\n try:\n connection = db._get_engine(\n {'connection_url': self.write_url}).connect()\n connection.execute(create_alias_table_sql)\n finally:\n connection.close()\n\n def get_actions(self):\n actions = {'datastore_create': action.datastore_create,\n 'datastore_upsert': action.datastore_upsert,\n 'datastore_delete': action.datastore_delete,\n 'datastore_search': action.datastore_search,\n }\n if not self.legacy_mode:\n actions.update({\n 'datastore_search_sql': action.datastore_search_sql,\n 'datastore_make_private': action.datastore_make_private,\n 'datastore_make_public': action.datastore_make_public})\n return actions\n\n def get_auth_functions(self):\n return {'datastore_create': auth.datastore_create,\n 'datastore_upsert': auth.datastore_upsert,\n 'datastore_delete': auth.datastore_delete,\n 'datastore_search': auth.datastore_search,\n 'datastore_search_sql': auth.datastore_search_sql,\n 'datastore_change_permissions': auth.datastore_change_permissions}\n\n def 
before_map(self, m):\n m.connect('/datastore/dump/{resource_id}',\n controller='ckanext.datastore.controller:DatastoreController',\n action='dump')\n return m\n\n def before_show(self, resource_dict):\n # Modify the resource url of datastore resources so that\n # they link to the datastore dumps.\n if resource_dict.get('url_type') == 'datastore':\n resource_dict['url'] = p.toolkit.url_for(\n controller='ckanext.datastore.controller:DatastoreController',\n action='dump', resource_id=resource_dict['id'])\n\n try:\n connection = self.read_engine.connect()\n result = connection.execute(\n 'SELECT 1 FROM \"_table_metadata\" WHERE name = %s AND alias_of IS NULL',\n resource_dict['id']\n ).fetchone()\n if result:\n resource_dict['datastore_active'] = True\n else:\n resource_dict['datastore_active'] = False\n finally:\n connection.close()\n return resource_dict\n", "path": "ckanext/datastore/plugin.py"}]}
| 3,500 | 353 |
gh_patches_debug_20803
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2862
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Edit information field in admin gives error page
### Describe the bug
The site gives a 500 error page when you try to edit the information fields of registered users.
### How to reproduce
Go to an event in the admin and press "Change" on a user's registration. At "Fields:", press the "Edit Information Fields" button and the error occurs (found at parents' day, reported by Tim Kersten).
### Expected behaviour
I'd expect the appropriate page for editing a user's information fields.
</issue>
<code>
[start of website/events/admin/views.py]
1 import csv
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.admin import helpers
6 from django.contrib.admin.views.decorators import staff_member_required
7 from django.contrib.auth.mixins import PermissionRequiredMixin
8 from django.http import HttpResponse
9 from django.shortcuts import get_object_or_404, redirect
10 from django.utils import timezone
11 from django.utils.decorators import method_decorator
12 from django.utils.text import slugify
13 from django.utils.translation import gettext_lazy as _
14 from django.utils.translation import pgettext_lazy
15 from django.views import View
16 from django.views.generic import DetailView, FormView
17
18 import qrcode
19
20 from events import services
21 from events.decorators import organiser_only
22 from events.exceptions import RegistrationError
23 from events.forms import EventMessageForm, FieldsForm
24 from events.models import Event, EventRegistration
25 from payments.models import Payment
26 from pushnotifications.models import Category, Message
27
28
29 @method_decorator(staff_member_required, name="dispatch")
30 @method_decorator(organiser_only, name="dispatch")
31 class EventAdminDetails(DetailView, PermissionRequiredMixin):
32 """Render an overview of registrations for the specified event."""
33
34 template_name = "events/admin/details.html"
35 model = Event
36 context_object_name = "event"
37 permission_required = "events.change_event"
38
39 def get_context_data(self, **kwargs):
40 context = super().get_context_data(**kwargs)
41
42 context.update({"payment": Payment, "has_permission": True, "site_url": "/"})
43
44 return context
45
46
47 @method_decorator(staff_member_required, name="dispatch")
48 @method_decorator(organiser_only, name="dispatch")
49 class RegistrationAdminFields(FormView):
50 """Render a form that allows the user to change the details of their registration.
51
52 The user should be authenticated.
53 """
54
55 form_class = FieldsForm
56 template_name = "admin/change_form.html"
57 registration = None
58 admin = None
59
60 def get_context_data(self, **kwargs):
61 context = super().get_context_data(**kwargs)
62 context.update(
63 {
64 **self.admin.admin_site.each_context(self.request),
65 "add": False,
66 "change": True,
67 "has_view_permission": True,
68 "has_add_permission": False,
69 "has_change_permission": self.request.user.has_perms(
70 "events.change_eventregistration"
71 ),
72 "has_delete_permission": False,
73 "has_editable_inline_admin_formsets": False,
74 "app_label": "events",
75 "opts": self.registration._meta,
76 "is_popup": False,
77 "save_as": False,
78 "save_on_top": False,
79 "original": self.registration,
80 "obj_id": self.registration.pk,
81 "title": _("Change registration fields"),
82 "adminform": helpers.AdminForm(
83 context["form"],
84 ((None, {"fields": context["form"].fields.keys()}),),
85 {},
86 ),
87 }
88 )
89 return context
90
91 def get_form_kwargs(self):
92 kwargs = super().get_form_kwargs()
93 kwargs["fields"] = services.registration_fields(
94 self.request, registration=self.registration
95 )
96 return kwargs
97
98 def form_valid(self, form):
99 values = form.field_values()
100 try:
101 services.update_registration(
102 registration=self.registration,
103 field_values=values,
104 actor=self.request.user,
105 )
106 messages.success(self.request, _("Registration successfully saved."))
107 if "_save" in self.request.POST:
108 return redirect(
109 "admin:events_eventregistration_change", self.registration.pk
110 )
111 except RegistrationError as e:
112 messages.error(self.request, e)
113 return self.render_to_response(self.get_context_data(form=form))
114
115 def dispatch(self, request, *args, **kwargs):
116 self.registration = get_object_or_404(
117 EventRegistration, pk=self.kwargs["registration"]
118 )
119 try:
120 if self.registration.event.has_fields:
121 return super().dispatch(request, *args, **kwargs)
122 except RegistrationError:
123 pass
124 return redirect("admin:events_eventregistration_change", self.registration.pk)
125
126
127 @method_decorator(staff_member_required, name="dispatch")
128 @method_decorator(organiser_only, name="dispatch")
129 class EventMessage(FormView):
130 """Renders a form that allows the user to create a push notification for all users registers to the event."""
131
132 form_class = EventMessageForm
133 template_name = "events/admin/message_form.html"
134 admin = None
135 event = None
136
137 def get_context_data(self, **kwargs):
138 context = super().get_context_data(**kwargs)
139 context.update(
140 {
141 **self.admin.admin_site.each_context(self.request),
142 "add": False,
143 "change": True,
144 "has_view_permission": True,
145 "has_add_permission": False,
146 "has_change_permission": self.request.user.has_perms(
147 "events.change_event"
148 ),
149 "has_delete_permission": False,
150 "has_editable_inline_admin_formsets": False,
151 "app_label": "events",
152 "opts": self.event._meta,
153 "is_popup": False,
154 "save_as": False,
155 "save_on_top": False,
156 "original": self.event,
157 "obj_id": self.event.pk,
158 "title": _("Send push notification"),
159 "adminform": helpers.AdminForm(
160 context["form"],
161 ((None, {"fields": context["form"].fields.keys()}),),
162 {},
163 ),
164 }
165 )
166 return context
167
168 def form_valid(self, form):
169 values = form.cleaned_data
170 if not values["url"]:
171 values["url"] = settings.BASE_URL + self.event.get_absolute_url()
172 message = Message(
173 title=values["title"],
174 body=values["body"],
175 url=values["url"],
176 category=Category.objects.get(key=Category.EVENT),
177 )
178 message.save()
179 message.users.set([r.member for r in self.event.participants if r.member])
180 message.send()
181
182 messages.success(self.request, _("Message sent successfully."))
183 if "_save" in self.request.POST:
184 return redirect("admin:events_event_details", self.event.pk)
185 return super().form_valid(form)
186
187 def dispatch(self, request, *args, **kwargs):
188 self.event = get_object_or_404(Event, pk=self.kwargs["pk"])
189 return super().dispatch(request, *args, **kwargs)
190
191
192 @method_decorator(staff_member_required, name="dispatch")
193 @method_decorator(organiser_only, name="dispatch")
194 class EventRegistrationsExport(View, PermissionRequiredMixin):
195 """View to export registrations."""
196
197 template_name = "events/admin/details.html"
198 permission_required = "events.change_event"
199
200 def get(self, request, pk):
201 """Export the registration of a specified event.
202
203 :param request: the request object
204 :param pk: the primary key of the event
205 :return: A CSV containing all registrations for the event
206 """
207 event = get_object_or_404(Event, pk=pk)
208 extra_fields = event.registrationinformationfield_set.all()
209 registrations = event.eventregistration_set.all()
210
211 header_fields = (
212 [
213 _("Name"),
214 _("Email"),
215 _("Paid"),
216 _("Present"),
217 _("Status"),
218 _("Phone number"),
219 ]
220 + [field.name for field in extra_fields]
221 + [_("Date"), _("Date cancelled")]
222 )
223
224 rows = []
225 if event.price == 0:
226 header_fields.remove(_("Paid"))
227 for registration in registrations:
228 if registration.member:
229 name = registration.member.get_full_name()
230 else:
231 name = registration.name
232 status = pgettext_lazy("registration status", "registered").capitalize()
233 cancelled = None
234 if registration.date_cancelled:
235 if registration.is_late_cancellation():
236 status = pgettext_lazy(
237 "registration status", "late cancellation"
238 ).capitalize()
239 else:
240 status = pgettext_lazy(
241 "registration status", "cancelled"
242 ).capitalize()
243 cancelled = timezone.localtime(registration.date_cancelled)
244
245 elif registration.queue_position:
246 status = pgettext_lazy("registration status", "waiting")
247 data = {
248 _("Name"): name,
249 _("Date"): timezone.localtime(registration.date),
250 _("Present"): _("Yes") if registration.present else "",
251 _("Phone number"): (
252 registration.phone_number if registration.phone_number else ""
253 ),
254 _("Email"): (registration.email if registration.email else ""),
255 _("Status"): status,
256 _("Date cancelled"): cancelled,
257 }
258 if event.price > 0:
259 if registration.is_paid():
260 data[_("Paid")] = registration.payment.get_type_display()
261 else:
262 data[_("Paid")] = _("No")
263
264 data.update(
265 {
266 field["field"].name: field["value"]
267 for field in registration.information_fields
268 }
269 )
270 rows.append(data)
271
272 response = HttpResponse(content_type="text/csv")
273 writer = csv.DictWriter(response, header_fields)
274 writer.writeheader()
275
276 rows = sorted(
277 rows,
278 key=lambda row: (
279 row[_("Status")]
280 == pgettext_lazy(
281 "registration status", "late cancellation"
282 ).capitalize(),
283 row[_("Date")],
284 ),
285 reverse=True,
286 )
287
288 for row in rows:
289 writer.writerow(row)
290
291 response[
292 "Content-Disposition"
293 ] = f'attachment; filename="{slugify(event.title)}.csv"'
294 return response
295
296
297 @method_decorator(staff_member_required, name="dispatch")
298 @method_decorator(organiser_only, name="dispatch")
299 class EventMarkPresentQR(View):
300 def get(self, request, *args, **kwargs):
301 event = get_object_or_404(Event, pk=kwargs["pk"])
302 image = qrcode.make(event.mark_present_url)
303
304 response = HttpResponse(content_type="image/png")
305 image.save(response, "PNG")
306
307 return response
308
[end of website/events/admin/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/events/admin/views.py b/website/events/admin/views.py
--- a/website/events/admin/views.py
+++ b/website/events/admin/views.py
@@ -66,7 +66,7 @@
"change": True,
"has_view_permission": True,
"has_add_permission": False,
- "has_change_permission": self.request.user.has_perms(
+ "has_change_permission": self.request.user.has_perm(
"events.change_eventregistration"
),
"has_delete_permission": False,
@@ -143,7 +143,7 @@
"change": True,
"has_view_permission": True,
"has_add_permission": False,
- "has_change_permission": self.request.user.has_perms(
+ "has_change_permission": self.request.user.has_perm(
"events.change_event"
),
"has_delete_permission": False,
|
{"golden_diff": "diff --git a/website/events/admin/views.py b/website/events/admin/views.py\n--- a/website/events/admin/views.py\n+++ b/website/events/admin/views.py\n@@ -66,7 +66,7 @@\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n- \"has_change_permission\": self.request.user.has_perms(\n+ \"has_change_permission\": self.request.user.has_perm(\n \"events.change_eventregistration\"\n ),\n \"has_delete_permission\": False,\n@@ -143,7 +143,7 @@\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n- \"has_change_permission\": self.request.user.has_perms(\n+ \"has_change_permission\": self.request.user.has_perm(\n \"events.change_event\"\n ),\n \"has_delete_permission\": False,\n", "issue": "Edit information field in admin gives error page\n### Describe the bug\r\nThe site gives the 500 error page when you try to edit information fields of registered users\r\n\r\n### How to reproduce\r\nGo to an event on the admin and press change on a user's registration. At \"Fields:\" press the \"Edit Information Fields\" button and the bug happens (found at parents day, Tim Kersten)\r\n\r\n### Expected behaviour\r\nI'd expect the appropriate page for editing a user's information fields.\r\n\n", "before_files": [{"content": "import csv\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import pgettext_lazy\nfrom django.views import View\nfrom django.views.generic import DetailView, FormView\n\nimport qrcode\n\nfrom events import services\nfrom events.decorators import organiser_only\nfrom events.exceptions import RegistrationError\nfrom events.forms import EventMessageForm, FieldsForm\nfrom events.models import Event, EventRegistration\nfrom payments.models import Payment\nfrom pushnotifications.models import Category, Message\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventAdminDetails(DetailView, PermissionRequiredMixin):\n \"\"\"Render an overview of registrations for the specified event.\"\"\"\n\n template_name = \"events/admin/details.html\"\n model = Event\n context_object_name = \"event\"\n permission_required = \"events.change_event\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": \"/\"})\n\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass RegistrationAdminFields(FormView):\n \"\"\"Render a form that allows the user to change the details of their registration.\n\n The user should be authenticated.\n \"\"\"\n\n form_class = FieldsForm\n template_name = \"admin/change_form.html\"\n registration = None\n admin = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n 
\"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_eventregistration\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.registration._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.registration,\n \"obj_id\": self.registration.pk,\n \"title\": _(\"Change registration fields\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"fields\"] = services.registration_fields(\n self.request, registration=self.registration\n )\n return kwargs\n\n def form_valid(self, form):\n values = form.field_values()\n try:\n services.update_registration(\n registration=self.registration,\n field_values=values,\n actor=self.request.user,\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n if \"_save\" in self.request.POST:\n return redirect(\n \"admin:events_eventregistration_change\", self.registration.pk\n )\n except RegistrationError as e:\n messages.error(self.request, e)\n return self.render_to_response(self.get_context_data(form=form))\n\n def dispatch(self, request, *args, **kwargs):\n self.registration = get_object_or_404(\n EventRegistration, pk=self.kwargs[\"registration\"]\n )\n try:\n if self.registration.event.has_fields:\n return super().dispatch(request, *args, **kwargs)\n except RegistrationError:\n pass\n return redirect(\"admin:events_eventregistration_change\", self.registration.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventMessage(FormView):\n \"\"\"Renders a form that allows the user to create a push notification for all users registers to the event.\"\"\"\n\n form_class = EventMessageForm\n template_name = \"events/admin/message_form.html\"\n admin = None\n event = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_event\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.event._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.event,\n \"obj_id\": self.event.pk,\n \"title\": _(\"Send push notification\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def form_valid(self, form):\n values = form.cleaned_data\n if not values[\"url\"]:\n values[\"url\"] = settings.BASE_URL + self.event.get_absolute_url()\n message = Message(\n title=values[\"title\"],\n body=values[\"body\"],\n url=values[\"url\"],\n category=Category.objects.get(key=Category.EVENT),\n )\n message.save()\n message.users.set([r.member for r in self.event.participants if r.member])\n message.send()\n\n messages.success(self.request, _(\"Message sent successfully.\"))\n if \"_save\" in self.request.POST:\n return redirect(\"admin:events_event_details\", self.event.pk)\n return 
super().form_valid(form)\n\n def dispatch(self, request, *args, **kwargs):\n self.event = get_object_or_404(Event, pk=self.kwargs[\"pk\"])\n return super().dispatch(request, *args, **kwargs)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventRegistrationsExport(View, PermissionRequiredMixin):\n \"\"\"View to export registrations.\"\"\"\n\n template_name = \"events/admin/details.html\"\n permission_required = \"events.change_event\"\n\n def get(self, request, pk):\n \"\"\"Export the registration of a specified event.\n\n :param request: the request object\n :param pk: the primary key of the event\n :return: A CSV containing all registrations for the event\n \"\"\"\n event = get_object_or_404(Event, pk=pk)\n extra_fields = event.registrationinformationfield_set.all()\n registrations = event.eventregistration_set.all()\n\n header_fields = (\n [\n _(\"Name\"),\n _(\"Email\"),\n _(\"Paid\"),\n _(\"Present\"),\n _(\"Status\"),\n _(\"Phone number\"),\n ]\n + [field.name for field in extra_fields]\n + [_(\"Date\"), _(\"Date cancelled\")]\n )\n\n rows = []\n if event.price == 0:\n header_fields.remove(_(\"Paid\"))\n for registration in registrations:\n if registration.member:\n name = registration.member.get_full_name()\n else:\n name = registration.name\n status = pgettext_lazy(\"registration status\", \"registered\").capitalize()\n cancelled = None\n if registration.date_cancelled:\n if registration.is_late_cancellation():\n status = pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize()\n else:\n status = pgettext_lazy(\n \"registration status\", \"cancelled\"\n ).capitalize()\n cancelled = timezone.localtime(registration.date_cancelled)\n\n elif registration.queue_position:\n status = pgettext_lazy(\"registration status\", \"waiting\")\n data = {\n _(\"Name\"): name,\n _(\"Date\"): timezone.localtime(registration.date),\n _(\"Present\"): _(\"Yes\") if registration.present else \"\",\n _(\"Phone number\"): (\n registration.phone_number if registration.phone_number else \"\"\n ),\n _(\"Email\"): (registration.email if registration.email else \"\"),\n _(\"Status\"): status,\n _(\"Date cancelled\"): cancelled,\n }\n if event.price > 0:\n if registration.is_paid():\n data[_(\"Paid\")] = registration.payment.get_type_display()\n else:\n data[_(\"Paid\")] = _(\"No\")\n\n data.update(\n {\n field[\"field\"].name: field[\"value\"]\n for field in registration.information_fields\n }\n )\n rows.append(data)\n\n response = HttpResponse(content_type=\"text/csv\")\n writer = csv.DictWriter(response, header_fields)\n writer.writeheader()\n\n rows = sorted(\n rows,\n key=lambda row: (\n row[_(\"Status\")]\n == pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize(),\n row[_(\"Date\")],\n ),\n reverse=True,\n )\n\n for row in rows:\n writer.writerow(row)\n\n response[\n \"Content-Disposition\"\n ] = f'attachment; filename=\"{slugify(event.title)}.csv\"'\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventMarkPresentQR(View):\n def get(self, request, *args, **kwargs):\n event = get_object_or_404(Event, pk=kwargs[\"pk\"])\n image = qrcode.make(event.mark_present_url)\n\n response = HttpResponse(content_type=\"image/png\")\n image.save(response, \"PNG\")\n\n return response\n", "path": "website/events/admin/views.py"}]}
| 3,544 | 191 |
gh_patches_debug_32974
|
rasdani/github-patches
|
git_diff
|
google__mobly-473
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log snippet client calls at debug level
Right now there is no logging for calls made on the snippet client side, which makes it difficult to debug if a snippet client call hangs.
At debug level, we should log the messages sent and received by the client, the same way we log the messages received and sent by the snippet server on the device.
</issue>
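For illustration, a minimal standalone sketch of the kind of client-side debug logging being requested (the function names and log messages are placeholders, not Mobly APIs; the patch below applies the same idea directly inside _client_send and _client_receive of the JSON-RPC client base class):

    import logging

    log = logging.getLogger('snippet_client')

    def client_send(client_file, msg):
        # Log the outgoing request first, so a hang can be traced to the
        # last message the client wrote to the socket.
        log.debug('Snippet client sending: %s', msg)
        client_file.write(msg.encode('utf8') + b'\n')
        client_file.flush()

    def client_receive(client_file):
        response = client_file.readline()
        # Log the raw response, mirroring the logging the snippet server
        # already does on the device side.
        log.debug('Snippet client received: %s', response)
        return response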
<code>
[start of mobly/controllers/android_device_lib/jsonrpc_client_base.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Base class for clients that communicate with apps over a JSON RPC interface.
15
16 The JSON protocol expected by this module is:
17
18 .. code-block:: json
19
20 Request:
21 {
22 "id": <monotonically increasing integer containing the ID of
23 this request>
24 "method": <string containing the name of the method to execute>
25 "params": <JSON array containing the arguments to the method>
26 }
27
28 Response:
29 {
30 "id": <int id of request that this response maps to>,
31 "result": <Arbitrary JSON object containing the result of
32 executing the method. If the method could not be
33 executed or returned void, contains 'null'.>,
34 "error": <String containing the error thrown by executing the
35 method. If no error occurred, contains 'null'.>
36 "callback": <String that represents a callback ID used to
37 identify events associated with a particular
38 CallbackHandler object.>
39 }
40 """
41
42 from builtins import str
43
44 import json
45 import socket
46 import threading
47
48 from mobly.controllers.android_device_lib import callback_handler
49 from mobly.controllers.android_device_lib import errors
50
51 # UID of the 'unknown' jsonrpc session. Will cause creation of a new session.
52 UNKNOWN_UID = -1
53
54 # Maximum time to wait for the socket to open on the device.
55 _SOCKET_CONNECTION_TIMEOUT = 60
56
57 # Maximum time to wait for a response message on the socket.
58 _SOCKET_READ_TIMEOUT = callback_handler.MAX_TIMEOUT
59
60
61 class Error(errors.DeviceError):
62 pass
63
64
65 class AppStartError(Error):
66 """Raised when the app is not able to be started."""
67
68
69 class AppRestoreConnectionError(Error):
70 """Raised when failed to restore app from disconnection."""
71
72
73 class ApiError(Error):
74 """Raised when remote API reports an error."""
75
76
77 class ProtocolError(Error):
78 """Raised when there is some error in exchanging data with server."""
79 NO_RESPONSE_FROM_HANDSHAKE = 'No response from handshake.'
80 NO_RESPONSE_FROM_SERVER = 'No response from server.'
81 MISMATCHED_API_ID = 'Mismatched API id.'
82
83
84 class JsonRpcCommand(object):
85 """Commands that can be invoked on all jsonrpc clients.
86
87 INIT: Initializes a new session.
88 CONTINUE: Creates a connection.
89 """
90 INIT = 'initiate'
91 CONTINUE = 'continue'
92
93
94 class JsonRpcClientBase(object):
95 """Base class for jsonrpc clients that connect to remote servers.
96
97 Connects to a remote device running a jsonrpc-compatible app. Before opening
98 a connection a port forward must be setup to go over usb. This be done using
99 adb.forward([local, remote]). Once the port has been forwarded it can be
100 used in this object as the port of communication.
101
102 Attributes:
103 host_port: (int) The host port of this RPC client.
104 device_port: (int) The device port of this RPC client.
105 app_name: (str) The user-visible name of the app being communicated
106 with.
107 uid: (int) The uid of this session.
108 """
109
110 def __init__(self, app_name, ad):
111 """
112 Args:
113 app_name: (str) The user-visible name of the app being communicated
114 with.
115 ad: (AndroidDevice) The device object associated with a client.
116 """
117 self.host_port = None
118 self.device_port = None
119 self.app_name = app_name
120 self._ad = ad
121 self.log = self._ad.log
122 self.uid = None
123 self._client = None # prevent close errors on connect failure
124 self._conn = None
125 self._counter = None
126 self._lock = threading.Lock()
127 self._event_client = None
128
129 def __del__(self):
130 self.disconnect()
131
132 # Methods to be implemented by subclasses.
133
134 def start_app_and_connect(self):
135 """Starts the server app on the android device and connects to it.
136
137 After this, the self.host_port and self.device_port attributes must be
138 set.
139
140 Must be implemented by subclasses.
141
142 Raises:
143 AppStartError: When the app was not able to be started.
144 """
145 raise NotImplementedError()
146
147 def stop_app(self):
148 """Kills any running instance of the app.
149
150 Must be implemented by subclasses.
151 """
152 raise NotImplementedError()
153
154 def restore_app_connection(self, port=None):
155 """Reconnects to the app after device USB was disconnected.
156
157 Instead of creating new instance of the client:
158 - Uses the given port (or finds a new available host_port if none is
159 given).
160 - Tries to connect to remote server with selected port.
161
162 Must be implemented by subclasses.
163
164 Args:
165 port: If given, this is the host port from which to connect to remote
166 device port. If not provided, find a new available port as host
167 port.
168
169 Raises:
170 AppRestoreConnectionError: When the app was not able to be
171 reconnected.
172 """
173 raise NotImplementedError()
174
175 def _start_event_client(self):
176 """Starts a separate JsonRpc client to the same session for propagating
177 events.
178
179 This is an optional function that should only implement if the client
180 utilizes the snippet event mechanism.
181
182 Returns:
183 A JsonRpc Client object that connects to the same session as the
184 one on which this function is called.
185 """
186 raise NotImplementedError()
187
188 # Rest of the client methods.
189
190 def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):
191 """Opens a connection to a JSON RPC server.
192
193 Opens a connection to a remote client. The connection attempt will time
194 out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each
195 subsequent operation over this socket will time out after
196 _SOCKET_READ_TIMEOUT seconds as well.
197
198 Args:
199 uid: int, The uid of the session to join, or UNKNOWN_UID to start a
200 new session.
201 cmd: JsonRpcCommand, The command to use for creating the connection.
202
203 Raises:
204 IOError: Raised when the socket times out from io error
205 socket.timeout: Raised when the socket waits to long for connection.
206 ProtocolError: Raised when there is an error in the protocol.
207 """
208 self._counter = self._id_counter()
209 self._conn = socket.create_connection(('localhost', self.host_port),
210 _SOCKET_CONNECTION_TIMEOUT)
211 self._conn.settimeout(_SOCKET_READ_TIMEOUT)
212 self._client = self._conn.makefile(mode='brw')
213
214 resp = self._cmd(cmd, uid)
215 if not resp:
216 raise ProtocolError(self._ad,
217 ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
218 result = json.loads(str(resp, encoding='utf8'))
219 if result['status']:
220 self.uid = result['uid']
221 else:
222 self.uid = UNKNOWN_UID
223
224 def disconnect(self):
225 """Close the connection to the remote client."""
226 if self._conn:
227 self._conn.close()
228 self._conn = None
229
230 def _client_send(self, msg):
231 """Sends an Rpc message through the connection.
232
233 Args:
234 msg: string, the message to send.
235
236 Raises:
237 Error: a socket error occurred during the send.
238 """
239 try:
240 self._client.write(msg.encode("utf8") + b'\n')
241 self._client.flush()
242 except socket.error as e:
243 raise Error(
244 self._ad,
245 'Encountered socket error "%s" sending RPC message "%s"' %
246 (e, msg))
247
248 def _client_receive(self):
249 """Receives the server's response of an Rpc message.
250
251 Returns:
252 Raw byte string of the response.
253
254 Raises:
255 Error: a socket error occurred during the read.
256 """
257 try:
258 return self._client.readline()
259 except socket.error as e:
260 raise Error(
261 self._ad,
262 'Encountered socket error reading RPC response "%s"' % e)
263
264 def _cmd(self, command, uid=None):
265 """Send a command to the server.
266
267 Args:
268 command: str, The name of the command to execute.
269 uid: int, the uid of the session to send the command to.
270
271 Returns:
272 The line that was written back.
273 """
274 if not uid:
275 uid = self.uid
276 self._client_send(json.dumps({'cmd': command, 'uid': uid}))
277 return self._client_receive()
278
279 def _rpc(self, method, *args):
280 """Sends an rpc to the app.
281
282 Args:
283 method: str, The name of the method to execute.
284 args: any, The args of the method.
285
286 Returns:
287 The result of the rpc.
288
289 Raises:
290 ProtocolError: Something went wrong with the protocol.
291 ApiError: The rpc went through, however executed with errors.
292 """
293 with self._lock:
294 apiid = next(self._counter)
295 data = {'id': apiid, 'method': method, 'params': args}
296 request = json.dumps(data)
297 self._client_send(request)
298 response = self._client_receive()
299 if not response:
300 raise ProtocolError(self._ad,
301 ProtocolError.NO_RESPONSE_FROM_SERVER)
302 result = json.loads(str(response, encoding="utf8"))
303 if result['error']:
304 raise ApiError(self._ad, result['error'])
305 if result['id'] != apiid:
306 raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)
307 if result.get('callback') is not None:
308 if self._event_client is None:
309 self._event_client = self._start_event_client()
310 return callback_handler.CallbackHandler(
311 callback_id=result['callback'],
312 event_client=self._event_client,
313 ret_value=result['result'],
314 method_name=method,
315 ad=self._ad)
316 return result['result']
317
318 def disable_hidden_api_blacklist(self):
319 """If necessary and possible, disables hidden api blacklist."""
320 version_codename = self._ad.adb.getprop('ro.build.version.codename')
321 sdk_version = int(self._ad.adb.getprop('ro.build.version.sdk'))
322 # we check version_codename in addition to sdk_version because P builds
323 # in development report sdk_version 27, but still enforce the blacklist.
324 if self._ad.is_rootable and (sdk_version >= 28
325 or version_codename == 'P'):
326 self._ad.adb.shell(
327 'settings put global hidden_api_blacklist_exemptions "*"')
328
329 def __getattr__(self, name):
330 """Wrapper for python magic to turn method calls into RPC calls."""
331
332 def rpc_call(*args):
333 return self._rpc(name, *args)
334
335 return rpc_call
336
337 def _id_counter(self):
338 i = 0
339 while True:
340 yield i
341 i += 1
342
[end of mobly/controllers/android_device_lib/jsonrpc_client_base.py]
</code>
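One detail worth spelling out from the listing above: the `__getattr__` wrapper near the end turns any attribute access on the client into a named RPC. A minimal, self-contained sketch of that dispatch idea (simplified, no sockets; `_FakeClient` and `makeToast` are illustrative names, not part of the file):
```python
# Standalone illustration of the __getattr__ -> rpc_call dispatch used above.
# No sockets here; _rpc is stubbed out so the snippet runs on its own.
class _FakeClient:
    def _rpc(self, method, *args):
        # The real client would json.dumps({'id': ..., 'method': method, 'params': args})
        # and block until the matching response line arrives on the socket.
        return ('called', method, args)

    def __getattr__(self, name):
        def rpc_call(*args):
            return self._rpc(name, *args)
        return rpc_call


print(_FakeClient().makeToast('hello'))  # -> ('called', 'makeToast', ('hello',))
```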
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py
+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
@@ -19,7 +19,7 @@
Request:
{
- "id": <monotonically increasing integer containing the ID of
+ "id": <monotonically increasing integer containing the ID of
this request>
"method": <string containing the name of the method to execute>
"params": <JSON array containing the arguments to the method>
@@ -239,6 +239,7 @@
try:
self._client.write(msg.encode("utf8") + b'\n')
self._client.flush()
+ self.log.debug('Snippet sent %s.', msg)
except socket.error as e:
raise Error(
self._ad,
@@ -255,7 +256,9 @@
Error: a socket error occurred during the read.
"""
try:
- return self._client.readline()
+ response = self._client.readline()
+ self.log.debug('Snippet received: %s', response)
+ return response
except socket.error as e:
raise Error(
self._ad,
@@ -299,7 +302,7 @@
if not response:
raise ProtocolError(self._ad,
ProtocolError.NO_RESPONSE_FROM_SERVER)
- result = json.loads(str(response, encoding="utf8"))
+ result = json.loads(str(response, encoding='utf8'))
if result['error']:
raise ApiError(self._ad, result['error'])
if result['id'] != apiid:
|
{"golden_diff": "diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n@@ -19,7 +19,7 @@\n \n Request:\n {\n- \"id\": <monotonically increasing integer containing the ID of \n+ \"id\": <monotonically increasing integer containing the ID of\n this request>\n \"method\": <string containing the name of the method to execute>\n \"params\": <JSON array containing the arguments to the method>\n@@ -239,6 +239,7 @@\n try:\n self._client.write(msg.encode(\"utf8\") + b'\\n')\n self._client.flush()\n+ self.log.debug('Snippet sent %s.', msg)\n except socket.error as e:\n raise Error(\n self._ad,\n@@ -255,7 +256,9 @@\n Error: a socket error occurred during the read.\n \"\"\"\n try:\n- return self._client.readline()\n+ response = self._client.readline()\n+ self.log.debug('Snippet received: %s', response)\n+ return response\n except socket.error as e:\n raise Error(\n self._ad,\n@@ -299,7 +302,7 @@\n if not response:\n raise ProtocolError(self._ad,\n ProtocolError.NO_RESPONSE_FROM_SERVER)\n- result = json.loads(str(response, encoding=\"utf8\"))\n+ result = json.loads(str(response, encoding='utf8'))\n if result['error']:\n raise ApiError(self._ad, result['error'])\n if result['id'] != apiid:\n", "issue": "Log snippet client calls at debug level\nRight now there is no log for calls made to snippet client side, which makes it difficult to debug should a snippet client call hangs.\r\n\r\nOn debug level, we should log messages sent and received by the client, same way we log messages received and sent by snippet server on devices.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base class for clients that communicate with apps over a JSON RPC interface.\n\nThe JSON protocol expected by this module is:\n\n.. code-block:: json\n\n Request:\n {\n \"id\": <monotonically increasing integer containing the ID of \n this request>\n \"method\": <string containing the name of the method to execute>\n \"params\": <JSON array containing the arguments to the method>\n }\n\n Response:\n {\n \"id\": <int id of request that this response maps to>,\n \"result\": <Arbitrary JSON object containing the result of\n executing the method. If the method could not be\n executed or returned void, contains 'null'.>,\n \"error\": <String containing the error thrown by executing the\n method. If no error occurred, contains 'null'.>\n \"callback\": <String that represents a callback ID used to\n identify events associated with a particular\n CallbackHandler object.>\n }\n\"\"\"\n\nfrom builtins import str\n\nimport json\nimport socket\nimport threading\n\nfrom mobly.controllers.android_device_lib import callback_handler\nfrom mobly.controllers.android_device_lib import errors\n\n# UID of the 'unknown' jsonrpc session. 
Will cause creation of a new session.\nUNKNOWN_UID = -1\n\n# Maximum time to wait for the socket to open on the device.\n_SOCKET_CONNECTION_TIMEOUT = 60\n\n# Maximum time to wait for a response message on the socket.\n_SOCKET_READ_TIMEOUT = callback_handler.MAX_TIMEOUT\n\n\nclass Error(errors.DeviceError):\n pass\n\n\nclass AppStartError(Error):\n \"\"\"Raised when the app is not able to be started.\"\"\"\n\n\nclass AppRestoreConnectionError(Error):\n \"\"\"Raised when failed to restore app from disconnection.\"\"\"\n\n\nclass ApiError(Error):\n \"\"\"Raised when remote API reports an error.\"\"\"\n\n\nclass ProtocolError(Error):\n \"\"\"Raised when there is some error in exchanging data with server.\"\"\"\n NO_RESPONSE_FROM_HANDSHAKE = 'No response from handshake.'\n NO_RESPONSE_FROM_SERVER = 'No response from server.'\n MISMATCHED_API_ID = 'Mismatched API id.'\n\n\nclass JsonRpcCommand(object):\n \"\"\"Commands that can be invoked on all jsonrpc clients.\n\n INIT: Initializes a new session.\n CONTINUE: Creates a connection.\n \"\"\"\n INIT = 'initiate'\n CONTINUE = 'continue'\n\n\nclass JsonRpcClientBase(object):\n \"\"\"Base class for jsonrpc clients that connect to remote servers.\n\n Connects to a remote device running a jsonrpc-compatible app. Before opening\n a connection a port forward must be setup to go over usb. This be done using\n adb.forward([local, remote]). Once the port has been forwarded it can be\n used in this object as the port of communication.\n\n Attributes:\n host_port: (int) The host port of this RPC client.\n device_port: (int) The device port of this RPC client.\n app_name: (str) The user-visible name of the app being communicated\n with.\n uid: (int) The uid of this session.\n \"\"\"\n\n def __init__(self, app_name, ad):\n \"\"\"\n Args:\n app_name: (str) The user-visible name of the app being communicated\n with.\n ad: (AndroidDevice) The device object associated with a client.\n \"\"\"\n self.host_port = None\n self.device_port = None\n self.app_name = app_name\n self._ad = ad\n self.log = self._ad.log\n self.uid = None\n self._client = None # prevent close errors on connect failure\n self._conn = None\n self._counter = None\n self._lock = threading.Lock()\n self._event_client = None\n\n def __del__(self):\n self.disconnect()\n\n # Methods to be implemented by subclasses.\n\n def start_app_and_connect(self):\n \"\"\"Starts the server app on the android device and connects to it.\n\n After this, the self.host_port and self.device_port attributes must be\n set.\n\n Must be implemented by subclasses.\n\n Raises:\n AppStartError: When the app was not able to be started.\n \"\"\"\n raise NotImplementedError()\n\n def stop_app(self):\n \"\"\"Kills any running instance of the app.\n\n Must be implemented by subclasses.\n \"\"\"\n raise NotImplementedError()\n\n def restore_app_connection(self, port=None):\n \"\"\"Reconnects to the app after device USB was disconnected.\n\n Instead of creating new instance of the client:\n - Uses the given port (or finds a new available host_port if none is\n given).\n - Tries to connect to remote server with selected port.\n\n Must be implemented by subclasses.\n\n Args:\n port: If given, this is the host port from which to connect to remote\n device port. 
If not provided, find a new available port as host\n port.\n\n Raises:\n AppRestoreConnectionError: When the app was not able to be\n reconnected.\n \"\"\"\n raise NotImplementedError()\n\n def _start_event_client(self):\n \"\"\"Starts a separate JsonRpc client to the same session for propagating\n events.\n\n This is an optional function that should only implement if the client\n utilizes the snippet event mechanism.\n\n Returns:\n A JsonRpc Client object that connects to the same session as the\n one on which this function is called.\n \"\"\"\n raise NotImplementedError()\n\n # Rest of the client methods.\n\n def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):\n \"\"\"Opens a connection to a JSON RPC server.\n\n Opens a connection to a remote client. The connection attempt will time\n out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each\n subsequent operation over this socket will time out after\n _SOCKET_READ_TIMEOUT seconds as well.\n\n Args:\n uid: int, The uid of the session to join, or UNKNOWN_UID to start a\n new session.\n cmd: JsonRpcCommand, The command to use for creating the connection.\n\n Raises:\n IOError: Raised when the socket times out from io error\n socket.timeout: Raised when the socket waits to long for connection.\n ProtocolError: Raised when there is an error in the protocol.\n \"\"\"\n self._counter = self._id_counter()\n self._conn = socket.create_connection(('localhost', self.host_port),\n _SOCKET_CONNECTION_TIMEOUT)\n self._conn.settimeout(_SOCKET_READ_TIMEOUT)\n self._client = self._conn.makefile(mode='brw')\n\n resp = self._cmd(cmd, uid)\n if not resp:\n raise ProtocolError(self._ad,\n ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n result = json.loads(str(resp, encoding='utf8'))\n if result['status']:\n self.uid = result['uid']\n else:\n self.uid = UNKNOWN_UID\n\n def disconnect(self):\n \"\"\"Close the connection to the remote client.\"\"\"\n if self._conn:\n self._conn.close()\n self._conn = None\n\n def _client_send(self, msg):\n \"\"\"Sends an Rpc message through the connection.\n\n Args:\n msg: string, the message to send.\n\n Raises:\n Error: a socket error occurred during the send.\n \"\"\"\n try:\n self._client.write(msg.encode(\"utf8\") + b'\\n')\n self._client.flush()\n except socket.error as e:\n raise Error(\n self._ad,\n 'Encountered socket error \"%s\" sending RPC message \"%s\"' %\n (e, msg))\n\n def _client_receive(self):\n \"\"\"Receives the server's response of an Rpc message.\n\n Returns:\n Raw byte string of the response.\n\n Raises:\n Error: a socket error occurred during the read.\n \"\"\"\n try:\n return self._client.readline()\n except socket.error as e:\n raise Error(\n self._ad,\n 'Encountered socket error reading RPC response \"%s\"' % e)\n\n def _cmd(self, command, uid=None):\n \"\"\"Send a command to the server.\n\n Args:\n command: str, The name of the command to execute.\n uid: int, the uid of the session to send the command to.\n\n Returns:\n The line that was written back.\n \"\"\"\n if not uid:\n uid = self.uid\n self._client_send(json.dumps({'cmd': command, 'uid': uid}))\n return self._client_receive()\n\n def _rpc(self, method, *args):\n \"\"\"Sends an rpc to the app.\n\n Args:\n method: str, The name of the method to execute.\n args: any, The args of the method.\n\n Returns:\n The result of the rpc.\n\n Raises:\n ProtocolError: Something went wrong with the protocol.\n ApiError: The rpc went through, however executed with errors.\n \"\"\"\n with self._lock:\n apiid = next(self._counter)\n data = 
{'id': apiid, 'method': method, 'params': args}\n request = json.dumps(data)\n self._client_send(request)\n response = self._client_receive()\n if not response:\n raise ProtocolError(self._ad,\n ProtocolError.NO_RESPONSE_FROM_SERVER)\n result = json.loads(str(response, encoding=\"utf8\"))\n if result['error']:\n raise ApiError(self._ad, result['error'])\n if result['id'] != apiid:\n raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)\n if result.get('callback') is not None:\n if self._event_client is None:\n self._event_client = self._start_event_client()\n return callback_handler.CallbackHandler(\n callback_id=result['callback'],\n event_client=self._event_client,\n ret_value=result['result'],\n method_name=method,\n ad=self._ad)\n return result['result']\n\n def disable_hidden_api_blacklist(self):\n \"\"\"If necessary and possible, disables hidden api blacklist.\"\"\"\n version_codename = self._ad.adb.getprop('ro.build.version.codename')\n sdk_version = int(self._ad.adb.getprop('ro.build.version.sdk'))\n # we check version_codename in addition to sdk_version because P builds\n # in development report sdk_version 27, but still enforce the blacklist.\n if self._ad.is_rootable and (sdk_version >= 28\n or version_codename == 'P'):\n self._ad.adb.shell(\n 'settings put global hidden_api_blacklist_exemptions \"*\"')\n\n def __getattr__(self, name):\n \"\"\"Wrapper for python magic to turn method calls into RPC calls.\"\"\"\n\n def rpc_call(*args):\n return self._rpc(name, *args)\n\n return rpc_call\n\n def _id_counter(self):\n i = 0\n while True:\n yield i\n i += 1\n", "path": "mobly/controllers/android_device_lib/jsonrpc_client_base.py"}]}
| 4,009 | 384 |
gh_patches_debug_10704
|
rasdani/github-patches
|
git_diff
|
facebookresearch__fairseq-4805
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`
This is a no-op in eager and in ONNX export, but it's better for other
tracers if this is preserved as shapes directly instead of converted to
a tensor.
There is a little annoying code duplication with
`torch.jit.is_scripting()`, which is unfortunately necessary because we
didn't implement compile-time short circuiting correctly in TorchScript
lol.
</issue>
<code>
[start of fairseq/modules/sinusoidal_positional_embedding.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import math
7 from typing import Any, Optional
8
9 import torch
10 import torch.onnx.operators
11 from fairseq import utils
12 from torch import Tensor, nn
13
14
15 class SinusoidalPositionalEmbedding(nn.Module):
16 """This module produces sinusoidal positional embeddings of any length.
17
18 Padding symbols are ignored.
19 """
20
21 def __init__(self, embedding_dim, padding_idx, init_size=1024):
22 super().__init__()
23 self.embedding_dim = embedding_dim
24 self.padding_idx = padding_idx if padding_idx is not None else 0
25 self.weights = SinusoidalPositionalEmbedding.get_embedding(
26 init_size, embedding_dim, padding_idx
27 )
28 self.onnx_trace = False
29 self.register_buffer("_float_tensor", torch.FloatTensor(1))
30 self.max_positions = int(1e5)
31
32 def prepare_for_onnx_export_(self):
33 self.onnx_trace = True
34
35 @staticmethod
36 def get_embedding(
37 num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
38 ):
39 """Build sinusoidal embeddings.
40
41 This matches the implementation in tensor2tensor, but differs slightly
42 from the description in Section 3.5 of "Attention Is All You Need".
43 """
44 half_dim = embedding_dim // 2
45 emb = math.log(10000) / (half_dim - 1)
46 emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
47 emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
48 1
49 ) * emb.unsqueeze(0)
50 emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
51 num_embeddings, -1
52 )
53 if embedding_dim % 2 == 1:
54 # zero pad
55 emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
56 if padding_idx is not None:
57 emb[padding_idx, :] = 0
58 return emb
59
60 def forward(
61 self,
62 input,
63 incremental_state: Optional[Any] = None,
64 timestep: Optional[Tensor] = None,
65 positions: Optional[Any] = None,
66 ):
67 """Input is expected to be of size [bsz x seqlen]."""
68 if torch.jit.is_scripting():
69 bspair = torch.onnx.operators.shape_as_tensor(input)
70 elif torch.onnx.is_in_onnx_export():
71 bspair = torch.onnx.operators.shape_as_tensor(input)
72 else:
73 bspair = input.size()
74 bsz, seq_len = bspair[0], bspair[1]
75 max_pos = self.padding_idx + 1 + seq_len
76 if self.weights is None or max_pos > self.weights.size(0):
77 # recompute/expand embeddings if needed
78 self.weights = SinusoidalPositionalEmbedding.get_embedding(
79 max_pos, self.embedding_dim, self.padding_idx
80 )
81 self.weights = self.weights.to(self._float_tensor)
82
83 if incremental_state is not None:
84 # positions is the same for every token when decoding a single step
85 pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
86 if self.onnx_trace:
87 return (
88 self.weights.index_select(index=self.padding_idx + pos, dim=0)
89 .unsqueeze(1)
90 .repeat(bsz, 1, 1)
91 )
92 return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
93
94 positions = utils.make_positions(
95 input, self.padding_idx, onnx_trace=self.onnx_trace
96 )
97 if self.onnx_trace:
98 flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
99 embedding_shape = torch.cat(
100 (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
101 )
102 embeddings = torch.onnx.operators.reshape_from_tensor_shape(
103 flat_embeddings, embedding_shape
104 )
105 return embeddings
106 return (
107 self.weights.index_select(0, positions.view(-1))
108 .view(bsz, seq_len, -1)
109 .detach()
110 )
111
[end of fairseq/modules/sinusoidal_positional_embedding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py
--- a/fairseq/modules/sinusoidal_positional_embedding.py
+++ b/fairseq/modules/sinusoidal_positional_embedding.py
@@ -97,7 +97,7 @@
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
- (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
+ (bsz, seq_len, torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
|
{"golden_diff": "diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py\n--- a/fairseq/modules/sinusoidal_positional_embedding.py\n+++ b/fairseq/modules/sinusoidal_positional_embedding.py\n@@ -97,7 +97,7 @@\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n- (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n+ (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n", "issue": "[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`\nThis is a no-op in eager and in ONNX export, but it's better for other\ntracers if this is preserved as shapes directly instead of converted to\na tensor.\n\nThere is a little annoying code duplication with\n`torch.jit.is_scripting()`, which is unforunately necessary because we\ndidn't implement compile-time short circuiting correctly in TorchScript\nlol.\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.onnx.operators\nfrom fairseq import utils\nfrom torch import Tensor, nn\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx if padding_idx is not None else 0\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size, embedding_dim, padding_idx\n )\n self.onnx_trace = False\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n self.max_positions = int(1e5)\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(\n num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None\n ):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(\n 1\n ) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(\n num_embeddings, -1\n )\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(\n self,\n input,\n incremental_state: Optional[Any] = None,\n timestep: Optional[Tensor] = None,\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n if torch.jit.is_scripting():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n elif torch.onnx.is_in_onnx_export():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n else:\n bspair = input.size()\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = 
SinusoidalPositionalEmbedding.get_embedding(\n max_pos, self.embedding_dim, self.padding_idx\n )\n self.weights = self.weights.to(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n if self.onnx_trace:\n return (\n self.weights.index_select(index=self.padding_idx + pos, dim=0)\n .unsqueeze(1)\n .repeat(bsz, 1, 1)\n )\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(\n input, self.padding_idx, onnx_trace=self.onnx_trace\n )\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n )\n return embeddings\n return (\n self.weights.index_select(0, positions.view(-1))\n .view(bsz, seq_len, -1)\n .detach()\n )\n", "path": "fairseq/modules/sinusoidal_positional_embedding.py"}]}
| 1,824 | 164 |
gh_patches_debug_8031
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-22664
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
syslog_json callback doesn't cast SYSLOG_PORT value from environment as integer
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
##### COMPONENT NAME
<!--- Name of the module/plugin/task/feature -->
syslog_json callback plugin
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.2.1.0
```
##### CONFIGURATION
Specifying SYSLOG_PORT environment variable to target a non-default port:
SYSLOG_PORT=1514
##### OS / ENVIRONMENT
Redhat 6 (but this should affect any UNIX platform)
##### SUMMARY
The syslog_json callback plugin allows configuration of its target port by specifying the SYSLOG_PORT environment variable; however, it doesn't cast the value it obtains from get_env as int, so specifying a port number this way results in errors on each task and no syslog output:
Traceback (most recent call last):
File "/usr/lib64/python2.6/logging/handlers.py", line 806, in emit
self.socket.sendto(msg, self.address)
TypeError: an integer is required
##### STEPS TO REPRODUCE
For any playbook, specify the syslog_json callback plugin and the syslog port environment variable:
<!--- Paste example playbooks or commands between quotes below -->
```yaml
ANSIBLE_STDOUT_CALLBACK=syslog_json SYSLOG_PORT=1514 ansible-playbook playbook.yml
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
No output to stdout, JSON output directed to syslog for each task.
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes below -->
```
Traceback (most recent call last):
File "/usr/lib64/python2.6/logging/handlers.py", line 806, in emit
self.socket.sendto(msg, self.address)
TypeError: an integer is required
```
</issue>
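A standalone illustration of the failure mode described above (a sketch, not code from the repository): values read from the environment are always strings, so the port needs an explicit cast before it can be used in a socket address.
```python
import os

# With SYSLOG_PORT=1514 set in the environment this returns the *string* '1514';
# only the fallback default (514) is an int.
port = os.getenv('SYSLOG_PORT', 514)

# Casting makes the address tuple safe for SysLogHandler / socket.sendto,
# which require an integer port.
address = ('localhost', int(port))
print(address)
```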
<code>
[start of lib/ansible/plugins/callback/syslog_json.py]
1 # Make coding more python3-ish
2 from __future__ import (absolute_import, division, print_function)
3 __metaclass__ = type
4
5 import os
6 import json
7
8 import logging
9 import logging.handlers
10
11 import socket
12
13 from ansible.plugins.callback import CallbackBase
14
15 class CallbackModule(CallbackBase):
16 """
17 logs ansible-playbook and ansible runs to a syslog server in json format
18 make sure you have in ansible.cfg:
19 callback_plugins = <path_to_callback_plugins_folder>
20 and put the plugin in <path_to_callback_plugins_folder>
21
22 This plugin makes use of the following environment variables:
23 SYSLOG_SERVER (optional): defaults to localhost
24 SYSLOG_PORT (optional): defaults to 514
25 SYSLOG_FACILITY (optional): defaults to user
26 """
27 CALLBACK_VERSION = 2.0
28 CALLBACK_TYPE = 'aggregate'
29 CALLBACK_NAME = 'syslog_json'
30 CALLBACK_NEEDS_WHITELIST = True
31
32 def __init__(self):
33
34 super(CallbackModule, self).__init__()
35
36 self.logger = logging.getLogger('ansible logger')
37 self.logger.setLevel(logging.DEBUG)
38
39 self.handler = logging.handlers.SysLogHandler(
40 address = (os.getenv('SYSLOG_SERVER','localhost'),
41 os.getenv('SYSLOG_PORT',514)),
42 facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER)
43 )
44 self.logger.addHandler(self.handler)
45 self.hostname = socket.gethostname()
46
47
48 def runner_on_failed(self, host, res, ignore_errors=False):
49 self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
50
51 def runner_on_ok(self, host, res):
52 self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
53
54 def runner_on_skipped(self, host, item=None):
55 self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped'))
56
57 def runner_on_unreachable(self, host, res):
58 self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
59
60 def runner_on_async_failed(self, host, res, jid):
61 self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
62
63 def playbook_on_import_for_host(self, host, imported_file):
64 self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname,host,imported_file))
65
66 def playbook_on_not_import_for_host(self, host, missing_file):
67 self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname,host,missing_file))
68
[end of lib/ansible/plugins/callback/syslog_json.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py
--- a/lib/ansible/plugins/callback/syslog_json.py
+++ b/lib/ansible/plugins/callback/syslog_json.py
@@ -38,7 +38,7 @@
self.handler = logging.handlers.SysLogHandler(
address = (os.getenv('SYSLOG_SERVER','localhost'),
- os.getenv('SYSLOG_PORT',514)),
+ int(os.getenv('SYSLOG_PORT',514))),
facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER)
)
self.logger.addHandler(self.handler)
|
{"golden_diff": "diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py\n--- a/lib/ansible/plugins/callback/syslog_json.py\n+++ b/lib/ansible/plugins/callback/syslog_json.py\n@@ -38,7 +38,7 @@\n \n self.handler = logging.handlers.SysLogHandler(\n address = (os.getenv('SYSLOG_SERVER','localhost'),\n- os.getenv('SYSLOG_PORT',514)),\n+ int(os.getenv('SYSLOG_PORT',514))),\n facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER)\n )\n self.logger.addHandler(self.handler)\n", "issue": "syslog_json callback doesn't cast SYSLOG_PORT value from environment as integer\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Name of the module/plugin/task/feature -->\r\nsyslog_json callback plugin\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\r\n```\r\nansible 2.2.1.0\r\n```\r\n\r\n##### CONFIGURATION\r\nSpecifying SYSLOG_PORT environment variable to target a non-default port:\r\nSYSLOG_PORT=1514\r\n\r\n##### OS / ENVIRONMENT\r\nRedhat 6 (but this should affect any UNIX platform)\r\n\r\n##### SUMMARY\r\nThe syslog_json callback plugin allows configuration of its target port by specifying the SYSLOG_PORT environment variable; however, it doesn't cast the value it obtains from get_env as int, so specifying a port number this way results in errors on each task and no syslog output:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python2.6/logging/handlers.py\", line 806, in emit\r\n self.socket.sendto(msg, self.address)\r\nTypeError: an integer is required\r\n\r\n##### STEPS TO REPRODUCE\r\nFor any playbook, specify the syslog_json callback plugin and the syslog port environment variable:\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\nANSIBLE_STDOUT_CALLBACK=syslog_json SYSLOG_PORT=1514 ansible-playbook playbook.yml\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nNo output to stdout, JSON output directed to syslog for each task.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? 
If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python2.6/logging/handlers.py\", line 806, in emit\r\n self.socket.sendto(msg, self.address)\r\nTypeError: an integer is required\r\n```\r\n\n", "before_files": [{"content": "# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport json\n\nimport logging\nimport logging.handlers\n\nimport socket\n\nfrom ansible.plugins.callback import CallbackBase\n\nclass CallbackModule(CallbackBase):\n \"\"\"\n logs ansible-playbook and ansible runs to a syslog server in json format\n make sure you have in ansible.cfg:\n callback_plugins = <path_to_callback_plugins_folder>\n and put the plugin in <path_to_callback_plugins_folder>\n\n This plugin makes use of the following environment variables:\n SYSLOG_SERVER (optional): defaults to localhost\n SYSLOG_PORT (optional): defaults to 514\n SYSLOG_FACILITY (optional): defaults to user\n \"\"\"\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'aggregate'\n CALLBACK_NAME = 'syslog_json'\n CALLBACK_NEEDS_WHITELIST = True\n\n def __init__(self):\n\n super(CallbackModule, self).__init__()\n\n self.logger = logging.getLogger('ansible logger')\n self.logger.setLevel(logging.DEBUG)\n\n self.handler = logging.handlers.SysLogHandler(\n address = (os.getenv('SYSLOG_SERVER','localhost'),\n os.getenv('SYSLOG_PORT',514)),\n facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER)\n )\n self.logger.addHandler(self.handler)\n self.hostname = socket.gethostname()\n\n\n def runner_on_failed(self, host, res, ignore_errors=False):\n self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def runner_on_ok(self, host, res):\n self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def runner_on_skipped(self, host, item=None):\n self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped'))\n\n def runner_on_unreachable(self, host, res):\n self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def runner_on_async_failed(self, host, res, jid):\n self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def playbook_on_import_for_host(self, host, imported_file):\n self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname,host,imported_file))\n\n def playbook_on_not_import_for_host(self, host, missing_file):\n self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname,host,missing_file))\n", "path": "lib/ansible/plugins/callback/syslog_json.py"}]}
| 1,791 | 140 |
gh_patches_debug_18136
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-343
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow testing of examples not parameterized by 'num_epochs'
The AIR example isn't run by the inference testing machinery, as it doesn't have a `--num-epochs` argument.
I could add such an argument, but this isn't a very natural parameterization for this example. Also, running an entire epoch for each example seems likely to be overkill in some cases, including AIR. (Assuming an epoch is a pass through the whole dataset.)
One idea is that the test should look for examples with a dedicated `--ci-test` argument or similar. This is a bit more flexible, though we might pay for this with a little extra code in each example.
</issue>
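As a rough sketch of the `--ci-test` idea floated above (illustrative only; the flag name and the tiny values are assumptions, not code from the repository), such a flag could simply shrink the run when present, reusing the `num_steps` and `batch_size` arguments that `main.py` below already defines:
```python
# Hypothetical sketch, assuming the `parser` built in examples/air/main.py below.
parser.add_argument('--ci-test', action='store_true', default=False,
                    help='run a very small configuration suitable for CI smoke tests')
args = parser.parse_args()
if args.ci_test:
    args.num_steps = 2   # a couple of optimization steps is enough to exercise the code
    args.batch_size = 8  # tiny batches keep the smoke test fast
```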
<code>
[start of examples/air/main.py]
1 """
2 This attempts (currently unsuccessfully) to reproduce some of the
3 results on the multi-mnist data set described in [1].
4
5 [1] Eslami, SM Ali, et al. "Attend, infer, repeat: Fast scene
6 understanding with generative models." Advances in Neural Information
7 Processing Systems. 2016.
8 """
9
10 import os
11 import time
12 import argparse
13 from functools import partial
14 import numpy as np
15
16 import torch
17 from torch.autograd import Variable
18
19 import pyro
20 import pyro.optim as optim
21 import pyro.poutine as poutine
22 from pyro.infer import SVI
23
24 import visdom
25
26 from air import AIR
27 from viz import draw_many, post_process_latents
28
29 parser = argparse.ArgumentParser(description="Pyro AIR example", argument_default=argparse.SUPPRESS)
30 parser.add_argument('-n', '--num-steps', type=int, default=int(1e8),
31 help='number of optimization steps to take')
32 parser.add_argument('-b', '--batch-size', type=int, default=64,
33 help='batch size')
34 parser.add_argument('--progress-every', type=int, default=1,
35 help='number of steps between writing progress to stdout')
36 parser.add_argument('--baseline-scalar', type=float,
37 help='scale the output of the baseline nets by this value')
38 parser.add_argument('--no-baselines', action='store_true', default=False,
39 help='do not use data dependent baselines')
40 parser.add_argument('--encoder-net', type=int, nargs='+', default=[200],
41 help='encoder net hidden layer sizes')
42 parser.add_argument('--decoder-net', type=int, nargs='+', default=[200],
43 help='decoder net hidden layer sizes')
44 parser.add_argument('--predict-net', type=int, nargs='+',
45 help='predict net hidden layer sizes')
46 parser.add_argument('--embed-net', type=int, nargs='+',
47 help='embed net architecture')
48 parser.add_argument('--bl-predict-net', type=int, nargs='+',
49 help='baseline predict net hidden layer sizes')
50 parser.add_argument('--non-linearity', type=str,
51 help='non linearity to use throughout')
52 parser.add_argument('--viz', action='store_true', default=False,
53 help='generate vizualizations during optimization')
54 parser.add_argument('--viz-every', type=int, default=100,
55 help='number of steps between vizualizations')
56 parser.add_argument('--visdom-env', default='main',
57 help='visdom enviroment name')
58 # parser.add_argument('--checkpoint', action='store_true',
59 # help='periodically persist parameters')
60 # parser.add_argument('--checkpoint-every', type=int, default=1000,
61 # help='number of steps between checkpoints')
62 parser.add_argument('--cuda', action='store_true', default=False,
63 help='use cuda')
64 parser.add_argument('-t', '--model-steps', type=int, default=3,
65 help='number of time steps')
66 parser.add_argument('--rnn-hidden-size', type=int, default=256,
67 help='rnn hidden size')
68 parser.add_argument('--encoder-latent-size', type=int, default=50,
69 help='attention window encoder/decoder latent space size')
70 parser.add_argument('--decoder-output-bias', type=float,
71 help='bias added to decoder output (prior to applying non-linearity)')
72 parser.add_argument('--window-size', type=int, default=28,
73 help='attention window size')
74 parser.add_argument('--z-pres-prior', type=float, default=None,
75 help='prior success probability for z_pres')
76 parser.add_argument('--anneal-prior', action='store_true', default=False,
77 help='anneal z_pres prior during optimization')
78 parser.add_argument('--anneal-prior-from', type=float, default=0.99,
79 help='initial z_pres prior prob')
80 parser.add_argument('--anneal-prior-over', type=int, default=100000,
81 help='number of steps over which to anneal the prior')
82 parser.add_argument('--no-masking', action='store_true', default=False,
83 help='do not mask out the costs of unused choices')
84 parser.add_argument('--fudge-z-pres', action='store_true', default=False,
85 help='fudge z_pres to remove discreteness for testing')
86 parser.add_argument('--seed', type=int, help='random seed', default=None)
87 parser.add_argument('--print-modules', action='store_true',
88 help='write the network architecture to stdout')
89
90 args = parser.parse_args()
91 # print(args)
92
93 if args.seed is not None:
94 pyro.set_rng_seed(args.seed)
95
96 # Load data.
97 infile = './data/multi_mnist_train_uint8.npz'
98 if not os.path.exists(infile):
99 print('Could not find the dataset at {}'.format(infile))
100 print('Run "python multi_mnist.py" to generate it.')
101 exit()
102 X_np = np.load(infile)['x'].astype(np.float32)
103 X_np /= 255.0
104 X = Variable(torch.from_numpy(X_np))
105 X_size = X.size(0)
106 if args.cuda:
107 X = X.cuda()
108
109
110 # Yields the following distribution over the number of steps (when
111 # taking a maximum of 3 steps):
112 # p(0) = 0.4
113 # p(1) = 0.3
114 # p(2) = 0.2
115 # p(3) = 0.1
116 def default_z_pres_prior_p(t):
117 if t == 0:
118 return 0.6
119 elif t == 1:
120 return 0.5
121 else:
122 return 0.33
123
124
125 # Implements "prior annealing" as described in this blog post:
126 # http://akosiorek.github.io/ml/2017/09/03/implementing-air.html
127 def annealed_z_pres_prior_p(opt_step, time_step):
128 p_0 = args.anneal_prior_from
129 p_final = args.z_pres_prior or default_z_pres_prior_p(time_step)
130 s = min(opt_step / args.anneal_prior_over, 1.0)
131 return p_final * s + p_0 * (1 - s)
132
133
134 def z_pres_prior_p(opt_step, time_step):
135 if args.anneal_prior:
136 return annealed_z_pres_prior_p(opt_step, time_step)
137 elif args.z_pres_prior:
138 return args.z_pres_prior
139 else:
140 return default_z_pres_prior_p(time_step)
141
142
143 model_arg_keys = ['window_size',
144 'rnn_hidden_size',
145 'decoder_output_bias',
146 'baseline_scalar',
147 'encoder_net',
148 'decoder_net',
149 'predict_net',
150 'embed_net',
151 'bl_predict_net',
152 'non_linearity',
153 'fudge_z_pres',
154 'print_modules']
155 model_args = {key: getattr(args, key) for key in model_arg_keys if key in args}
156 air = AIR(
157 num_steps=args.model_steps,
158 x_size=50,
159 use_masking=not args.no_masking,
160 use_baselines=not args.no_baselines,
161 z_what_size=args.encoder_latent_size,
162 use_cuda=args.cuda,
163 **model_args
164 )
165
166 vis = visdom.Visdom(env=args.visdom_env)
167 # Viz sample from prior.
168 if args.viz:
169 z, x = air.prior(5, z_pres_prior_p=partial(z_pres_prior_p, 0))
170 vis.images(draw_many(x, post_process_latents(z)))
171
172 t0 = time.time()
173 examples_to_viz = X[9:14]
174
175
176 # Do inference.
177 def per_param_optim_args(module_name, param_name, tags):
178 lr = 1e-3 if 'baseline' in tags else 1e-4
179 return {'lr': lr}
180
181
182 svi = SVI(air.model, air.guide,
183 optim.Adam(per_param_optim_args),
184 loss='ELBO',
185 trace_graph=True)
186
187 for i in range(args.num_steps):
188
189 loss = svi.step(X, args.batch_size, z_pres_prior_p=partial(z_pres_prior_p, i))
190
191 if i % args.progress_every == 0:
192 print('i={}, epochs={:.2f}, elapsed={:.2f}, elbo={:.2f}'.format(
193 i,
194 (i * args.batch_size) / X_size,
195 (time.time() - t0) / 3600,
196 loss / X_size))
197
198 if args.viz and (i + 1) % args.viz_every == 0:
199 trace = poutine.trace(air.guide).get_trace(examples_to_viz, 0)
200 z, recons = poutine.replay(air.prior, trace)(examples_to_viz.size(0))
201 z_wheres = post_process_latents(z)
202
203 # Show data with inferred objection positions.
204 vis.images(draw_many(examples_to_viz, z_wheres))
205 # Show reconstructions of data.
206 vis.images(draw_many(recons, z_wheres))
207
208 # TODO: Report accuracy on predictions of object counts.
209
[end of examples/air/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/air/main.py b/examples/air/main.py
--- a/examples/air/main.py
+++ b/examples/air/main.py
@@ -8,9 +8,11 @@
"""
import os
+import sys
import time
import argparse
from functools import partial
+from subprocess import check_call
import numpy as np
import torch
@@ -96,9 +98,10 @@
# Load data.
infile = './data/multi_mnist_train_uint8.npz'
if not os.path.exists(infile):
- print('Could not find the dataset at {}'.format(infile))
- print('Run "python multi_mnist.py" to generate it.')
- exit()
+ print('Running multi_mnist.py to generate dataset at {}...'.format(infile))
+ multi_mnist_py = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'multi_mnist.py')
+ check_call([sys.executable, multi_mnist_py])
+ print('Finished running multi_mnist.py.')
X_np = np.load(infile)['x'].astype(np.float32)
X_np /= 255.0
X = Variable(torch.from_numpy(X_np))
|
{"golden_diff": "diff --git a/examples/air/main.py b/examples/air/main.py\n--- a/examples/air/main.py\n+++ b/examples/air/main.py\n@@ -8,9 +8,11 @@\n \"\"\"\n \n import os\n+import sys\n import time\n import argparse\n from functools import partial\n+from subprocess import check_call\n import numpy as np\n \n import torch\n@@ -96,9 +98,10 @@\n # Load data.\n infile = './data/multi_mnist_train_uint8.npz'\n if not os.path.exists(infile):\n- print('Could not find the dataset at {}'.format(infile))\n- print('Run \"python multi_mnist.py\" to generate it.')\n- exit()\n+ print('Running multi_mnist.py to generate dataset at {}...'.format(infile))\n+ multi_mnist_py = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'multi_mnist.py')\n+ check_call([sys.executable, multi_mnist_py])\n+ print('Finished running multi_mnist.py.')\n X_np = np.load(infile)['x'].astype(np.float32)\n X_np /= 255.0\n X = Variable(torch.from_numpy(X_np))\n", "issue": "Allow testing of examples not parameterized by 'num_epochs'\nThe AIR example isn't run by the inference testing machinery, as it doesn't have a `--num-epochs` argument.\r\n\r\nI could add such an argument, but this isn't a very natural parameterization for this example. Also, running an entire epoch for each example seems likely to be overkill in some cases, including AIR. (Assuming an epoch is a pass through the whole dataset.)\r\n\r\nOne idea is that the test should look for examples with a dedicated`--ci-test` argument or similar. This is a bit more flexible, though we might pay for this with a little extra code in each example.\n", "before_files": [{"content": "\"\"\"\nThis attempts (currently unsuccessfully) to reproduce some of the\nresults on the multi-mnist data set described in [1].\n\n[1] Eslami, SM Ali, et al. \"Attend, infer, repeat: Fast scene\nunderstanding with generative models.\" Advances in Neural Information\nProcessing Systems. 
2016.\n\"\"\"\n\nimport os\nimport time\nimport argparse\nfrom functools import partial\nimport numpy as np\n\nimport torch\nfrom torch.autograd import Variable\n\nimport pyro\nimport pyro.optim as optim\nimport pyro.poutine as poutine\nfrom pyro.infer import SVI\n\nimport visdom\n\nfrom air import AIR\nfrom viz import draw_many, post_process_latents\n\nparser = argparse.ArgumentParser(description=\"Pyro AIR example\", argument_default=argparse.SUPPRESS)\nparser.add_argument('-n', '--num-steps', type=int, default=int(1e8),\n help='number of optimization steps to take')\nparser.add_argument('-b', '--batch-size', type=int, default=64,\n help='batch size')\nparser.add_argument('--progress-every', type=int, default=1,\n help='number of steps between writing progress to stdout')\nparser.add_argument('--baseline-scalar', type=float,\n help='scale the output of the baseline nets by this value')\nparser.add_argument('--no-baselines', action='store_true', default=False,\n help='do not use data dependent baselines')\nparser.add_argument('--encoder-net', type=int, nargs='+', default=[200],\n help='encoder net hidden layer sizes')\nparser.add_argument('--decoder-net', type=int, nargs='+', default=[200],\n help='decoder net hidden layer sizes')\nparser.add_argument('--predict-net', type=int, nargs='+',\n help='predict net hidden layer sizes')\nparser.add_argument('--embed-net', type=int, nargs='+',\n help='embed net architecture')\nparser.add_argument('--bl-predict-net', type=int, nargs='+',\n help='baseline predict net hidden layer sizes')\nparser.add_argument('--non-linearity', type=str,\n help='non linearity to use throughout')\nparser.add_argument('--viz', action='store_true', default=False,\n help='generate vizualizations during optimization')\nparser.add_argument('--viz-every', type=int, default=100,\n help='number of steps between vizualizations')\nparser.add_argument('--visdom-env', default='main',\n help='visdom enviroment name')\n# parser.add_argument('--checkpoint', action='store_true',\n# help='periodically persist parameters')\n# parser.add_argument('--checkpoint-every', type=int, default=1000,\n# help='number of steps between checkpoints')\nparser.add_argument('--cuda', action='store_true', default=False,\n help='use cuda')\nparser.add_argument('-t', '--model-steps', type=int, default=3,\n help='number of time steps')\nparser.add_argument('--rnn-hidden-size', type=int, default=256,\n help='rnn hidden size')\nparser.add_argument('--encoder-latent-size', type=int, default=50,\n help='attention window encoder/decoder latent space size')\nparser.add_argument('--decoder-output-bias', type=float,\n help='bias added to decoder output (prior to applying non-linearity)')\nparser.add_argument('--window-size', type=int, default=28,\n help='attention window size')\nparser.add_argument('--z-pres-prior', type=float, default=None,\n help='prior success probability for z_pres')\nparser.add_argument('--anneal-prior', action='store_true', default=False,\n help='anneal z_pres prior during optimization')\nparser.add_argument('--anneal-prior-from', type=float, default=0.99,\n help='initial z_pres prior prob')\nparser.add_argument('--anneal-prior-over', type=int, default=100000,\n help='number of steps over which to anneal the prior')\nparser.add_argument('--no-masking', action='store_true', default=False,\n help='do not mask out the costs of unused choices')\nparser.add_argument('--fudge-z-pres', action='store_true', default=False,\n help='fudge z_pres to remove discreteness for 
testing')\nparser.add_argument('--seed', type=int, help='random seed', default=None)\nparser.add_argument('--print-modules', action='store_true',\n help='write the network architecture to stdout')\n\nargs = parser.parse_args()\n# print(args)\n\nif args.seed is not None:\n pyro.set_rng_seed(args.seed)\n\n# Load data.\ninfile = './data/multi_mnist_train_uint8.npz'\nif not os.path.exists(infile):\n print('Could not find the dataset at {}'.format(infile))\n print('Run \"python multi_mnist.py\" to generate it.')\n exit()\nX_np = np.load(infile)['x'].astype(np.float32)\nX_np /= 255.0\nX = Variable(torch.from_numpy(X_np))\nX_size = X.size(0)\nif args.cuda:\n X = X.cuda()\n\n\n# Yields the following distribution over the number of steps (when\n# taking a maximum of 3 steps):\n# p(0) = 0.4\n# p(1) = 0.3\n# p(2) = 0.2\n# p(3) = 0.1\ndef default_z_pres_prior_p(t):\n if t == 0:\n return 0.6\n elif t == 1:\n return 0.5\n else:\n return 0.33\n\n\n# Implements \"prior annealing\" as described in this blog post:\n# http://akosiorek.github.io/ml/2017/09/03/implementing-air.html\ndef annealed_z_pres_prior_p(opt_step, time_step):\n p_0 = args.anneal_prior_from\n p_final = args.z_pres_prior or default_z_pres_prior_p(time_step)\n s = min(opt_step / args.anneal_prior_over, 1.0)\n return p_final * s + p_0 * (1 - s)\n\n\ndef z_pres_prior_p(opt_step, time_step):\n if args.anneal_prior:\n return annealed_z_pres_prior_p(opt_step, time_step)\n elif args.z_pres_prior:\n return args.z_pres_prior\n else:\n return default_z_pres_prior_p(time_step)\n\n\nmodel_arg_keys = ['window_size',\n 'rnn_hidden_size',\n 'decoder_output_bias',\n 'baseline_scalar',\n 'encoder_net',\n 'decoder_net',\n 'predict_net',\n 'embed_net',\n 'bl_predict_net',\n 'non_linearity',\n 'fudge_z_pres',\n 'print_modules']\nmodel_args = {key: getattr(args, key) for key in model_arg_keys if key in args}\nair = AIR(\n num_steps=args.model_steps,\n x_size=50,\n use_masking=not args.no_masking,\n use_baselines=not args.no_baselines,\n z_what_size=args.encoder_latent_size,\n use_cuda=args.cuda,\n **model_args\n)\n\nvis = visdom.Visdom(env=args.visdom_env)\n# Viz sample from prior.\nif args.viz:\n z, x = air.prior(5, z_pres_prior_p=partial(z_pres_prior_p, 0))\n vis.images(draw_many(x, post_process_latents(z)))\n\nt0 = time.time()\nexamples_to_viz = X[9:14]\n\n\n# Do inference.\ndef per_param_optim_args(module_name, param_name, tags):\n lr = 1e-3 if 'baseline' in tags else 1e-4\n return {'lr': lr}\n\n\nsvi = SVI(air.model, air.guide,\n optim.Adam(per_param_optim_args),\n loss='ELBO',\n trace_graph=True)\n\nfor i in range(args.num_steps):\n\n loss = svi.step(X, args.batch_size, z_pres_prior_p=partial(z_pres_prior_p, i))\n\n if i % args.progress_every == 0:\n print('i={}, epochs={:.2f}, elapsed={:.2f}, elbo={:.2f}'.format(\n i,\n (i * args.batch_size) / X_size,\n (time.time() - t0) / 3600,\n loss / X_size))\n\n if args.viz and (i + 1) % args.viz_every == 0:\n trace = poutine.trace(air.guide).get_trace(examples_to_viz, 0)\n z, recons = poutine.replay(air.prior, trace)(examples_to_viz.size(0))\n z_wheres = post_process_latents(z)\n\n # Show data with inferred objection positions.\n vis.images(draw_many(examples_to_viz, z_wheres))\n # Show reconstructions of data.\n vis.images(draw_many(recons, z_wheres))\n\n # TODO: Report accuracy on predictions of object counts.\n", "path": "examples/air/main.py"}]}
| 3,146 | 254 |
gh_patches_debug_26977
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-781
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Registration form needs help text
To be a valid online user, the user must register with their student email. This is not indicated in the registration form.
</issue>
<code>
[start of apps/authentication/forms.py]
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import re
5
6 from django import forms
7 from django.contrib import auth
8 from django.utils.translation import ugettext as _
9
10 from apps.authentication.models import OnlineUser as User, Email
11
12 class LoginForm(forms.Form):
13 username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50)
14 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Passord"))
15 user = None
16
17 def clean(self):
18 if self._errors:
19 return
20
21 user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])
22
23 if user:
24 if user.is_active:
25 self.user = user
26 else:
27 self._errors['username'] = self.error_class([_(u"Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")])
28 else:
29 self._errors['username'] = self.error_class([_(u"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")])
30 return self.cleaned_data
31
32 def login(self, request):
33 try:
34 User.objects.get(username=request.POST['username'])
35 except:
36 return False
37 if self.is_valid():
38 auth.login(request, self.user)
39 return True
40 return False
41
42 class RegisterForm(forms.Form):
43 username = forms.CharField(label=_("Brukernavn"), max_length=20)
44 first_name = forms.CharField(label=_("Fornavn"), max_length=50)
45 last_name = forms.CharField(label=_("Etternavn"), max_length=50)
46 email = forms.EmailField(label=_("Epost"), max_length=50)
47 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord"))
48 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Gjenta passord"))
49 address = forms.CharField(label=_("Adresse"), max_length=100, required=False)
50 zip_code = forms.CharField(label=_("Postnummer"), max_length=4, required=False)
51 phone = forms.CharField(label=_("Telefon"), max_length=20, required=False)
52
53 def clean(self):
54 super(RegisterForm, self).clean()
55 if self.is_valid():
56 cleaned_data = self.cleaned_data
57
58 # Check passwords
59 if cleaned_data['password'] != cleaned_data['repeat_password']:
60 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
61
62 # Check username
63 username = cleaned_data['username']
64 if User.objects.filter(username=username).count() > 0:
65 self._errors['username'] = self.error_class([_(u"Brukernavnet er allerede registrert.")])
66 if not re.match("^[a-zA-Z0-9_-]+$", username):
67 self._errors['username'] = self.error_class([_(u"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _")])
68
69 # Check email
70 email = cleaned_data['email'].lower()
71 if Email.objects.filter(email=email).count() > 0:
72 self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")])
73
74 # ZIP code digits only
75 zip_code = cleaned_data['zip_code']
76 if len(zip_code) != 0:
77 if len(zip_code) != 4 or not zip_code.isdigit():
78 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")])
79
80 return cleaned_data
81
82 class RecoveryForm(forms.Form):
83 email = forms.EmailField(label="Email", max_length=50)
84
85 class ChangePasswordForm(forms.Form):
86 new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Nytt passord"))
87 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Gjenta passord"))
88
89 def clean(self):
90 super(ChangePasswordForm, self).clean()
91 if self.is_valid():
92 cleaned_data = self.cleaned_data
93
94 # Check passwords
95 if cleaned_data['new_password'] != cleaned_data['repeat_password']:
96 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
97
98 return cleaned_data
99
100
101 class NewEmailForm(forms.Form):
102 new_email = forms.EmailField(label=_(u"ny epostadresse"))
103
[end of apps/authentication/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py
--- a/apps/authentication/forms.py
+++ b/apps/authentication/forms.py
@@ -40,14 +40,14 @@
return False
class RegisterForm(forms.Form):
- username = forms.CharField(label=_("Brukernavn"), max_length=20)
- first_name = forms.CharField(label=_("Fornavn"), max_length=50)
+ username = forms.CharField(label=_("Brukernavn"), max_length=20, help_text=u'Valgfritt brukernavn')
+ first_name = forms.CharField(label=_("Fornavn"), max_length=50, help_text=u'Mellomnavn inkluderer du etter fornavnet ditt')
last_name = forms.CharField(label=_("Etternavn"), max_length=50)
- email = forms.EmailField(label=_("Epost"), max_length=50)
+ email = forms.EmailField(label=_("Epost"), max_length=50, help_text=u'Du kan legge til flere epostadresser senere i din profil.')
password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord"))
repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Gjenta passord"))
- address = forms.CharField(label=_("Adresse"), max_length=100, required=False)
- zip_code = forms.CharField(label=_("Postnummer"), max_length=4, required=False)
+ address = forms.CharField(label=_("Adresse"), max_length=100, required=False, widget=forms.Textarea(attrs={'rows':3}))
+ zip_code = forms.CharField(label=_("Postnummer"), max_length=4, required=False, help_text=u'Vi henter by basert på postnummer')
phone = forms.CharField(label=_("Telefon"), max_length=20, required=False)
def clean(self):
|
{"golden_diff": "diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py\n--- a/apps/authentication/forms.py\n+++ b/apps/authentication/forms.py\n@@ -40,14 +40,14 @@\n return False\n \n class RegisterForm(forms.Form):\n- username = forms.CharField(label=_(\"Brukernavn\"), max_length=20)\n- first_name = forms.CharField(label=_(\"Fornavn\"), max_length=50)\n+ username = forms.CharField(label=_(\"Brukernavn\"), max_length=20, help_text=u'Valgfritt brukernavn')\n+ first_name = forms.CharField(label=_(\"Fornavn\"), max_length=50, help_text=u'Mellomnavn inkluderer du etter fornavnet ditt')\n last_name = forms.CharField(label=_(\"Etternavn\"), max_length=50)\n- email = forms.EmailField(label=_(\"Epost\"), max_length=50)\n+ email = forms.EmailField(label=_(\"Epost\"), max_length=50, help_text=u'Du kan legge til flere epostadresser senere i din profil.')\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Gjenta passord\"))\n- address = forms.CharField(label=_(\"Adresse\"), max_length=100, required=False)\n- zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4, required=False)\n+ address = forms.CharField(label=_(\"Adresse\"), max_length=100, required=False, widget=forms.Textarea(attrs={'rows':3}))\n+ zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4, required=False, help_text=u'Vi henter by basert p\u00e5 postnummer')\n phone = forms.CharField(label=_(\"Telefon\"), max_length=20, required=False)\n \n def clean(self):\n", "issue": "Registration form needs help text\nTo be a valid online user the user must register with their student emails. This is not indicated in the registration form.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser as User, Email\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Passord\"))\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([_(u\"Din konto er ikke aktiv. 
Fors\u00f8k gjenoppretning av passord.\")])\n else:\n self._errors['username'] = self.error_class([_(u\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=_(\"Brukernavn\"), max_length=20)\n first_name = forms.CharField(label=_(\"Fornavn\"), max_length=50)\n last_name = forms.CharField(label=_(\"Etternavn\"), max_length=50)\n email = forms.EmailField(label=_(\"Epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Gjenta passord\"))\n address = forms.CharField(label=_(\"Adresse\"), max_length=100, required=False)\n zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4, required=False)\n phone = forms.CharField(label=_(\"Telefon\"), max_length=20, required=False)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([_(u\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([_(u\"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _\")])\n\n # Check email\n email = cleaned_data['email'].lower()\n if Email.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0:\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Nytt passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Gjenta passord\"))\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n return cleaned_data\n\n\nclass NewEmailForm(forms.Form):\n new_email = forms.EmailField(label=_(u\"ny epostadresse\"))\n", "path": "apps/authentication/forms.py"}]}
| 1,744 | 402 |
gh_patches_debug_8005
|
rasdani/github-patches
|
git_diff
|
Cloud-CV__EvalAI-1881
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove RabbitMQ config settings
Since we have removed RabbitMQ as a message broker, we would also like to remove the settings for it in the `settings/common.py`.
</issue>
<code>
[start of settings/common.py]
1 """
2 Django settings for evalai project.
3
4 Generated by 'django-admin startproject' using Django 1.10.2.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.10/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.10/ref/settings/
11 """
12
13 import datetime
14 import os
15 import sys
16
17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19 APPS_DIR = os.path.join(BASE_DIR, 'apps')
20
21 sys.path.append(APPS_DIR)
22
23 # Quick-start development settings - unsuitable for production
24 # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
25
26 # SECURITY WARNING: keep the secret key used in production secret!
27 SECRET_KEY = os.environ.get('SECRET_KEY', 'random_secret_key')
28
29 # SECURITY WARNING: don't run with debug turned on in production!
30 DEBUG = True
31
32 TEST = False
33
34 ALLOWED_HOSTS = []
35
36
37 # Application definition
38
39 DEFAULT_APPS = [
40 'django.contrib.admin',
41 'django.contrib.auth',
42 'django.contrib.contenttypes',
43 'django.contrib.sessions',
44 'django.contrib.messages',
45 'django.contrib.staticfiles',
46 'django.contrib.sites',
47 ]
48
49 OUR_APPS = [
50 'accounts',
51 'analytics',
52 'base',
53 'challenges',
54 'hosts',
55 'jobs',
56 'participants',
57 'web',
58 ]
59
60 THIRD_PARTY_APPS = [
61 'allauth',
62 'allauth.account',
63 'corsheaders',
64 'django_ses',
65 'import_export',
66 'rest_auth',
67 'rest_auth.registration',
68 'rest_framework.authtoken',
69 'rest_framework',
70 'rest_framework_docs',
71 'rest_framework_expiring_authtoken',
72 'drf_yasg',
73 ]
74
75 INSTALLED_APPS = DEFAULT_APPS + OUR_APPS + THIRD_PARTY_APPS
76
77 MIDDLEWARE = [
78 'corsheaders.middleware.CorsMiddleware',
79 'django.middleware.security.SecurityMiddleware',
80 'django.contrib.sessions.middleware.SessionMiddleware',
81 'django.middleware.common.CommonMiddleware',
82 'django.middleware.csrf.CsrfViewMiddleware',
83 'django.contrib.auth.middleware.AuthenticationMiddleware',
84 'django.contrib.messages.middleware.MessageMiddleware',
85 'django.middleware.clickjacking.XFrameOptionsMiddleware',
86 ]
87
88 ROOT_URLCONF = 'evalai.urls'
89
90
91 TEMPLATES = [
92 {
93 'BACKEND': 'django.template.backends.django.DjangoTemplates',
94 'DIRS': [],
95 'APP_DIRS': True,
96 'OPTIONS': {
97 'context_processors': [
98 'django.template.context_processors.debug',
99 'django.template.context_processors.request',
100 'django.contrib.auth.context_processors.auth',
101 'django.contrib.messages.context_processors.messages',
102 ],
103 },
104 },
105 ]
106
107 WSGI_APPLICATION = 'evalai.wsgi.application'
108
109
110 # Password validation
111 # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
112
113 AUTH_PASSWORD_VALIDATORS = [
114 {
115 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa
116 },
117 {
118 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa
119 },
120 {
121 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa
122 },
123 {
124 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa
125 },
126 ]
127
128
129 # Internationalization
130 # https://docs.djangoproject.com/en/1.10/topics/i18n/
131
132 LANGUAGE_CODE = 'en-us'
133
134 TIME_ZONE = 'UTC'
135
136 USE_I18N = True
137
138 USE_L10N = True
139
140 USE_TZ = True
141
142 # Static files (CSS, JavaScript, Images)
143 # https://docs.djangoproject.com/en/1.10/howto/static-files/
144
145 STATIC_URL = '/static/'
146 STATIC_ROOT = os.path.join(BASE_DIR, 'static')
147 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
148 MEDIA_URL = "/media/"
149
150 SITE_ID = 1
151
152 REST_FRAMEWORK = {
153 'DEFAULT_PAGINATION_CLASS': (
154 'rest_framework.pagination.LimitOffsetPagination'),
155 'PAGE_SIZE': 10,
156 'DEFAULT_PERMISSION_CLASSES': [
157 'rest_framework.permissions.IsAuthenticatedOrReadOnly'
158 ],
159 'DEFAULT_AUTHENTICATION_CLASSES': [
160 'rest_framework_expiring_authtoken.authentication.ExpiringTokenAuthentication',
161 ],
162 'TEST_REQUEST_DEFAULT_FORMAT': 'json',
163 'DEFAULT_THROTTLE_CLASSES': (
164 'rest_framework.throttling.AnonRateThrottle',
165 'rest_framework.throttling.UserRateThrottle'
166 ),
167 'DEFAULT_THROTTLE_RATES': {
168 'anon': '100/minute',
169 'user': '100/minute'
170 },
171 'DEFAULT_RENDERER_CLASSES': (
172 'rest_framework.renderers.JSONRenderer',
173 )
174 }
175
176 # ALLAUTH SETTINGS
177 ACCOUNT_EMAIL_REQUIRED = True
178 OLD_PASSWORD_FIELD_ENABLED = True
179 ACCOUNT_CONFIRM_EMAIL_ON_GET = True
180 ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/api/auth/email-confirmed/'
181 ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/api/auth/email-confirmed/'
182
183 AUTHENTICATION_BACKENDS = (
184 # Needed to login by username in Django admin, regardless of `allauth`
185 'django.contrib.auth.backends.ModelBackend',
186 # `allauth` specific authentication methods, such as login by e-mail
187 'allauth.account.auth_backends.AuthenticationBackend',
188 )
189
190 # CORS Settings
191 CORS_ORIGIN_ALLOW_ALL = True
192
193 # REST Framework Expiring Tokens Configuration
194 EXPIRING_TOKEN_LIFESPAN = datetime.timedelta(days=7)
195
196 # Logging
197 LOGGING = {
198 'version': 1,
199 'disable_existing_loggers': False,
200 'root': {
201 'level': 'INFO',
202 'handlers': ['console'],
203 },
204 'filters': {
205 'require_debug_false': {
206 '()': 'django.utils.log.RequireDebugFalse',
207 },
208 'require_debug_true': {
209 '()': 'django.utils.log.RequireDebugTrue',
210 }
211 },
212 'formatters': {
213 'simple': {
214 'format': '[%(asctime)s] %(levelname)s %(message)s',
215 'datefmt': '%Y-%m-%d %H:%M:%S'
216 },
217 'verbose': {
218 'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
219 'datefmt': '%Y-%m-%d %H:%M:%S'
220 }
221 },
222 'handlers': {
223 'console': {
224 'level': 'INFO',
225 'filters': ['require_debug_true'],
226 'class': 'logging.StreamHandler',
227 'formatter': 'simple'
228 },
229 'logfile': {
230 'level': 'DEBUG',
231 'class': 'logging.handlers.RotatingFileHandler',
232 'filename': os.path.join(BASE_DIR, 'django.log'),
233 'maxBytes': 50000,
234 'backupCount': 10,
235 'formatter': 'verbose'
236 },
237 'mail_admins': {
238 'level': 'ERROR',
239 'class': 'django.utils.log.AdminEmailHandler',
240 'filters': ['require_debug_false'],
241 }
242 },
243 'loggers': {
244 'django': {
245 'handlers': ['console'],
246 'propagate': False,
247 },
248 'django.request': {
249 'handlers': ['mail_admins'],
250 'level': 'ERROR',
251 'propagate': False,
252 },
253 'django.security': {
254 'handlers': ['mail_admins'],
255 'level': 'ERROR',
256 'propagate': False,
257 },
258 'django.db.backends': {
259 'handlers': ['mail_admins'],
260 'level': 'ERROR',
261 'propagate': False,
262 }
263 }
264 }
265
266 CACHES = {
267 'default': {
268 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
269 }
270 }
271
272 RABBITMQ_PARAMETERS = {
273 'HOST': os.environ.get("RABBITMQ_HOST", 'localhost'),
274 'EVALAI_EXCHANGE': {
275 'NAME': 'evalai_submissions',
276 'TYPE': 'topic',
277 },
278 'SUBMISSION_QUEUE': 'submission_task_queue',
279 }
280
281 # The maximum size in bytes for request body
282 # https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size
283 FILE_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB
284 DATA_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB
285
286 # To make usermame field read-only, customized serializer is defined.
287 REST_AUTH_SERIALIZERS = {
288 'USER_DETAILS_SERIALIZER': 'accounts.serializers.ProfileSerializer',
289 }
290
291 # For inviting users to participant and host teams.
292 ADMIN_EMAIL = "[email protected]"
293 CLOUDCV_TEAM_EMAIL = "EvalAI Team <[email protected]>"
294
295 SWAGGER_SETTINGS = {
296 'DEFAULT_INFO': 'evalai.urls.swagger_api_info',
297 'SECURITY_DEFINITIONS': {
298 'Token Authentication': {
299 'type': 'apiKey',
300 'name': 'Authorization',
301 'in': 'header'
302 },
303 }
304 }
305
306 REDOC_SETTINGS = {
307 'SPEC_URL': ('docs.yaml', {'format': '.yaml'}),
308 }
309
[end of settings/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/settings/common.py b/settings/common.py
--- a/settings/common.py
+++ b/settings/common.py
@@ -269,15 +269,6 @@
}
}
-RABBITMQ_PARAMETERS = {
- 'HOST': os.environ.get("RABBITMQ_HOST", 'localhost'),
- 'EVALAI_EXCHANGE': {
- 'NAME': 'evalai_submissions',
- 'TYPE': 'topic',
- },
- 'SUBMISSION_QUEUE': 'submission_task_queue',
-}
-
# The maximum size in bytes for request body
# https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size
FILE_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB
|
{"golden_diff": "diff --git a/settings/common.py b/settings/common.py\n--- a/settings/common.py\n+++ b/settings/common.py\n@@ -269,15 +269,6 @@\n }\n }\n \n-RABBITMQ_PARAMETERS = {\n- 'HOST': os.environ.get(\"RABBITMQ_HOST\", 'localhost'),\n- 'EVALAI_EXCHANGE': {\n- 'NAME': 'evalai_submissions',\n- 'TYPE': 'topic',\n- },\n- 'SUBMISSION_QUEUE': 'submission_task_queue',\n-}\n-\n # The maximum size in bytes for request body\n # https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size\n FILE_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB\n", "issue": "Remove RabbitMQ config settings\nSince we have removed RabbitMQ as a message broker, we would also like to remove the settings for it in the `settings/common.py`.\n", "before_files": [{"content": "\"\"\"\nDjango settings for evalai project.\n\nGenerated by 'django-admin startproject' using Django 1.10.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\n\nimport datetime\nimport os\nimport sys\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nAPPS_DIR = os.path.join(BASE_DIR, 'apps')\n\nsys.path.append(APPS_DIR)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'random_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEST = False\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nDEFAULT_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n]\n\nOUR_APPS = [\n 'accounts',\n 'analytics',\n 'base',\n 'challenges',\n 'hosts',\n 'jobs',\n 'participants',\n 'web',\n]\n\nTHIRD_PARTY_APPS = [\n 'allauth',\n 'allauth.account',\n 'corsheaders',\n 'django_ses',\n 'import_export',\n 'rest_auth',\n 'rest_auth.registration',\n 'rest_framework.authtoken',\n 'rest_framework',\n 'rest_framework_docs',\n 'rest_framework_expiring_authtoken',\n 'drf_yasg',\n]\n\nINSTALLED_APPS = DEFAULT_APPS + OUR_APPS + THIRD_PARTY_APPS\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'evalai.urls'\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'evalai.wsgi.application'\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = \"/media/\"\n\nSITE_ID = 1\n\nREST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': (\n 'rest_framework.pagination.LimitOffsetPagination'),\n 'PAGE_SIZE': 10,\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly'\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework_expiring_authtoken.authentication.ExpiringTokenAuthentication',\n ],\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'DEFAULT_THROTTLE_CLASSES': (\n 'rest_framework.throttling.AnonRateThrottle',\n 'rest_framework.throttling.UserRateThrottle'\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'anon': '100/minute',\n 'user': '100/minute'\n },\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n )\n}\n\n# ALLAUTH SETTINGS\nACCOUNT_EMAIL_REQUIRED = True\nOLD_PASSWORD_FIELD_ENABLED = True\nACCOUNT_CONFIRM_EMAIL_ON_GET = True\nACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/api/auth/email-confirmed/'\nACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/api/auth/email-confirmed/'\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# REST Framework Expiring Tokens Configuration\nEXPIRING_TOKEN_LIFESPAN = datetime.timedelta(days=7)\n\n# Logging\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'root': {\n 'level': 'INFO',\n 'handlers': ['console'],\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n }\n },\n 'formatters': {\n 'simple': {\n 'format': '[%(asctime)s] %(levelname)s %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n 'verbose': {\n 'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'logfile': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(BASE_DIR, 'django.log'),\n 'maxBytes': 50000,\n 'backupCount': 10,\n 'formatter': 'verbose'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false'],\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'django.security': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 
'propagate': False,\n },\n 'django.db.backends': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n }\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n }\n}\n\nRABBITMQ_PARAMETERS = {\n 'HOST': os.environ.get(\"RABBITMQ_HOST\", 'localhost'),\n 'EVALAI_EXCHANGE': {\n 'NAME': 'evalai_submissions',\n 'TYPE': 'topic',\n },\n 'SUBMISSION_QUEUE': 'submission_task_queue',\n}\n\n# The maximum size in bytes for request body\n# https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size\nFILE_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB\nDATA_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB\n\n# To make usermame field read-only, customized serializer is defined.\nREST_AUTH_SERIALIZERS = {\n 'USER_DETAILS_SERIALIZER': 'accounts.serializers.ProfileSerializer',\n}\n\n# For inviting users to participant and host teams.\nADMIN_EMAIL = \"[email protected]\"\nCLOUDCV_TEAM_EMAIL = \"EvalAI Team <[email protected]>\"\n\nSWAGGER_SETTINGS = {\n 'DEFAULT_INFO': 'evalai.urls.swagger_api_info',\n 'SECURITY_DEFINITIONS': {\n 'Token Authentication': {\n 'type': 'apiKey',\n 'name': 'Authorization',\n 'in': 'header'\n },\n }\n}\n\nREDOC_SETTINGS = {\n 'SPEC_URL': ('docs.yaml', {'format': '.yaml'}),\n}\n", "path": "settings/common.py"}]}
| 3,406 | 170 |
gh_patches_debug_39715
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-443
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor start page context creation
I got a strange error on the Start page:
```
Traceback (most recent call last):
File "/home/calitp/.local/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/home/calitp/.local/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/calitp/.local/lib/python3.9/site-packages/django/utils/decorators.py", line 130, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "/home/calitp/.local/lib/python3.9/site-packages/django/utils/decorators.py", line 130, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "/home/calitp/app/benefits/eligibility/views.py", line 127, in start
ctx["media"].insert(0, auth_media)
Exception Type: UnboundLocalError at /eligibility/start
Exception Value: local variable 'auth_media' referenced before assignment
```
Maybe now that the media list refactor and the login button are merged into dev, we can clean up the logic and context creation for this view?
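
For context, the failure reduces to a branch-scoping bug: a variable is assigned only under a narrower condition but later read under a broader one. A minimal, illustrative sketch of the pattern (names are made up; this is not the actual view code):

```
def start(requires_authentication, already_authenticated):
    if requires_authentication and not already_authenticated:
        auth_media = {"heading": "sign-in info"}  # only bound on this branch
    media = []
    if requires_authentication:
        # If the user is already authenticated, the branch above was skipped,
        # so auth_media was never bound and this line raises UnboundLocalError.
        media.insert(0, auth_media)
    return media
```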
## Steps to recreate
1. From start page, click "Continue with Login.gov" button
2. Sign in with Login.gov account
3. Land on confirm page (DMV form)
4. Change URL manually to `/eligibility` (the verifier select page)
5. Select "Senior Discount Program", click "Continue"
6. 💥
</issue>
<code>
[start of benefits/eligibility/views.py]
1 """
2 The eligibility application: view definitions for the eligibility verification flow.
3 """
4 from django.contrib import messages
5 from django.shortcuts import redirect
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.decorators import decorator_from_middleware
9 from django.utils.translation import pgettext, gettext as _
10
11 from benefits.core import middleware, recaptcha, session, viewmodels
12 from benefits.core.models import EligibilityVerifier
13 from benefits.core.views import PageTemplateResponse
14 from benefits.settings import OAUTH_CLIENT_NAME
15 from . import analytics, api, forms
16
17
18 @decorator_from_middleware(middleware.AgencySessionRequired)
19 def index(request):
20 """View handler for the eligibility verifier selection form."""
21
22 session.update(request, eligibility_types=[], origin=reverse("eligibility:index"))
23 agency = session.agency(request)
24
25 eligibility_start = reverse("eligibility:start")
26
27 page = viewmodels.Page(
28 title=_("eligibility.pages.index.title"),
29 content_title=_("eligibility.pages.index.content_title"),
30 forms=forms.EligibilityVerifierSelectionForm(agency=agency),
31 )
32
33 if request.method == "POST":
34 form = forms.EligibilityVerifierSelectionForm(data=request.POST, agency=agency)
35
36 if form.is_valid():
37 verifier_id = form.cleaned_data.get("verifier")
38 verifier = EligibilityVerifier.objects.get(id=verifier_id)
39 session.update(request, verifier=verifier)
40
41 response = redirect(eligibility_start)
42 else:
43 # form was not valid, allow for correction/resubmission
44 page.forms = [form]
45 response = PageTemplateResponse(request, page)
46 else:
47 if agency.eligibility_verifiers.count() == 1:
48 verifier = agency.eligibility_verifiers.first()
49 session.update(request, verifier=verifier)
50 response = redirect(eligibility_start)
51 else:
52 response = PageTemplateResponse(request, page)
53
54 return response
55
56
57 @decorator_from_middleware(middleware.AgencySessionRequired)
58 @decorator_from_middleware(middleware.VerifierSessionRequired)
59 def start(request):
60 """View handler for the eligibility verification getting started screen."""
61
62 session.update(request, eligibility_types=[], origin=reverse("eligibility:start"))
63 verifier = session.verifier(request)
64
65 if verifier.requires_authentication and not session.auth(request):
66 if OAUTH_CLIENT_NAME is None:
67 raise Exception("EligibilityVerifier requires authentication, but OAUTH_CLIENT_NAME is None")
68
69 auth_provider = verifier.auth_provider
70 button = viewmodels.Button.external(
71 text=_(auth_provider.sign_in_button_label),
72 url=reverse("oauth:login"),
73 id="login",
74 )
75 auth_media = dict(
76 icon=viewmodels.Icon("idscreencheck", pgettext("image alt text", "core.icons.idscreencheck")),
77 heading=_("eligibility.media.heading"),
78 details=_("eligibility.media.details"),
79 links=[
80 viewmodels.Button.link(
81 classes="btn-text btn-link",
82 text=_("eligibility.media.link_text"),
83 url=_("eligibility.media.link_url"),
84 target="_blank",
85 rel="noopener noreferrer",
86 )
87 ],
88 )
89 else:
90 button = viewmodels.Button.primary(text=_("eligibility.buttons.continue"), url=reverse("eligibility:confirm"))
91
92 page = viewmodels.Page(
93 title=_("eligibility.pages.start.title"),
94 noimage=True,
95 paragraphs=[_(verifier.start_blurb)],
96 button=button,
97 )
98
99 ctx = page.context_dict()
100 ctx["title"] = _(verifier.start_content_title)
101 ctx["media"] = [
102 dict(
103 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
104 heading=_(verifier.start_item_name),
105 details=_(verifier.start_item_description),
106 ),
107 dict(
108 icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
109 heading=_("eligibility.pages.start.items[1].title"),
110 details=_("eligibility.pages.start.items[1].text"),
111 links=[
112 viewmodels.Button.link(
113 classes="btn-text btn-link",
114 text=_("eligibility.pages.start.items[1].button[0].link"),
115 url=_("eligibility.pages.start.items[1].button[0].url"),
116 ),
117 viewmodels.Button.link(
118 classes="btn-text btn-link",
119 text=_("eligibility.pages.start.items[1].button[1].link"),
120 url=_("eligibility.pages.start.items[1].button[1].url"),
121 ),
122 ],
123 ),
124 ]
125
126 if verifier.requires_authentication:
127 ctx["media"].insert(0, auth_media)
128
129 return TemplateResponse(request, "eligibility/start.html", ctx)
130
131
132 @decorator_from_middleware(middleware.AgencySessionRequired)
133 @decorator_from_middleware(middleware.RateLimit)
134 @decorator_from_middleware(middleware.VerifierSessionRequired)
135 def confirm(request):
136 """View handler for the eligibility verification form."""
137
138 verifier = session.verifier(request)
139
140 page = viewmodels.Page(
141 title=_(verifier.form_title),
142 content_title=_(verifier.form_content_title),
143 paragraphs=[_(verifier.form_blurb)],
144 form=forms.EligibilityVerificationForm(auto_id=True, label_suffix="", verifier=verifier),
145 classes="text-lg-center",
146 )
147
148 if request.method == "POST":
149 analytics.started_eligibility(request)
150
151 form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)
152 response = _verify(request, form)
153
154 if response is None:
155 # form was not valid, allow for correction/resubmission
156 analytics.returned_error(request, form.errors)
157 page.forms = [form]
158 response = PageTemplateResponse(request, page)
159 elif session.eligible(request):
160 eligibility = session.eligibility(request)
161 response = verified(request, [eligibility.name])
162 else:
163 response = PageTemplateResponse(request, page)
164
165 return response
166
167
168 def _verify(request, form):
169 """Helper calls the eligibility verification API with user input."""
170
171 if not form.is_valid():
172 if recaptcha.has_error(form):
173 messages.error(request, "Recaptcha failed. Please try again.")
174 return None
175
176 sub, name = form.cleaned_data.get("sub"), form.cleaned_data.get("name")
177
178 agency = session.agency(request)
179 verifier = session.verifier(request)
180 client = api.Client(agency, verifier)
181
182 response = client.verify(sub, name)
183
184 if response.error and any(response.error):
185 form.add_api_errors(response.error)
186 return None
187 elif any(response.eligibility):
188 return verified(request, response.eligibility)
189 else:
190 return unverified(request)
191
192
193 @decorator_from_middleware(middleware.AgencySessionRequired)
194 def verified(request, verified_types):
195 """View handler for the verified eligibility page."""
196
197 analytics.returned_success(request)
198
199 enrollment_index = reverse("enrollment:index")
200 session.update(request, eligibility_types=verified_types, origin=enrollment_index)
201
202 return redirect(enrollment_index)
203
204
205 @decorator_from_middleware(middleware.AgencySessionRequired)
206 @decorator_from_middleware(middleware.VerifierSessionRequired)
207 def unverified(request):
208 """View handler for the unverified eligibility page."""
209
210 analytics.returned_fail(request)
211
212 # tel: link to agency phone number
213 agency = session.agency(request)
214 buttons = viewmodels.Button.agency_contact_links(agency)
215
216 verifier = session.verifier(request)
217
218 page = viewmodels.Page(
219 title=_(verifier.unverified_title),
220 content_title=_(verifier.unverified_content_title),
221 icon=viewmodels.Icon("idcardquestion", pgettext("image alt text", "core.icons.idcardquestion")),
222 paragraphs=[_(verifier.unverified_blurb), _("eligibility.pages.unverified.p[1]")],
223 buttons=buttons,
224 classes="text-lg-center",
225 )
226
227 return PageTemplateResponse(request, page)
228
[end of benefits/eligibility/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py
--- a/benefits/eligibility/views.py
+++ b/benefits/eligibility/views.py
@@ -62,43 +62,8 @@
session.update(request, eligibility_types=[], origin=reverse("eligibility:start"))
verifier = session.verifier(request)
- if verifier.requires_authentication and not session.auth(request):
- if OAUTH_CLIENT_NAME is None:
- raise Exception("EligibilityVerifier requires authentication, but OAUTH_CLIENT_NAME is None")
-
- auth_provider = verifier.auth_provider
- button = viewmodels.Button.external(
- text=_(auth_provider.sign_in_button_label),
- url=reverse("oauth:login"),
- id="login",
- )
- auth_media = dict(
- icon=viewmodels.Icon("idscreencheck", pgettext("image alt text", "core.icons.idscreencheck")),
- heading=_("eligibility.media.heading"),
- details=_("eligibility.media.details"),
- links=[
- viewmodels.Button.link(
- classes="btn-text btn-link",
- text=_("eligibility.media.link_text"),
- url=_("eligibility.media.link_url"),
- target="_blank",
- rel="noopener noreferrer",
- )
- ],
- )
- else:
- button = viewmodels.Button.primary(text=_("eligibility.buttons.continue"), url=reverse("eligibility:confirm"))
-
- page = viewmodels.Page(
- title=_("eligibility.pages.start.title"),
- noimage=True,
- paragraphs=[_(verifier.start_blurb)],
- button=button,
- )
-
- ctx = page.context_dict()
- ctx["title"] = _(verifier.start_content_title)
- ctx["media"] = [
+ button = viewmodels.Button.primary(text=_("eligibility.buttons.continue"), url=reverse("eligibility:confirm"))
+ media = [
dict(
icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
heading=_(verifier.start_item_name),
@@ -124,7 +89,44 @@
]
if verifier.requires_authentication:
- ctx["media"].insert(0, auth_media)
+ if OAUTH_CLIENT_NAME is None:
+ raise Exception("EligibilityVerifier requires authentication, but OAUTH_CLIENT_NAME is None")
+
+ media.insert(
+ 0,
+ dict(
+ icon=viewmodels.Icon("idscreencheck", pgettext("image alt text", "core.icons.idscreencheck")),
+ heading=_("eligibility.media.heading"),
+ details=_("eligibility.media.details"),
+ links=[
+ viewmodels.Button.link(
+ classes="btn-text btn-link",
+ text=_("eligibility.media.link_text"),
+ url=_("eligibility.media.link_url"),
+ target="_blank",
+ rel="noopener noreferrer",
+ )
+ ],
+ ),
+ )
+
+ if not session.auth(request):
+ button = viewmodels.Button.external(
+ text=_(verifier.auth_provider.sign_in_button_label),
+ url=reverse("oauth:login"),
+ id="login",
+ )
+
+ page = viewmodels.Page(
+ title=_("eligibility.pages.start.title"),
+ noimage=True,
+ paragraphs=[_(verifier.start_blurb)],
+ button=button,
+ )
+
+ ctx = page.context_dict()
+ ctx["title"] = _(verifier.start_content_title)
+ ctx["media"] = media
return TemplateResponse(request, "eligibility/start.html", ctx)
|
{"golden_diff": "diff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py\n--- a/benefits/eligibility/views.py\n+++ b/benefits/eligibility/views.py\n@@ -62,43 +62,8 @@\n session.update(request, eligibility_types=[], origin=reverse(\"eligibility:start\"))\n verifier = session.verifier(request)\n \n- if verifier.requires_authentication and not session.auth(request):\n- if OAUTH_CLIENT_NAME is None:\n- raise Exception(\"EligibilityVerifier requires authentication, but OAUTH_CLIENT_NAME is None\")\n-\n- auth_provider = verifier.auth_provider\n- button = viewmodels.Button.external(\n- text=_(auth_provider.sign_in_button_label),\n- url=reverse(\"oauth:login\"),\n- id=\"login\",\n- )\n- auth_media = dict(\n- icon=viewmodels.Icon(\"idscreencheck\", pgettext(\"image alt text\", \"core.icons.idscreencheck\")),\n- heading=_(\"eligibility.media.heading\"),\n- details=_(\"eligibility.media.details\"),\n- links=[\n- viewmodels.Button.link(\n- classes=\"btn-text btn-link\",\n- text=_(\"eligibility.media.link_text\"),\n- url=_(\"eligibility.media.link_url\"),\n- target=\"_blank\",\n- rel=\"noopener noreferrer\",\n- )\n- ],\n- )\n- else:\n- button = viewmodels.Button.primary(text=_(\"eligibility.buttons.continue\"), url=reverse(\"eligibility:confirm\"))\n-\n- page = viewmodels.Page(\n- title=_(\"eligibility.pages.start.title\"),\n- noimage=True,\n- paragraphs=[_(verifier.start_blurb)],\n- button=button,\n- )\n-\n- ctx = page.context_dict()\n- ctx[\"title\"] = _(verifier.start_content_title)\n- ctx[\"media\"] = [\n+ button = viewmodels.Button.primary(text=_(\"eligibility.buttons.continue\"), url=reverse(\"eligibility:confirm\"))\n+ media = [\n dict(\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n heading=_(verifier.start_item_name),\n@@ -124,7 +89,44 @@\n ]\n \n if verifier.requires_authentication:\n- ctx[\"media\"].insert(0, auth_media)\n+ if OAUTH_CLIENT_NAME is None:\n+ raise Exception(\"EligibilityVerifier requires authentication, but OAUTH_CLIENT_NAME is None\")\n+\n+ media.insert(\n+ 0,\n+ dict(\n+ icon=viewmodels.Icon(\"idscreencheck\", pgettext(\"image alt text\", \"core.icons.idscreencheck\")),\n+ heading=_(\"eligibility.media.heading\"),\n+ details=_(\"eligibility.media.details\"),\n+ links=[\n+ viewmodels.Button.link(\n+ classes=\"btn-text btn-link\",\n+ text=_(\"eligibility.media.link_text\"),\n+ url=_(\"eligibility.media.link_url\"),\n+ target=\"_blank\",\n+ rel=\"noopener noreferrer\",\n+ )\n+ ],\n+ ),\n+ )\n+\n+ if not session.auth(request):\n+ button = viewmodels.Button.external(\n+ text=_(verifier.auth_provider.sign_in_button_label),\n+ url=reverse(\"oauth:login\"),\n+ id=\"login\",\n+ )\n+\n+ page = viewmodels.Page(\n+ title=_(\"eligibility.pages.start.title\"),\n+ noimage=True,\n+ paragraphs=[_(verifier.start_blurb)],\n+ button=button,\n+ )\n+\n+ ctx = page.context_dict()\n+ ctx[\"title\"] = _(verifier.start_content_title)\n+ ctx[\"media\"] = media\n \n return TemplateResponse(request, \"eligibility/start.html\", ctx)\n", "issue": "Refactor start page context creation\nI got a strange error on the Start page:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/calitp/.local/lib/python3.9/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/home/calitp/.local/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File 
\"/home/calitp/.local/lib/python3.9/site-packages/django/utils/decorators.py\", line 130, in _wrapped_view\r\n response = view_func(request, *args, **kwargs)\r\n File \"/home/calitp/.local/lib/python3.9/site-packages/django/utils/decorators.py\", line 130, in _wrapped_view\r\n response = view_func(request, *args, **kwargs)\r\n File \"/home/calitp/app/benefits/eligibility/views.py\", line 127, in start\r\n ctx[\"media\"].insert(0, auth_media)\r\n\r\nException Type: UnboundLocalError at /eligibility/start\r\nException Value: local variable 'auth_media' referenced before assignment\r\n```\r\n\r\nMaybe now that the media list refactor and the login button are merged into dev, we can clean up the logic and context creation for this view?\r\n\r\n## Steps to recreate\r\n\r\n1. From start page, click \"Continue with Login.gov\" button\r\n2. Sign in with Login.gov account\r\n3. Land on confirm page (DMV form)\r\n4. Change URL manually to `/eligibility` (the verifier select page)\r\n5. Select \"Senior Discount Program\", click \"Continue\"\r\n6. \ud83d\udca5 \n", "before_files": [{"content": "\"\"\"\nThe eligibility application: view definitions for the eligibility verification flow.\n\"\"\"\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import middleware, recaptcha, session, viewmodels\nfrom benefits.core.models import EligibilityVerifier\nfrom benefits.core.views import PageTemplateResponse\nfrom benefits.settings import OAUTH_CLIENT_NAME\nfrom . import analytics, api, forms\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef index(request):\n \"\"\"View handler for the eligibility verifier selection form.\"\"\"\n\n session.update(request, eligibility_types=[], origin=reverse(\"eligibility:index\"))\n agency = session.agency(request)\n\n eligibility_start = reverse(\"eligibility:start\")\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.index.title\"),\n content_title=_(\"eligibility.pages.index.content_title\"),\n forms=forms.EligibilityVerifierSelectionForm(agency=agency),\n )\n\n if request.method == \"POST\":\n form = forms.EligibilityVerifierSelectionForm(data=request.POST, agency=agency)\n\n if form.is_valid():\n verifier_id = form.cleaned_data.get(\"verifier\")\n verifier = EligibilityVerifier.objects.get(id=verifier_id)\n session.update(request, verifier=verifier)\n\n response = redirect(eligibility_start)\n else:\n # form was not valid, allow for correction/resubmission\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n else:\n if agency.eligibility_verifiers.count() == 1:\n verifier = agency.eligibility_verifiers.first()\n session.update(request, verifier=verifier)\n response = redirect(eligibility_start)\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef start(request):\n \"\"\"View handler for the eligibility verification getting started screen.\"\"\"\n\n session.update(request, eligibility_types=[], origin=reverse(\"eligibility:start\"))\n verifier = session.verifier(request)\n\n if verifier.requires_authentication and not session.auth(request):\n if OAUTH_CLIENT_NAME is None:\n raise Exception(\"EligibilityVerifier 
requires authentication, but OAUTH_CLIENT_NAME is None\")\n\n auth_provider = verifier.auth_provider\n button = viewmodels.Button.external(\n text=_(auth_provider.sign_in_button_label),\n url=reverse(\"oauth:login\"),\n id=\"login\",\n )\n auth_media = dict(\n icon=viewmodels.Icon(\"idscreencheck\", pgettext(\"image alt text\", \"core.icons.idscreencheck\")),\n heading=_(\"eligibility.media.heading\"),\n details=_(\"eligibility.media.details\"),\n links=[\n viewmodels.Button.link(\n classes=\"btn-text btn-link\",\n text=_(\"eligibility.media.link_text\"),\n url=_(\"eligibility.media.link_url\"),\n target=\"_blank\",\n rel=\"noopener noreferrer\",\n )\n ],\n )\n else:\n button = viewmodels.Button.primary(text=_(\"eligibility.buttons.continue\"), url=reverse(\"eligibility:confirm\"))\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.start.title\"),\n noimage=True,\n paragraphs=[_(verifier.start_blurb)],\n button=button,\n )\n\n ctx = page.context_dict()\n ctx[\"title\"] = _(verifier.start_content_title)\n ctx[\"media\"] = [\n dict(\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n heading=_(verifier.start_item_name),\n details=_(verifier.start_item_description),\n ),\n dict(\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n heading=_(\"eligibility.pages.start.items[1].title\"),\n details=_(\"eligibility.pages.start.items[1].text\"),\n links=[\n viewmodels.Button.link(\n classes=\"btn-text btn-link\",\n text=_(\"eligibility.pages.start.items[1].button[0].link\"),\n url=_(\"eligibility.pages.start.items[1].button[0].url\"),\n ),\n viewmodels.Button.link(\n classes=\"btn-text btn-link\",\n text=_(\"eligibility.pages.start.items[1].button[1].link\"),\n url=_(\"eligibility.pages.start.items[1].button[1].url\"),\n ),\n ],\n ),\n ]\n\n if verifier.requires_authentication:\n ctx[\"media\"].insert(0, auth_media)\n\n return TemplateResponse(request, \"eligibility/start.html\", ctx)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.RateLimit)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef confirm(request):\n \"\"\"View handler for the eligibility verification form.\"\"\"\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(verifier.form_title),\n content_title=_(verifier.form_content_title),\n paragraphs=[_(verifier.form_blurb)],\n form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=\"\", verifier=verifier),\n classes=\"text-lg-center\",\n )\n\n if request.method == \"POST\":\n analytics.started_eligibility(request)\n\n form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)\n response = _verify(request, form)\n\n if response is None:\n # form was not valid, allow for correction/resubmission\n analytics.returned_error(request, form.errors)\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n elif session.eligible(request):\n eligibility = session.eligibility(request)\n response = verified(request, [eligibility.name])\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\ndef _verify(request, form):\n \"\"\"Helper calls the eligibility verification API with user input.\"\"\"\n\n if not form.is_valid():\n if recaptcha.has_error(form):\n messages.error(request, \"Recaptcha failed. 
Please try again.\")\n return None\n\n sub, name = form.cleaned_data.get(\"sub\"), form.cleaned_data.get(\"name\")\n\n agency = session.agency(request)\n verifier = session.verifier(request)\n client = api.Client(agency, verifier)\n\n response = client.verify(sub, name)\n\n if response.error and any(response.error):\n form.add_api_errors(response.error)\n return None\n elif any(response.eligibility):\n return verified(request, response.eligibility)\n else:\n return unverified(request)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef verified(request, verified_types):\n \"\"\"View handler for the verified eligibility page.\"\"\"\n\n analytics.returned_success(request)\n\n enrollment_index = reverse(\"enrollment:index\")\n session.update(request, eligibility_types=verified_types, origin=enrollment_index)\n\n return redirect(enrollment_index)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef unverified(request):\n \"\"\"View handler for the unverified eligibility page.\"\"\"\n\n analytics.returned_fail(request)\n\n # tel: link to agency phone number\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(verifier.unverified_title),\n content_title=_(verifier.unverified_content_title),\n icon=viewmodels.Icon(\"idcardquestion\", pgettext(\"image alt text\", \"core.icons.idcardquestion\")),\n paragraphs=[_(verifier.unverified_blurb), _(\"eligibility.pages.unverified.p[1]\")],\n buttons=buttons,\n classes=\"text-lg-center\",\n )\n\n return PageTemplateResponse(request, page)\n", "path": "benefits/eligibility/views.py"}]}
| 3,186 | 789 |
gh_patches_debug_32213
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-840
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup.py: Install scripts without extensions on UNIX-like platforms
Because of issue #394, meson installs its scripts as `xxx.py`, but on Linux, installing a script with a file extension is not good practice. Changing the installed script names would also break packages that depend on meson.
Could you handle this differently per platform?
Thanks.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 # Copyright 2016 The Meson development team
4
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8
9 # http://www.apache.org/licenses/LICENSE-2.0
10
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import sys
18
19 if sys.version_info[0] < 3:
20 print('Tried to install with Python 2, Meson only supports Python 3.')
21 sys.exit(1)
22
23 # We need to support Python installations that have nothing but the basic
24 # Python installation. Use setuptools when possible and fall back to
25 # plain distutils when setuptools is not available.
26 try:
27 from setuptools import setup
28 except ImportError:
29 from distutils.core import setup
30
31 from mesonbuild.coredata import version
32
33 setup(name='meson',
34 version=version,
35 description='A high performance build system',
36 author='Jussi Pakkanen',
37 author_email='[email protected]',
38 url='http://mesonbuild.com',
39 license=' Apache License, Version 2.0',
40 packages=['mesonbuild',
41 'mesonbuild.modules',
42 'mesonbuild.scripts',
43 'mesonbuild.backend',
44 'mesonbuild.wrap'],
45 scripts=['meson.py',
46 'mesonconf.py',
47 'mesonintrospect.py',
48 'wraptool.py'],
49 data_files=[('share/man/man1', ['man/meson.1',
50 'man/mesonconf.1',
51 'man/mesonintrospect.1',
52 'man/wraptool.1'])],
53 classifiers=['Development Status :: 5 - Production/Stable',
54 'Environment :: Console',
55 'Intended Audience :: Developers',
56 'License :: OSI Approved :: Apache Software License',
57 'Natural Language :: English',
58 'Operating System :: MacOS :: MacOS X',
59 'Operating System :: Microsoft :: Windows',
60 'Operating System :: POSIX :: BSD',
61 'Operating System :: POSIX :: Linux',
62 'Programming Language :: Python :: 3 :: Only',
63 'Topic :: Software Development :: Build Tools',
64 ],
65 long_description='''Meson is a cross-platform build system designed to be both as
66 fast and as user friendly as possible. It supports many languages and compilers, including
67 GCC, Clang and Visual Studio. Its build definitions are written in a simple non-turing
68 complete DSL.''')
69
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
import sys
+from os import path
if sys.version_info[0] < 3:
print('Tried to install with Python 2, Meson only supports Python 3.')
@@ -25,8 +27,32 @@
# plain distutils when setuptools is not available.
try:
from setuptools import setup
+ from setuptools.command.install_scripts import install_scripts as orig
except ImportError:
from distutils.core import setup
+ from distutils.command.install_scripts import install_scripts as orig
+
+from distutils.file_util import copy_file
+from distutils.dir_util import mkpath
+from stat import ST_MODE
+
+class install_scripts(orig):
+ def run(self):
+ if sys.platform == 'win32':
+ super().run()
+ return
+
+ self.outfiles = []
+ if not self.dry_run:
+ mkpath(self.install_dir)
+
+ # We want the files to be installed without a suffix on Unix
+ for infile in self.get_inputs():
+ in_stripped = infile[:-3] if infile.endswith('.py') else infile
+ outfile = path.join(self.install_dir, in_stripped)
+ # NOTE: Mode is preserved by default
+ copy_file(infile, outfile, dry_run=self.dry_run)
+ self.outfiles.append(outfile)
from mesonbuild.coredata import version
@@ -46,6 +72,7 @@
'mesonconf.py',
'mesonintrospect.py',
'wraptool.py'],
+ cmdclass={'install_scripts': install_scripts},
data_files=[('share/man/man1', ['man/meson.1',
'man/mesonconf.1',
'man/mesonintrospect.1',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,9 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import os\n import sys\n+from os import path\n \n if sys.version_info[0] < 3:\n print('Tried to install with Python 2, Meson only supports Python 3.')\n@@ -25,8 +27,32 @@\n # plain distutils when setuptools is not available.\n try:\n from setuptools import setup\n+ from setuptools.command.install_scripts import install_scripts as orig\n except ImportError:\n from distutils.core import setup\n+ from distutils.command.install_scripts import install_scripts as orig\n+\n+from distutils.file_util import copy_file\n+from distutils.dir_util import mkpath\n+from stat import ST_MODE\n+\n+class install_scripts(orig):\n+ def run(self):\n+ if sys.platform == 'win32':\n+ super().run()\n+ return\n+\n+ self.outfiles = []\n+ if not self.dry_run:\n+ mkpath(self.install_dir)\n+\n+ # We want the files to be installed without a suffix on Unix\n+ for infile in self.get_inputs():\n+ in_stripped = infile[:-3] if infile.endswith('.py') else infile\n+ outfile = path.join(self.install_dir, in_stripped)\n+ # NOTE: Mode is preserved by default\n+ copy_file(infile, outfile, dry_run=self.dry_run)\n+ self.outfiles.append(outfile)\n \n from mesonbuild.coredata import version\n \n@@ -46,6 +72,7 @@\n 'mesonconf.py',\n 'mesonintrospect.py',\n 'wraptool.py'],\n+ cmdclass={'install_scripts': install_scripts},\n data_files=[('share/man/man1', ['man/meson.1',\n 'man/mesonconf.1',\n 'man/mesonintrospect.1',\n", "issue": "setup.py: Install scripts without extensions on UNIX-like platforms\nBecause of issue #394 , meson install scripts as `xxx.py` , but in linux, install script with a extension name is no a good practice. And change the installed script name also break some package depend on meson.\n\nCould you deal with it for different platform?\n\nthx.\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nif sys.version_info[0] < 3:\n print('Tried to install with Python 2, Meson only supports Python 3.')\n sys.exit(1)\n\n# We need to support Python installations that have nothing but the basic\n# Python installation. 
Use setuptools when possible and fall back to\n# plain distutils when setuptools is not available.\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nfrom mesonbuild.coredata import version\n\nsetup(name='meson',\n version=version,\n description='A high performance build system',\n author='Jussi Pakkanen',\n author_email='[email protected]',\n url='http://mesonbuild.com',\n license=' Apache License, Version 2.0',\n packages=['mesonbuild',\n 'mesonbuild.modules',\n 'mesonbuild.scripts',\n 'mesonbuild.backend',\n 'mesonbuild.wrap'],\n scripts=['meson.py',\n 'mesonconf.py',\n 'mesonintrospect.py',\n 'wraptool.py'],\n data_files=[('share/man/man1', ['man/meson.1',\n 'man/mesonconf.1',\n 'man/mesonintrospect.1',\n 'man/wraptool.1'])],\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Build Tools',\n ],\n long_description='''Meson is a cross-platform build system designed to be both as\nfast and as user friendly as possible. It supports many languages and compilers, including\nGCC, Clang and Visual Studio. Its build definitions are written in a simple non-turing\ncomplete DSL.''')\n", "path": "setup.py"}]}
| 1,321 | 436 |
gh_patches_debug_16211
|
rasdani/github-patches
|
git_diff
|
google__jax-326
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jax missing scipy.special.expit
Would it be possible to add gradients for `expit` and `logit`?
</issue>
<code>
[start of jax/scipy/special.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 import scipy.special as osp_special
20
21 from .. import lax
22 from ..numpy.lax_numpy import _wraps
23
24
25 # need to create new functions because _wraps sets the __name__ attribute
26 gammaln = _wraps(osp_special.gammaln)(lambda x: lax.lgamma(x))
27 digamma = _wraps(osp_special.digamma)(lambda x: lax.digamma(x))
28 erf = _wraps(osp_special.erf)(lambda x: lax.erf(x))
29 erfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x))
30 erfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x))
31
[end of jax/scipy/special.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jax/scipy/special.py b/jax/scipy/special.py
--- a/jax/scipy/special.py
+++ b/jax/scipy/special.py
@@ -19,7 +19,7 @@
import scipy.special as osp_special
from .. import lax
-from ..numpy.lax_numpy import _wraps
+from ..numpy.lax_numpy import _wraps, asarray
# need to create new functions because _wraps sets the __name__ attribute
@@ -28,3 +28,16 @@
erf = _wraps(osp_special.erf)(lambda x: lax.erf(x))
erfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x))
erfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x))
+
+
+@_wraps(osp_special.logit)
+def logit(x):
+ x = asarray(x)
+ return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x)))
+
+
+@_wraps(osp_special.expit)
+def expit(x):
+ x = asarray(x)
+ one = lax._const(x, 1)
+ return lax.div(one, lax.add(one, lax.exp(lax.neg(x))))
|
{"golden_diff": "diff --git a/jax/scipy/special.py b/jax/scipy/special.py\n--- a/jax/scipy/special.py\n+++ b/jax/scipy/special.py\n@@ -19,7 +19,7 @@\n import scipy.special as osp_special\n \n from .. import lax\n-from ..numpy.lax_numpy import _wraps\n+from ..numpy.lax_numpy import _wraps, asarray\n \n \n # need to create new functions because _wraps sets the __name__ attribute\n@@ -28,3 +28,16 @@\n erf = _wraps(osp_special.erf)(lambda x: lax.erf(x))\n erfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x))\n erfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x))\n+\n+\n+@_wraps(osp_special.logit)\n+def logit(x):\n+ x = asarray(x)\n+ return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x)))\n+\n+\n+@_wraps(osp_special.expit)\n+def expit(x):\n+ x = asarray(x)\n+ one = lax._const(x, 1)\n+ return lax.div(one, lax.add(one, lax.exp(lax.neg(x))))\n", "issue": "jax missing scipy.special.expit\nWould be possible to add gradients for `expit` and `logit`?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport scipy.special as osp_special\n\nfrom .. import lax\nfrom ..numpy.lax_numpy import _wraps\n\n\n# need to create new functions because _wraps sets the __name__ attribute\ngammaln = _wraps(osp_special.gammaln)(lambda x: lax.lgamma(x))\ndigamma = _wraps(osp_special.digamma)(lambda x: lax.digamma(x))\nerf = _wraps(osp_special.erf)(lambda x: lax.erf(x))\nerfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x))\nerfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x))\n", "path": "jax/scipy/special.py"}]}
| 921 | 290 |
gh_patches_debug_516
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-7210
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feature: Python 3.11 support
### Feature scope
Other
### Description
Python 3.11.0 is planned to be officially released as of 2022-10-24. We should add it to our test matrix, and build Docker images for it for each release.
</issue>
<code>
[start of noxfile.py]
1 """Nox configuration."""
2
3 from __future__ import annotations
4
5 import os
6 import sys
7 from pathlib import Path
8 from random import randint
9 from textwrap import dedent
10
11 try:
12 from nox_poetry import Session
13 from nox_poetry import session as nox_session
14 except ImportError:
15 message = f"""\
16 Nox failed to import the 'nox-poetry' package.
17 Please install it using the following command:
18 {sys.executable} -m pip install nox-poetry"""
19 raise SystemExit(dedent(message)) from None
20
21
22 package = "meltano"
23 python_versions = ["3.10", "3.9", "3.8", "3.7"]
24 main_python_version = "3.9"
25 locations = "src", "tests", "noxfile.py"
26
27
28 @nox_session(python=python_versions)
29 def tests(session: Session) -> None:
30 """Execute pytest tests and compute coverage.
31
32 Args:
33 session: Nox session.
34 """
35 backend_db = os.environ.get("PYTEST_BACKEND", "sqlite")
36
37 if backend_db == "mssql":
38 session.install(".[mssql,azure,gcs,s3]")
39
40 else:
41 session.install(".[azure,gcs,s3]")
42
43 session.install(
44 "colorama", # colored output in Windows
45 "freezegun",
46 "mock",
47 "pytest",
48 "pytest-asyncio",
49 "pytest-cov",
50 "pytest-docker",
51 "pytest-order",
52 "pytest-randomly",
53 "pytest-xdist",
54 "requests-mock",
55 )
56
57 try:
58 session.run(
59 "pytest",
60 f"--randomly-seed={randint(0, 2**32-1)}", # noqa: S311, WPS432
61 *session.posargs,
62 env={"NOX_CURRENT_SESSION": "tests"},
63 )
64 finally:
65 if session.interactive:
66 session.notify("coverage", posargs=[])
67
68
69 @nox_session(python=main_python_version)
70 def coverage(session: Session) -> None:
71 """Upload coverage data.
72
73 Args:
74 session: Nox session.
75 """
76 args = session.posargs or ["report"]
77
78 session.install("coverage[toml]")
79
80 if not session.posargs and any(Path().glob(".coverage.*")):
81 session.run("coverage", "combine")
82
83 session.run("coverage", *args)
84
85
86 @nox_session(python=main_python_version)
87 def mypy(session: Session) -> None:
88 """Run mypy type checking.
89
90 Args:
91 session: Nox session.
92 """
93 args = session.posargs or ["src/meltano", "--exclude", "src/meltano/migrations/"]
94
95 session.install(".")
96 session.install(
97 "mypy",
98 "sqlalchemy2-stubs",
99 "types-croniter",
100 "types-psutil",
101 "types-requests",
102 "boto3-stubs[essential]",
103 )
104 session.run("mypy", *args)
105
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -20,7 +20,7 @@
package = "meltano"
-python_versions = ["3.10", "3.9", "3.8", "3.7"]
+python_versions = ["3.11", "3.10", "3.9", "3.8", "3.7"]
main_python_version = "3.9"
locations = "src", "tests", "noxfile.py"
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -20,7 +20,7 @@\n \n \n package = \"meltano\"\n-python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"]\n+python_versions = [\"3.11\", \"3.10\", \"3.9\", \"3.8\", \"3.7\"]\n main_python_version = \"3.9\"\n locations = \"src\", \"tests\", \"noxfile.py\"\n", "issue": "feature: Python 3.11 support\n### Feature scope\n\nOther\n\n### Description\n\nPython 3.11.0 is planned to be officially released as of 2022-10-24. We should add it to our test matrix, and build Docker images for it for each release.\n", "before_files": [{"content": "\"\"\"Nox configuration.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom pathlib import Path\nfrom random import randint\nfrom textwrap import dedent\n\ntry:\n from nox_poetry import Session\n from nox_poetry import session as nox_session\nexcept ImportError:\n message = f\"\"\"\\\n Nox failed to import the 'nox-poetry' package.\n Please install it using the following command:\n {sys.executable} -m pip install nox-poetry\"\"\"\n raise SystemExit(dedent(message)) from None\n\n\npackage = \"meltano\"\npython_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"]\nmain_python_version = \"3.9\"\nlocations = \"src\", \"tests\", \"noxfile.py\"\n\n\n@nox_session(python=python_versions)\ndef tests(session: Session) -> None:\n \"\"\"Execute pytest tests and compute coverage.\n\n Args:\n session: Nox session.\n \"\"\"\n backend_db = os.environ.get(\"PYTEST_BACKEND\", \"sqlite\")\n\n if backend_db == \"mssql\":\n session.install(\".[mssql,azure,gcs,s3]\")\n\n else:\n session.install(\".[azure,gcs,s3]\")\n\n session.install(\n \"colorama\", # colored output in Windows\n \"freezegun\",\n \"mock\",\n \"pytest\",\n \"pytest-asyncio\",\n \"pytest-cov\",\n \"pytest-docker\",\n \"pytest-order\",\n \"pytest-randomly\",\n \"pytest-xdist\",\n \"requests-mock\",\n )\n\n try:\n session.run(\n \"pytest\",\n f\"--randomly-seed={randint(0, 2**32-1)}\", # noqa: S311, WPS432\n *session.posargs,\n env={\"NOX_CURRENT_SESSION\": \"tests\"},\n )\n finally:\n if session.interactive:\n session.notify(\"coverage\", posargs=[])\n\n\n@nox_session(python=main_python_version)\ndef coverage(session: Session) -> None:\n \"\"\"Upload coverage data.\n\n Args:\n session: Nox session.\n \"\"\"\n args = session.posargs or [\"report\"]\n\n session.install(\"coverage[toml]\")\n\n if not session.posargs and any(Path().glob(\".coverage.*\")):\n session.run(\"coverage\", \"combine\")\n\n session.run(\"coverage\", *args)\n\n\n@nox_session(python=main_python_version)\ndef mypy(session: Session) -> None:\n \"\"\"Run mypy type checking.\n\n Args:\n session: Nox session.\n \"\"\"\n args = session.posargs or [\"src/meltano\", \"--exclude\", \"src/meltano/migrations/\"]\n\n session.install(\".\")\n session.install(\n \"mypy\",\n \"sqlalchemy2-stubs\",\n \"types-croniter\",\n \"types-psutil\",\n \"types-requests\",\n \"boto3-stubs[essential]\",\n )\n session.run(\"mypy\", *args)\n", "path": "noxfile.py"}]}
| 1,469 | 125 |
gh_patches_debug_11609
|
rasdani/github-patches
|
git_diff
|
google-research__text-to-text-transfer-transformer-39
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do we support GPU distributed training?
Hi, thanks for the awesome project!
Does the code base support distributed training? If not, is it possible to support it after some code modifications?
By the way, how do I set the batch size and the number of GPUs if I want to train the model on GPUs?
Thank you for your kind attention.
</issue>
<code>
[start of setup.py]
1 # Copyright 2019 The T5 Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Install T5."""
16
17 import setuptools
18
19 # Get the long description from the README file.
20 with open('README.md') as fp:
21 _LONG_DESCRIPTION = fp.read()
22
23 setuptools.setup(
24 name='t5',
25 version='0.1.7',
26 description='Text-to-text transfer transformer',
27 long_description=_LONG_DESCRIPTION,
28 long_description_content_type='text/markdown',
29 author='Google Inc.',
30 author_email='[email protected]',
31 url='http://github.com/google-research/text-to-text-transfer-transformer',
32 license='Apache 2.0',
33 packages=setuptools.find_packages(),
34 package_data={
35 '': ['*.gin'],
36 },
37 scripts=[],
38 install_requires=[
39 'absl-py',
40 'allennlp',
41 'babel',
42 'future',
43 'gin-config',
44 'mesh-tensorflow[transformer]>=0.1.8',
45 'nltk',
46 'numpy',
47 'pandas',
48 'rouge-score',
49 'sacrebleu',
50 'scikit-learn',
51 'scipy',
52 'sentencepiece',
53 'six',
54 'tensorflow-datasets>=1.3.2',
55 'tensorflow-text==1.15.0rc0',
56 ],
57 extras_require={
58 'tensorflow': ['tensorflow==1.15'],
59 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine',
60 'google-cloud-storage', 'oauth2client'],
61 },
62 entry_points={
63 'console_scripts': [
64 't5_mesh_transformer = '
65 't5.models.mesh_transformer_main:console_entry_point',
66 ],
67 },
68 classifiers=[
69 'Development Status :: 4 - Beta',
70 'Intended Audience :: Developers',
71 'Intended Audience :: Science/Research',
72 'License :: OSI Approved :: Apache Software License',
73 'Topic :: Scientific/Engineering :: Artificial Intelligence',
74 ],
75 keywords='text nlp machinelearning',
76 )
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
setuptools.setup(
name='t5',
- version='0.1.7',
+ version='0.1.8',
description='Text-to-text transfer transformer',
long_description=_LONG_DESCRIPTION,
long_description_content_type='text/markdown',
@@ -41,7 +41,7 @@
'babel',
'future',
'gin-config',
- 'mesh-tensorflow[transformer]>=0.1.8',
+ 'mesh-tensorflow[transformer]>=0.1.9',
'nltk',
'numpy',
'pandas',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \n setuptools.setup(\n name='t5',\n- version='0.1.7',\n+ version='0.1.8',\n description='Text-to-text transfer transformer',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n@@ -41,7 +41,7 @@\n 'babel',\n 'future',\n 'gin-config',\n- 'mesh-tensorflow[transformer]>=0.1.8',\n+ 'mesh-tensorflow[transformer]>=0.1.9',\n 'nltk',\n 'numpy',\n 'pandas',\n", "issue": "Do we support GPU distributed training?\nHi, thanks for the awesome project!\r\n\r\nDoes the code base support distributed training? If not, is it possible to support it after some code modifications?\r\n\r\nBy the way, what is the way to set batch size and gpu number if I want to use GPU to train the model?\r\n\r\nThank you for your kind attention.\n", "before_files": [{"content": "# Copyright 2019 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5.\"\"\"\n\nimport setuptools\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\nsetuptools.setup(\n name='t5',\n version='0.1.7',\n description='Text-to-text transfer transformer',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/text-to-text-transfer-transformer',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['*.gin'],\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'allennlp',\n 'babel',\n 'future',\n 'gin-config',\n 'mesh-tensorflow[transformer]>=0.1.8',\n 'nltk',\n 'numpy',\n 'pandas',\n 'rouge-score',\n 'sacrebleu',\n 'scikit-learn',\n 'scipy',\n 'sentencepiece',\n 'six',\n 'tensorflow-datasets>=1.3.2',\n 'tensorflow-text==1.15.0rc0',\n ],\n extras_require={\n 'tensorflow': ['tensorflow==1.15'],\n 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'],\n },\n entry_points={\n 'console_scripts': [\n 't5_mesh_transformer = '\n 't5.models.mesh_transformer_main:console_entry_point',\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n", "path": "setup.py"}]}
| 1,302 | 161 |
gh_patches_debug_16265
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-658
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a code of conduct
I keep planning on doing this and not getting around to it, so I'm opening an issue. As usual, we can probably just copy @ctb: https://github.com/dib-lab/khmer/blob/master/CODE_OF_CONDUCT.rst
</issue>
<code>
[start of docs/conf.py]
1 from builtins import str
2 from retriever import VERSION,COPYRIGHT
3 from retriever.lib.repository import check_for_updates
4 from retriever import SCRIPT_LIST
5
6 # Create the .rst file for the available datasets
7 datasetfile = open("datasets.rst", "w")
8 datasetfile_title = """
9 ==================
10 Datasets Available
11 ==================
12
13
14 """
15 check_for_updates()
16 script_list = SCRIPT_LIST()
17
18 # write the title of dataset rst file
19 datasetfile.write(datasetfile_title)
20
21 # get info from the scripts
22 for script_num, script in enumerate(script_list, start=1):
23 if script.ref.strip():
24 reference_link = script.ref
25 elif bool(script.urls.values()):
26 reference_link = list(script.urls.values())[0].rpartition('/')[0]
27 else:
28 reference_link = ""
29 datasetfile.write("| " + str(script_num) + ". **{}** \n| shortname: {}\n| reference: {}\n\n".format(script.name, script.shortname, reference_link))
30 datasetfile.close()
31
32 needs_sphinx = '1.3'
33
34 # Add any Sphinx extension module names here, as strings.
35 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
36
37 # Add any paths that contain templates here, relative to this directory.
38 templates_path = ['_templates']
39
40 # The suffix of source filenames.
41 source_suffix = '.rst'
42
43 # The encoding of source files.
44 # source_encoding = 'utf-8-sig'
45
46 # The master toctree document.
47 master_doc = 'index'
48
49 # General information about the project.
50 project = u'Data Retriever'
51 copyright = COPYRIGHT
52
53 version = release = VERSION
54
55 # List of patterns, relative to source directory, that match files and
56 # directories to ignore when looking for source files.
57 exclude_patterns = []
58
59 # The reST default role (used for this markup: `text`) to use for all documents.
60 #default_role = None
61
62 # If true, '()' will be appended to :func: etc. cross-reference text.
63 #add_function_parentheses = True
64
65 # If true, the current module name will be prepended to all description
66 # unit titles (such as .. function::).
67 #add_module_names = True
68
69 # If true, sectionauthor and moduleauthor directives will be shown in the
70 # output. They are ignored by default.
71 #show_authors = False
72
73 # The name of the Pygments (syntax highlighting) style to use.
74 pygments_style = 'sphinx'
75
76 # A list of ignored prefixes for module index sorting.
77 #modindex_common_prefix = []
78
79
80 # -- Options for HTML output ---------------------------------------------------
81
82 # The theme to use for HTML and HTML Help pages. See the documentation for
83 # a list of builtin themes.
84 html_theme = 'classic'
85
86 # Theme options are theme-specific and customize the look and feel of a theme
87 # further. For a list of options available for each theme, see the
88 # documentation.
89 #html_theme_options = {}
90
91 # Add any paths that contain custom themes here, relative to this directory.
92 #html_theme_path = []
93
94 # The name for this set of Sphinx documents. If None, it defaults to
95 # "<project> v<release> documentation".
96 #html_title = None
97
98 # A shorter title for the navigation bar. Default is the same as html_title.
99 #html_short_title = None
100
101 # The name of an image file (relative to this directory) to place at the top
102 # of the sidebar.
103 #html_logo = None
104
105 # The name of an image file (within the static path) to use as favicon of the
106 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
107 # pixels large.
108 #html_favicon = None
109
110 # Add any paths that contain custom static files (such as style sheets) here,
111 # relative to this directory. They are copied after the builtin static files,
112 # so a file named "default.css" will overwrite the builtin "default.css".
113
114
115 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
116 # using the given strftime format.
117 #html_last_updated_fmt = '%b %d, %Y'
118
119 # If true, SmartyPants will be used to convert quotes and dashes to
120 # typographically correct entities.
121 #html_use_smartypants = True
122
123 # Custom sidebar templates, maps document names to template names.
124 #html_sidebars = {}
125
126 # Additional templates that should be rendered to pages, maps page names to
127 # template names.
128 #html_additional_pages = {}
129
130 # If false, no module index is generated.
131 #html_domain_indices = True
132
133 # If false, no index is generated.
134 #html_use_index = True
135
136 # If true, the index is split into individual pages for each letter.
137 #html_split_index = False
138
139 # If true, links to the reST sources are added to the pages.
140 #html_show_sourcelink = True
141
142 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
143 #html_show_sphinx = True
144
145 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
146 #html_show_copyright = True
147
148 # If true, an OpenSearch description file will be output, and all pages will
149 # contain a <link> tag referring to it. The value of this option must be the
150 # base URL from which the finished HTML is served.
151 #html_use_opensearch = ''
152
153 # This is the file name suffix for HTML files (e.g. ".xhtml").
154 #html_file_suffix = None
155
156 # Output file base name for HTML help builder.
157
158
159
160 # -- Options for LaTeX output --------------------------------------------------
161
162 latex_elements = {
163 # The paper size ('letterpaper' or 'a4paper').
164 #'papersize': 'letterpaper',
165
166 # The font size ('10pt', '11pt' or '12pt').
167 #'pointsize': '10pt',
168
169 # Additional stuff for the LaTeX preamble.
170 #'preamble': '',
171 }
172
173 # Grouping the document tree into LaTeX files. List of tuples
174 # (source start file, target name, title, author, documentclass [howto/manual]).
175
176 # The name of an image file (relative to this directory) to place at the top of
177 # the title page.
178 #latex_logo = None
179
180 # For "manual" documents, if this is true, then toplevel headings are parts,
181 # not chapters.
182 #latex_use_parts = False
183
184 # If true, show page references after internal links.
185 #latex_show_pagerefs = False
186
187 # If true, show URL addresses after external links.
188 #latex_show_urls = False
189
190 # Documents to append as an appendix to all manuals.
191 #latex_appendices = []
192
193 # If false, no module index is generated.
194 #latex_domain_indices = True
195
196
197 # -- Options for manual page output --------------------------------------------
198
199 # One entry per manual page. List of tuples
200
201 # If true, show URL addresses after external links.
202 #man_show_urls = False
203
204
205 # -- Options for Texinfo output ------------------------------------------------
206
207 # Grouping the document tree into Texinfo files. List of tuples
208 # (source start file, target name, title, author,
209 # dir menu entry, description, category)
210
211
212 # Documents to append as an appendix to all manuals.
213 #texinfo_appendices = []
214
215 # If false, no module index is generated.
216 #texinfo_domain_indices = True
217
218 # How to display URL addresses: 'footnote', 'no', or 'inline'.
219 #texinfo_show_urls = 'footnote'
220
[end of docs/conf.py]
[start of try_install_all.py]
1 """Attempt to install all datasets into all database management systems
2
3 This module, when run, attempts to install datasets from all Retriever scripts
4 in the /scripts folder (except for those listed in IGNORE), for each engine in
5 ENGINE_LIST() from __init__.py. In other words, it runs trys to install using
6 all possible combinations of database platform and script and checks to
7 see if there are any errors. It does not check the values in the database.
8
9 """
10 from __future__ import print_function
11 from __future__ import absolute_import
12 import os
13 import sys
14 from importlib import reload
15 from retriever.lib.tools import choose_engine
16 from retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST
17
18 reload(sys)
19 if hasattr(sys, 'setdefaultencoding'):
20 sys.setdefaultencoding('latin-1')
21
22 MODULE_LIST = MODULE_LIST()
23 ENGINE_LIST = ENGINE_LIST()
24 if len(sys.argv) > 1:
25 ENGINE_LIST = [
26 e for e in ENGINE_LIST
27 if e.name in sys.argv[1:] or
28 e.abbreviation in sys.argv[1:]
29 ]
30 SCRIPT_LIST = SCRIPT_LIST()
31 TEST_ENGINES = {}
32 IGNORE = ["AvianBodyMass", "FIA", "Bioclim", "PRISM", "vertnet","NPN", "mammsupertree", "eBirdOD"]
33 IGNORE = [dataset.lower() for dataset in IGNORE]
34
35 for engine in ENGINE_LIST:
36 opts = {}
37 print("** %s **" % engine.name)
38 opts["engine"] = engine.abbreviation
39
40 try:
41 TEST_ENGINES[engine.abbreviation] = choose_engine(opts)
42 TEST_ENGINES[engine.abbreviation].get_input()
43 TEST_ENGINES[engine.abbreviation].get_cursor()
44 except:
45 TEST_ENGINES[engine.abbreviation] = None
46 pass
47
48 errors = []
49 for module in MODULE_LIST:
50 for (key, value) in list(TEST_ENGINES.items()):
51 if module.SCRIPT.shortname.lower() not in IGNORE:
52 if value != None:
53 print("==>", module.__name__, value.name, "..........", module.SCRIPT.shortname)
54 try:
55 module.SCRIPT.download(value)
56 except KeyboardInterrupt:
57 pass
58 except Exception as e:
59 print("ERROR.")
60 errors.append((key, module.__name__, e))
61 else:
62 errors.append((key, "No connection detected......" + module.SCRIPT.shortname))
63
64 print('')
65 if errors:
66 print("Engine, Dataset, Error")
67 for error in errors:
68 print(error)
69 else:
70 print("All tests passed")
71
[end of try_install_all.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,3 +1,13 @@
+from __future__ import print_function
+from __future__ import absolute_import
+from imp import reload
+import sys
+# sys removes the setdefaultencoding method at startup; reload to get it back
+reload(sys)
+if hasattr(sys, 'setdefaultencoding'):
+ # set default encoding to latin-1 to decode source text
+ sys.setdefaultencoding('latin-1')
+
from builtins import str
from retriever import VERSION,COPYRIGHT
from retriever.lib.repository import check_for_updates
diff --git a/try_install_all.py b/try_install_all.py
--- a/try_install_all.py
+++ b/try_install_all.py
@@ -11,7 +11,7 @@
from __future__ import absolute_import
import os
import sys
-from importlib import reload
+from imp import reload
from retriever.lib.tools import choose_engine
from retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,3 +1,13 @@\n+from __future__ import print_function\n+from __future__ import absolute_import\n+from imp import reload\n+import sys\n+# sys removes the setdefaultencoding method at startup; reload to get it back\n+reload(sys)\n+if hasattr(sys, 'setdefaultencoding'):\n+ # set default encoding to latin-1 to decode source text\n+ sys.setdefaultencoding('latin-1')\n+\n from builtins import str\n from retriever import VERSION,COPYRIGHT\n from retriever.lib.repository import check_for_updates\ndiff --git a/try_install_all.py b/try_install_all.py\n--- a/try_install_all.py\n+++ b/try_install_all.py\n@@ -11,7 +11,7 @@\n from __future__ import absolute_import\n import os\n import sys\n-from importlib import reload\n+from imp import reload\n from retriever.lib.tools import choose_engine\n from retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST\n", "issue": "Add a code of conduct\nI keep planning on doing this and not getting around to it so I'm opening an issue. As usual we can probably just copy @ctb: https://github.com/dib-lab/khmer/blob/master/CODE_OF_CONDUCT.rst\n\n", "before_files": [{"content": "from builtins import str\nfrom retriever import VERSION,COPYRIGHT\nfrom retriever.lib.repository import check_for_updates\nfrom retriever import SCRIPT_LIST\n\n# Create the .rst file for the available datasets\ndatasetfile = open(\"datasets.rst\", \"w\")\ndatasetfile_title = \"\"\"\n==================\nDatasets Available\n==================\n\n\n\"\"\"\ncheck_for_updates()\nscript_list = SCRIPT_LIST()\n\n# write the title of dataset rst file\ndatasetfile.write(datasetfile_title)\n\n# get info from the scripts\nfor script_num, script in enumerate(script_list, start=1):\n if script.ref.strip():\n reference_link = script.ref\n elif bool(script.urls.values()):\n reference_link = list(script.urls.values())[0].rpartition('/')[0]\n else:\n reference_link = \"\"\n datasetfile.write(\"| \" + str(script_num) + \". **{}** \\n| shortname: {}\\n| reference: {}\\n\\n\".format(script.name, script.shortname, reference_link))\ndatasetfile.close()\n\nneeds_sphinx = '1.3'\n\n# Add any Sphinx extension module names here, as strings.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Data Retriever'\ncopyright = COPYRIGHT\n\nversion = release = VERSION\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'classic'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\n\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\n\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "docs/conf.py"}, {"content": "\"\"\"Attempt to install all datasets into all database management systems\n\nThis module, when run, attempts to install datasets from all Retriever scripts\nin the /scripts folder (except for those listed in IGNORE), for each engine in\nENGINE_LIST() from __init__.py. In other words, it runs trys to install using\nall possible combinations of database platform and script and checks to\nsee if there are any errors. 
It does not check the values in the database.\n\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport os\nimport sys\nfrom importlib import reload\nfrom retriever.lib.tools import choose_engine\nfrom retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST\n\nreload(sys)\nif hasattr(sys, 'setdefaultencoding'):\n sys.setdefaultencoding('latin-1')\n\nMODULE_LIST = MODULE_LIST()\nENGINE_LIST = ENGINE_LIST()\nif len(sys.argv) > 1:\n ENGINE_LIST = [\n e for e in ENGINE_LIST\n if e.name in sys.argv[1:] or\n e.abbreviation in sys.argv[1:]\n ]\nSCRIPT_LIST = SCRIPT_LIST()\nTEST_ENGINES = {}\nIGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\", \"eBirdOD\"]\nIGNORE = [dataset.lower() for dataset in IGNORE]\n\nfor engine in ENGINE_LIST:\n opts = {}\n print(\"** %s **\" % engine.name)\n opts[\"engine\"] = engine.abbreviation\n\n try:\n TEST_ENGINES[engine.abbreviation] = choose_engine(opts)\n TEST_ENGINES[engine.abbreviation].get_input()\n TEST_ENGINES[engine.abbreviation].get_cursor()\n except:\n TEST_ENGINES[engine.abbreviation] = None\n pass\n\nerrors = []\nfor module in MODULE_LIST:\n for (key, value) in list(TEST_ENGINES.items()):\n if module.SCRIPT.shortname.lower() not in IGNORE:\n if value != None:\n print(\"==>\", module.__name__, value.name, \"..........\", module.SCRIPT.shortname)\n try:\n module.SCRIPT.download(value)\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(\"ERROR.\")\n errors.append((key, module.__name__, e))\n else:\n errors.append((key, \"No connection detected......\" + module.SCRIPT.shortname))\n\nprint('')\nif errors:\n print(\"Engine, Dataset, Error\")\n for error in errors:\n print(error)\nelse:\n print(\"All tests passed\")\n", "path": "try_install_all.py"}]}
| 3,473 | 237 |
gh_patches_debug_43005
|
rasdani/github-patches
|
git_diff
|
deepset-ai__haystack-6304
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`HTMLToDocument` to add `ByteStream` metadata to Document
The `HTMLToDocument` converter, when receiving a `ByteStream` from the `LinkContentFetcher`, does not add the URL to the metadata of the Document. The URL is present in the metadata of the ByteStream.
</issue>
<code>
[start of haystack/preview/components/file_converters/html.py]
1 import logging
2 from typing import List, Union
3 from pathlib import Path
4
5 from haystack.preview import Document, component
6 from haystack.preview.dataclasses import ByteStream
7 from haystack.preview.lazy_imports import LazyImport
8
9 logger = logging.getLogger(__name__)
10
11 with LazyImport("Run 'pip install boilerpy3'") as boilerpy3_import:
12 from boilerpy3 import extractors
13
14
15 @component
16 class HTMLToDocument:
17 """
18 Converts an HTML file to a Document.
19 """
20
21 def __init__(self):
22 """
23 Initializes the HTMLToDocument component.
24 """
25 boilerpy3_import.check()
26
27 @component.output_types(documents=List[Document])
28 def run(self, sources: List[Union[str, Path, ByteStream]]):
29 """
30 Converts a list of HTML files to Documents.
31
32 :param sources: List of HTML file paths or ByteStream objects.
33 :return: List of converted Documents.
34 """
35 documents = []
36 extractor = extractors.ArticleExtractor(raise_on_failure=False)
37 for source in sources:
38 try:
39 file_content = self._extract_content(source)
40 except Exception as e:
41 logger.warning("Could not read %s. Skipping it. Error: %s", source, e)
42 continue
43 try:
44 text = extractor.get_content(file_content)
45 except Exception as conversion_e: # Consider specifying the expected exception type(s) here
46 logger.warning("Failed to extract text from %s. Skipping it. Error: %s", source, conversion_e)
47 continue
48
49 document = Document(content=text)
50 documents.append(document)
51
52 return {"documents": documents}
53
54 def _extract_content(self, source: Union[str, Path, ByteStream]) -> str:
55 """
56 Extracts content from the given data source
57 :param source: The data source to extract content from.
58 :return: The extracted content.
59 """
60 if isinstance(source, (str, Path)):
61 with open(source) as text_file:
62 return text_file.read()
63 if isinstance(source, ByteStream):
64 return source.data.decode("utf-8")
65
66 raise ValueError(f"Unsupported source type: {type(source)}")
67
[end of haystack/preview/components/file_converters/html.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/haystack/preview/components/file_converters/html.py b/haystack/preview/components/file_converters/html.py
--- a/haystack/preview/components/file_converters/html.py
+++ b/haystack/preview/components/file_converters/html.py
@@ -1,6 +1,6 @@
import logging
-from typing import List, Union
from pathlib import Path
+from typing import Any, Dict, List, Optional, Union
from haystack.preview import Document, component
from haystack.preview.dataclasses import ByteStream
@@ -16,6 +16,18 @@
class HTMLToDocument:
"""
Converts an HTML file to a Document.
+
+ Usage example:
+ ```python
+ from haystack.preview.components.file_converters.html import HTMLToDocument
+
+ converter = HTMLToDocument()
+ results = converter.run(sources=["sample.html"])
+ documents = results["documents"]
+ print(documents[0].content)
+ # 'This is a text from the HTML file.'
+ ```
+
"""
def __init__(self):
@@ -25,18 +37,30 @@
boilerpy3_import.check()
@component.output_types(documents=List[Document])
- def run(self, sources: List[Union[str, Path, ByteStream]]):
+ def run(self, sources: List[Union[str, Path, ByteStream]], meta: Optional[List[Dict[str, Any]]] = None):
"""
Converts a list of HTML files to Documents.
:param sources: List of HTML file paths or ByteStream objects.
+ :param meta: Optional list of metadata to attach to the Documents.
+ The length of the list must match the number of sources. Defaults to `None`.
:return: List of converted Documents.
"""
+
documents = []
+
+ # Create metadata placeholders if not provided
+ if meta:
+ if len(sources) != len(meta):
+ raise ValueError("The length of the metadata list must match the number of sources.")
+ else:
+ meta = [{}] * len(sources)
+
extractor = extractors.ArticleExtractor(raise_on_failure=False)
- for source in sources:
+
+ for source, metadata in zip(sources, meta):
try:
- file_content = self._extract_content(source)
+ file_content, extracted_meta = self._extract_content(source)
except Exception as e:
logger.warning("Could not read %s. Skipping it. Error: %s", source, e)
continue
@@ -46,21 +70,25 @@
logger.warning("Failed to extract text from %s. Skipping it. Error: %s", source, conversion_e)
continue
- document = Document(content=text)
+ # Merge metadata received from ByteStream with supplied metadata
+ if extracted_meta:
+ # Supplied metadata overwrites metadata from ByteStream for overlapping keys.
+ metadata = {**extracted_meta, **metadata}
+ document = Document(content=text, meta=metadata)
documents.append(document)
return {"documents": documents}
- def _extract_content(self, source: Union[str, Path, ByteStream]) -> str:
+ def _extract_content(self, source: Union[str, Path, ByteStream]) -> tuple:
"""
Extracts content from the given data source
:param source: The data source to extract content from.
- :return: The extracted content.
+ :return: The extracted content and metadata.
"""
if isinstance(source, (str, Path)):
with open(source) as text_file:
- return text_file.read()
+ return (text_file.read(), None)
if isinstance(source, ByteStream):
- return source.data.decode("utf-8")
+ return (source.data.decode("utf-8"), source.metadata)
raise ValueError(f"Unsupported source type: {type(source)}")
|
{"golden_diff": "diff --git a/haystack/preview/components/file_converters/html.py b/haystack/preview/components/file_converters/html.py\n--- a/haystack/preview/components/file_converters/html.py\n+++ b/haystack/preview/components/file_converters/html.py\n@@ -1,6 +1,6 @@\n import logging\n-from typing import List, Union\n from pathlib import Path\n+from typing import Any, Dict, List, Optional, Union\n \n from haystack.preview import Document, component\n from haystack.preview.dataclasses import ByteStream\n@@ -16,6 +16,18 @@\n class HTMLToDocument:\n \"\"\"\n Converts an HTML file to a Document.\n+\n+ Usage example:\n+ ```python\n+ from haystack.preview.components.file_converters.html import HTMLToDocument\n+\n+ converter = HTMLToDocument()\n+ results = converter.run(sources=[\"sample.html\"])\n+ documents = results[\"documents\"]\n+ print(documents[0].content)\n+ # 'This is a text from the HTML file.'\n+ ```\n+\n \"\"\"\n \n def __init__(self):\n@@ -25,18 +37,30 @@\n boilerpy3_import.check()\n \n @component.output_types(documents=List[Document])\n- def run(self, sources: List[Union[str, Path, ByteStream]]):\n+ def run(self, sources: List[Union[str, Path, ByteStream]], meta: Optional[List[Dict[str, Any]]] = None):\n \"\"\"\n Converts a list of HTML files to Documents.\n \n :param sources: List of HTML file paths or ByteStream objects.\n+ :param meta: Optional list of metadata to attach to the Documents.\n+ The length of the list must match the number of sources. Defaults to `None`.\n :return: List of converted Documents.\n \"\"\"\n+\n documents = []\n+\n+ # Create metadata placeholders if not provided\n+ if meta:\n+ if len(sources) != len(meta):\n+ raise ValueError(\"The length of the metadata list must match the number of sources.\")\n+ else:\n+ meta = [{}] * len(sources)\n+\n extractor = extractors.ArticleExtractor(raise_on_failure=False)\n- for source in sources:\n+\n+ for source, metadata in zip(sources, meta):\n try:\n- file_content = self._extract_content(source)\n+ file_content, extracted_meta = self._extract_content(source)\n except Exception as e:\n logger.warning(\"Could not read %s. Skipping it. Error: %s\", source, e)\n continue\n@@ -46,21 +70,25 @@\n logger.warning(\"Failed to extract text from %s. Skipping it. 
Error: %s\", source, conversion_e)\n continue\n \n- document = Document(content=text)\n+ # Merge metadata received from ByteStream with supplied metadata\n+ if extracted_meta:\n+ # Supplied metadata overwrites metadata from ByteStream for overlapping keys.\n+ metadata = {**extracted_meta, **metadata}\n+ document = Document(content=text, meta=metadata)\n documents.append(document)\n \n return {\"documents\": documents}\n \n- def _extract_content(self, source: Union[str, Path, ByteStream]) -> str:\n+ def _extract_content(self, source: Union[str, Path, ByteStream]) -> tuple:\n \"\"\"\n Extracts content from the given data source\n :param source: The data source to extract content from.\n- :return: The extracted content.\n+ :return: The extracted content and metadata.\n \"\"\"\n if isinstance(source, (str, Path)):\n with open(source) as text_file:\n- return text_file.read()\n+ return (text_file.read(), None)\n if isinstance(source, ByteStream):\n- return source.data.decode(\"utf-8\")\n+ return (source.data.decode(\"utf-8\"), source.metadata)\n \n raise ValueError(f\"Unsupported source type: {type(source)}\")\n", "issue": "`HTMLToDocument` to add `ByteStream` metadata to Document \n`HTMLToDocument` converter, when receiving a `ByteStream` from the `LinkContentFetcher` does not add the url to the metadata of the Document. The URL is in the metadata of the ByteStream\r\n\n", "before_files": [{"content": "import logging\nfrom typing import List, Union\nfrom pathlib import Path\n\nfrom haystack.preview import Document, component\nfrom haystack.preview.dataclasses import ByteStream\nfrom haystack.preview.lazy_imports import LazyImport\n\nlogger = logging.getLogger(__name__)\n\nwith LazyImport(\"Run 'pip install boilerpy3'\") as boilerpy3_import:\n from boilerpy3 import extractors\n\n\n@component\nclass HTMLToDocument:\n \"\"\"\n Converts an HTML file to a Document.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes the HTMLToDocument component.\n \"\"\"\n boilerpy3_import.check()\n\n @component.output_types(documents=List[Document])\n def run(self, sources: List[Union[str, Path, ByteStream]]):\n \"\"\"\n Converts a list of HTML files to Documents.\n\n :param sources: List of HTML file paths or ByteStream objects.\n :return: List of converted Documents.\n \"\"\"\n documents = []\n extractor = extractors.ArticleExtractor(raise_on_failure=False)\n for source in sources:\n try:\n file_content = self._extract_content(source)\n except Exception as e:\n logger.warning(\"Could not read %s. Skipping it. Error: %s\", source, e)\n continue\n try:\n text = extractor.get_content(file_content)\n except Exception as conversion_e: # Consider specifying the expected exception type(s) here\n logger.warning(\"Failed to extract text from %s. Skipping it. Error: %s\", source, conversion_e)\n continue\n\n document = Document(content=text)\n documents.append(document)\n\n return {\"documents\": documents}\n\n def _extract_content(self, source: Union[str, Path, ByteStream]) -> str:\n \"\"\"\n Extracts content from the given data source\n :param source: The data source to extract content from.\n :return: The extracted content.\n \"\"\"\n if isinstance(source, (str, Path)):\n with open(source) as text_file:\n return text_file.read()\n if isinstance(source, ByteStream):\n return source.data.decode(\"utf-8\")\n\n raise ValueError(f\"Unsupported source type: {type(source)}\")\n", "path": "haystack/preview/components/file_converters/html.py"}]}
| 1,189 | 849 |
gh_patches_debug_23254
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-1939
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'dict' object has no attribute 'force_set'
**Bug description**
When I try the pretrained model of Self-feeding Chatbot, by `python projects/self_feeding/interactive.py --model-file zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model --no-cuda`, error occurs: AttributeError: 'dict' object has no attribute 'force_set'
**Logs**
Please paste the command line output:
```
Enter Your Message: hello
Traceback (most recent call last):
File "projects/self_feeding/interactive.py", line 87, in <module>
interactive(parser.parse_args(print_args=False), print_parser=parser)
File "projects/self_feeding/interactive.py", line 78, in interactive
world.parley()
File "/home/han/Github/ParlAI/parlai/core/worlds.py", line 273, in parley
agents[1].observe(validate(acts[0]))
File "/home/han/Github/ParlAI/projects/self_feeding/self_feeding_agent.py", line 370, in observe
observation.force_set(
AttributeError: 'dict' object has no attribute 'force_set'
```
**Additional context**
Add any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)
</issue>
<code>
[start of parlai/agents/local_human/local_human.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 """Agent does gets the local keyboard input in the act() function.
7 Example: python examples/eval_model.py -m local_human -t babi:Task1k:1 -dt valid
8 """
9
10 from parlai.core.agents import Agent
11 from parlai.core.utils import display_messages, load_cands
12
13
14 class LocalHumanAgent(Agent):
15 def add_cmdline_args(argparser):
16 """Add command-line arguments specifically for this agent."""
17 agent = argparser.add_argument_group('Local Human Arguments')
18 agent.add_argument(
19 '-fixedCands',
20 '--local-human-candidates-file',
21 default=None,
22 type=str,
23 help='File of label_candidates to send to other agent',
24 )
25 agent.add_argument(
26 '--single_turn',
27 type='bool',
28 default=False,
29 help='If on, assumes single turn episodes.',
30 )
31
32 def __init__(self, opt, shared=None):
33 super().__init__(opt)
34 self.id = 'localHuman'
35 self.episodeDone = False
36 self.fixedCands_txt = load_cands(self.opt.get('local_human_candidates_file'))
37 print("Enter [DONE] if you want to end the episode.\n")
38
39 def observe(self, msg):
40 print(
41 display_messages(
42 [msg],
43 ignore_fields=self.opt.get('display_ignore_fields', ''),
44 prettify=self.opt.get('display_prettify', False),
45 )
46 )
47
48 def act(self):
49 reply = {}
50 reply['id'] = self.getID()
51 reply_text = input("Enter Your Message: ")
52 reply_text = reply_text.replace('\\n', '\n')
53 if self.opt.get('single_turn', False):
54 reply_text += '[DONE]'
55 reply['episode_done'] = False
56 reply['label_candidates'] = self.fixedCands_txt
57 if '[DONE]' in reply_text:
58 reply['episode_done'] = True
59 self.episodeDone = True
60 reply_text = reply_text.replace('[DONE]', '')
61 reply['text'] = reply_text
62 return reply
63
64 def episode_done(self):
65 return self.episodeDone
66
[end of parlai/agents/local_human/local_human.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parlai/agents/local_human/local_human.py b/parlai/agents/local_human/local_human.py
--- a/parlai/agents/local_human/local_human.py
+++ b/parlai/agents/local_human/local_human.py
@@ -8,6 +8,7 @@
"""
from parlai.core.agents import Agent
+from parlai.core.message import Message
from parlai.core.utils import display_messages, load_cands
@@ -46,7 +47,7 @@
)
def act(self):
- reply = {}
+ reply = Message()
reply['id'] = self.getID()
reply_text = input("Enter Your Message: ")
reply_text = reply_text.replace('\\n', '\n')
@@ -55,7 +56,7 @@
reply['episode_done'] = False
reply['label_candidates'] = self.fixedCands_txt
if '[DONE]' in reply_text:
- reply['episode_done'] = True
+ reply.force_set('episode_done', True)
self.episodeDone = True
reply_text = reply_text.replace('[DONE]', '')
reply['text'] = reply_text
|
{"golden_diff": "diff --git a/parlai/agents/local_human/local_human.py b/parlai/agents/local_human/local_human.py\n--- a/parlai/agents/local_human/local_human.py\n+++ b/parlai/agents/local_human/local_human.py\n@@ -8,6 +8,7 @@\n \"\"\"\n \n from parlai.core.agents import Agent\n+from parlai.core.message import Message\n from parlai.core.utils import display_messages, load_cands\n \n \n@@ -46,7 +47,7 @@\n )\n \n def act(self):\n- reply = {}\n+ reply = Message()\n reply['id'] = self.getID()\n reply_text = input(\"Enter Your Message: \")\n reply_text = reply_text.replace('\\\\n', '\\n')\n@@ -55,7 +56,7 @@\n reply['episode_done'] = False\n reply['label_candidates'] = self.fixedCands_txt\n if '[DONE]' in reply_text:\n- reply['episode_done'] = True\n+ reply.force_set('episode_done', True)\n self.episodeDone = True\n reply_text = reply_text.replace('[DONE]', '')\n reply['text'] = reply_text\n", "issue": "AttributeError: 'dict' object has no attribute 'force_set'\n**Bug description**\r\nWhen I try the pretrained model of Self-feeding Chatbot, by `python projects/self_feeding/interactive.py --model-file zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model --no-cuda`, error occurs: AttributeError: 'dict' object has no attribute 'force_set'\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\nEnter Your Message: hello\r\nTraceback (most recent call last):\r\n File \"projects/self_feeding/interactive.py\", line 87, in <module>\r\n interactive(parser.parse_args(print_args=False), print_parser=parser)\r\n File \"projects/self_feeding/interactive.py\", line 78, in interactive\r\n world.parley()\r\n File \"/home/han/Github/ParlAI/parlai/core/worlds.py\", line 273, in parley\r\n agents[1].observe(validate(acts[0]))\r\n File \"/home/han/Github/ParlAI/projects/self_feeding/self_feeding_agent.py\", line 370, in observe\r\n observation.force_set(\r\nAttributeError: 'dict' object has no attribute 'force_set'\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Agent does gets the local keyboard input in the act() function.\n Example: python examples/eval_model.py -m local_human -t babi:Task1k:1 -dt valid\n\"\"\"\n\nfrom parlai.core.agents import Agent\nfrom parlai.core.utils import display_messages, load_cands\n\n\nclass LocalHumanAgent(Agent):\n def add_cmdline_args(argparser):\n \"\"\"Add command-line arguments specifically for this agent.\"\"\"\n agent = argparser.add_argument_group('Local Human Arguments')\n agent.add_argument(\n '-fixedCands',\n '--local-human-candidates-file',\n default=None,\n type=str,\n help='File of label_candidates to send to other agent',\n )\n agent.add_argument(\n '--single_turn',\n type='bool',\n default=False,\n help='If on, assumes single turn episodes.',\n )\n\n def __init__(self, opt, shared=None):\n super().__init__(opt)\n self.id = 'localHuman'\n self.episodeDone = False\n self.fixedCands_txt = load_cands(self.opt.get('local_human_candidates_file'))\n print(\"Enter [DONE] if you want to end the episode.\\n\")\n\n def observe(self, msg):\n print(\n display_messages(\n [msg],\n ignore_fields=self.opt.get('display_ignore_fields', ''),\n prettify=self.opt.get('display_prettify', False),\n )\n )\n\n def act(self):\n reply = {}\n reply['id'] = self.getID()\n reply_text = input(\"Enter Your Message: \")\n reply_text = reply_text.replace('\\\\n', '\\n')\n if self.opt.get('single_turn', False):\n reply_text += '[DONE]'\n reply['episode_done'] = False\n reply['label_candidates'] = self.fixedCands_txt\n if '[DONE]' in reply_text:\n reply['episode_done'] = True\n self.episodeDone = True\n reply_text = reply_text.replace('[DONE]', '')\n reply['text'] = reply_text\n return reply\n\n def episode_done(self):\n return self.episodeDone\n", "path": "parlai/agents/local_human/local_human.py"}]}
| 1,459 | 256 |
gh_patches_debug_16688
|
rasdani/github-patches
|
git_diff
|
bids-standard__pybids-21
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_fieldmaps not compatible with multiple 'intendedfor'
currently `if path.endswith(metadata["IntendedFor"]):` assumes string, but current specs allow list as well
get_fieldmaps not compatible with multiple 'intendedfor'
currently `if path.endswith(metadata["IntendedFor"]):` assumes string, but current specs allow list as well
</issue>
<code>
[start of bids/grabbids/bids_layout.py]
1 import os
2 import re
3 import json
4
5 from itertools import combinations
6 from os.path import dirname
7 from os.path import realpath
8 from os.path import join as pathjoin
9 from os.path import split as pathsplit
10
11 from grabbit import Layout
12
13 __all__ = ['BIDSLayout']
14
15
16 class BIDSLayout(Layout):
17 def __init__(self, path, config=None):
18 if config is None:
19 root = dirname(realpath(__file__))
20 config = pathjoin(root, 'config', 'bids.json')
21 super(BIDSLayout, self).__init__(path, config, dynamic_getters=True)
22
23 def get_metadata(self, path):
24 sidecarJSON = path.replace(".nii.gz", ".json").replace(".nii", ".json")
25 path_components = pathsplit(sidecarJSON)
26 filename_components = path_components[-1].split("_")
27 ses = None
28 suffix = filename_components[-1]
29
30 sub = filename_components[0]
31 keyword_components = filename_components[1:-1]
32 if filename_components[1][:3] == "ses":
33 ses = filename_components[1]
34 keyword_components = filename_components[2:-1]
35
36 potentialJSONs = []
37 for prefixes, midlayer, conditional in ( # Levels
38 (tuple(), tuple(), True), # top
39 ((sub,), tuple(), True), # subject
40 ((sub, ), (pathsplit(path_components[-2])[-1],), True),
41 ((sub, ses), tuple(), ses), # session
42 ((sub, ses), (pathsplit(path_components[-2])[-1],), ses)
43 ):
44 if not conditional:
45 continue
46 for k in range(len(keyword_components) + 1):
47 for components in combinations(keyword_components, k):
48 potentialJSONs.append(
49 pathjoin(
50 self.root,
51 *(prefixes + midlayer +
52 ("_".join(prefixes + components + (suffix,)),))))
53
54 merged_param_dict = {}
55 for json_file_path in potentialJSONs:
56 if os.path.exists(json_file_path):
57 param_dict = json.load(open(json_file_path, "r"))
58 merged_param_dict.update(param_dict)
59
60 return merged_param_dict
61
62 def get_fieldmap(self, path):
63 sub = os.path.split(path)[1].split("_")[0].split("sub-")[1]
64 fieldmap_set = {}
65 for file in self.get(subject=sub,
66 type='(phase1|phase2|phasediff|epi|fieldmap)',
67 extensions=['nii.gz', 'nii']):
68 metadata = self.get_metadata(file.filename)
69 if metadata and "IntendedFor" in metadata.keys():
70 if path.endswith(metadata["IntendedFor"]):
71 if file.type == "phasediff":
72 fieldmap_set = {"phasediff": file.filename,
73 "magnitude1": file.filename.replace(
74 "phasediff", "magnitude1"),
75 "magnitude2": file.filename.replace(
76 "phasediff", "magnitude2"),
77 "type": "phasediff"}
78 break
79 elif file.type == "phase1":
80 fieldmap_set["phase1"] = file.filename
81 fieldmap_set["magnitude1"] = \
82 file.filename.replace("phase1", "magnitude1")
83 fieldmap_set["type"] = "phase"
84 elif file.type == "phase2":
85 fieldmap_set["phase2"] = file.filename
86 fieldmap_set["magnitude2"] = \
87 file.filename.replace("phase2", "magnitude2")
88 fieldmap_set["type"] = "phase"
89 elif file.type == "epi":
90 if "epi" not in fieldmap_set.keys():
91 fieldmap_set["epi"] = []
92 fieldmap_set["epi"].append(file.filename)
93 fieldmap_set["type"] = "epi"
94 elif file.type == "fieldmap":
95 fieldmap_set["fieldmap"] = file.filename
96 fieldmap_set["magnitude"] = \
97 file.filename.replace("fieldmap", "magnitude")
98 fieldmap_set["type"] = "fieldmap"
99 return fieldmap_set
100
101 def find_match(self, target, source=None):
102
103 # Try to take the easy way out
104 if source is not None:
105 _target = source.split('.')[0] + '.' + target
106 if os.path.exists(_target):
107 return target
108
109 if target in list(self.entities.keys()):
110 candidates = list(self.entities[target].files.keys())
111 else:
112 candidates = []
113
114 for root, directories, filenames in os.walk(self.root):
115 for f in filenames:
116 if re.search(target + '$', f):
117 if os.path.sep == "\\":
118 f = f.replace("\\", "\\\\")
119 candidates.append(f)
120
121 if source is None:
122 return candidates
123
124 # Walk up the file hierarchy from source, find first match
125 if not os.path.exists(source):
126 raise OSError("The file '%s' doesn't exist." % source)
127 elif not source.startswith(self.root):
128 raise ValueError("The file '%s' is not contained "
129 "within the current project "
130 "directory (%s)." % (source, self.root))
131 rel = os.path.relpath(dirname(source), self.root)
132 sep = os.path.sep
133 chunks = rel.split(sep)
134 n_chunks = len(chunks)
135 for i in range(n_chunks, -1, -1):
136 path = pathjoin(self.root, *chunks[:i])
137 patt = path + '\%s[^\%s]+$' % (sep, sep)
138 if sep == "\\":
139 patt = path + '\\[^\\]+$'
140 patt = patt.replace("\\", "\\\\")
141 matches = [x for x in candidates if re.search(patt, x)]
142 if matches:
143 if len(matches) == 1:
144 return matches[0]
145 else:
146 raise ValueError("Ambiguous target: more than one "
147 "candidate file found in "
148 "directory '%s'." % path)
149 return None
150
[end of bids/grabbids/bids_layout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py
--- a/bids/grabbids/bids_layout.py
+++ b/bids/grabbids/bids_layout.py
@@ -67,7 +67,11 @@
extensions=['nii.gz', 'nii']):
metadata = self.get_metadata(file.filename)
if metadata and "IntendedFor" in metadata.keys():
- if path.endswith(metadata["IntendedFor"]):
+ if isinstance(metadata["IntendedFor"], list):
+ intended_for = metadata["IntendedFor"]
+ else:
+ intended_for = [metadata["IntendedFor"]]
+ if any([path.endswith(suffix) for suffix in intended_for]):
if file.type == "phasediff":
fieldmap_set = {"phasediff": file.filename,
"magnitude1": file.filename.replace(
|
{"golden_diff": "diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py\n--- a/bids/grabbids/bids_layout.py\n+++ b/bids/grabbids/bids_layout.py\n@@ -67,7 +67,11 @@\n extensions=['nii.gz', 'nii']):\n metadata = self.get_metadata(file.filename)\n if metadata and \"IntendedFor\" in metadata.keys():\n- if path.endswith(metadata[\"IntendedFor\"]):\n+ if isinstance(metadata[\"IntendedFor\"], list):\n+ intended_for = metadata[\"IntendedFor\"]\n+ else:\n+ intended_for = [metadata[\"IntendedFor\"]]\n+ if any([path.endswith(suffix) for suffix in intended_for]):\n if file.type == \"phasediff\":\n fieldmap_set = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n", "issue": "get_fieldmaps not compatible with multiple 'intendedfor'\ncurrently `if path.endswith(metadata[\"IntendedFor\"]):` assumes string, but current specs allow list as well\n\nget_fieldmaps not compatible with multiple 'intendedfor'\ncurrently `if path.endswith(metadata[\"IntendedFor\"]):` assumes string, but current specs allow list as well\n\n", "before_files": [{"content": "import os\nimport re\nimport json\n\nfrom itertools import combinations\nfrom os.path import dirname\nfrom os.path import realpath\nfrom os.path import join as pathjoin\nfrom os.path import split as pathsplit\n\nfrom grabbit import Layout\n\n__all__ = ['BIDSLayout']\n\n\nclass BIDSLayout(Layout):\n def __init__(self, path, config=None):\n if config is None:\n root = dirname(realpath(__file__))\n config = pathjoin(root, 'config', 'bids.json')\n super(BIDSLayout, self).__init__(path, config, dynamic_getters=True)\n\n def get_metadata(self, path):\n sidecarJSON = path.replace(\".nii.gz\", \".json\").replace(\".nii\", \".json\")\n path_components = pathsplit(sidecarJSON)\n filename_components = path_components[-1].split(\"_\")\n ses = None\n suffix = filename_components[-1]\n\n sub = filename_components[0]\n keyword_components = filename_components[1:-1]\n if filename_components[1][:3] == \"ses\":\n ses = filename_components[1]\n keyword_components = filename_components[2:-1]\n\n potentialJSONs = []\n for prefixes, midlayer, conditional in ( # Levels\n (tuple(), tuple(), True), # top\n ((sub,), tuple(), True), # subject\n ((sub, ), (pathsplit(path_components[-2])[-1],), True),\n ((sub, ses), tuple(), ses), # session\n ((sub, ses), (pathsplit(path_components[-2])[-1],), ses)\n ):\n if not conditional:\n continue\n for k in range(len(keyword_components) + 1):\n for components in combinations(keyword_components, k):\n potentialJSONs.append(\n pathjoin(\n self.root,\n *(prefixes + midlayer +\n (\"_\".join(prefixes + components + (suffix,)),))))\n\n merged_param_dict = {}\n for json_file_path in potentialJSONs:\n if os.path.exists(json_file_path):\n param_dict = json.load(open(json_file_path, \"r\"))\n merged_param_dict.update(param_dict)\n\n return merged_param_dict\n\n def get_fieldmap(self, path):\n sub = os.path.split(path)[1].split(\"_\")[0].split(\"sub-\")[1]\n fieldmap_set = {}\n for file in self.get(subject=sub,\n type='(phase1|phase2|phasediff|epi|fieldmap)',\n extensions=['nii.gz', 'nii']):\n metadata = self.get_metadata(file.filename)\n if metadata and \"IntendedFor\" in metadata.keys():\n if path.endswith(metadata[\"IntendedFor\"]):\n if file.type == \"phasediff\":\n fieldmap_set = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n \"phasediff\", \"magnitude1\"),\n \"magnitude2\": file.filename.replace(\n \"phasediff\", \"magnitude2\"),\n \"type\": \"phasediff\"}\n break\n elif file.type == 
\"phase1\":\n fieldmap_set[\"phase1\"] = file.filename\n fieldmap_set[\"magnitude1\"] = \\\n file.filename.replace(\"phase1\", \"magnitude1\")\n fieldmap_set[\"type\"] = \"phase\"\n elif file.type == \"phase2\":\n fieldmap_set[\"phase2\"] = file.filename\n fieldmap_set[\"magnitude2\"] = \\\n file.filename.replace(\"phase2\", \"magnitude2\")\n fieldmap_set[\"type\"] = \"phase\"\n elif file.type == \"epi\":\n if \"epi\" not in fieldmap_set.keys():\n fieldmap_set[\"epi\"] = []\n fieldmap_set[\"epi\"].append(file.filename)\n fieldmap_set[\"type\"] = \"epi\"\n elif file.type == \"fieldmap\":\n fieldmap_set[\"fieldmap\"] = file.filename\n fieldmap_set[\"magnitude\"] = \\\n file.filename.replace(\"fieldmap\", \"magnitude\")\n fieldmap_set[\"type\"] = \"fieldmap\"\n return fieldmap_set\n\n def find_match(self, target, source=None):\n\n # Try to take the easy way out\n if source is not None:\n _target = source.split('.')[0] + '.' + target\n if os.path.exists(_target):\n return target\n\n if target in list(self.entities.keys()):\n candidates = list(self.entities[target].files.keys())\n else:\n candidates = []\n\n for root, directories, filenames in os.walk(self.root):\n for f in filenames:\n if re.search(target + '$', f):\n if os.path.sep == \"\\\\\":\n f = f.replace(\"\\\\\", \"\\\\\\\\\")\n candidates.append(f)\n\n if source is None:\n return candidates\n\n # Walk up the file hierarchy from source, find first match\n if not os.path.exists(source):\n raise OSError(\"The file '%s' doesn't exist.\" % source)\n elif not source.startswith(self.root):\n raise ValueError(\"The file '%s' is not contained \"\n \"within the current project \"\n \"directory (%s).\" % (source, self.root))\n rel = os.path.relpath(dirname(source), self.root)\n sep = os.path.sep\n chunks = rel.split(sep)\n n_chunks = len(chunks)\n for i in range(n_chunks, -1, -1):\n path = pathjoin(self.root, *chunks[:i])\n patt = path + '\\%s[^\\%s]+$' % (sep, sep)\n if sep == \"\\\\\":\n patt = path + '\\\\[^\\\\]+$'\n patt = patt.replace(\"\\\\\", \"\\\\\\\\\")\n matches = [x for x in candidates if re.search(patt, x)]\n if matches:\n if len(matches) == 1:\n return matches[0]\n else:\n raise ValueError(\"Ambiguous target: more than one \"\n \"candidate file found in \"\n \"directory '%s'.\" % path)\n return None\n", "path": "bids/grabbids/bids_layout.py"}]}
| 2,244 | 192 |
gh_patches_debug_628
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1633
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of tools/build_docs.py]
1 from __future__ import annotations
2
3 import argparse
4 import importlib.metadata
5 import json
6 import os
7 import shutil
8 import subprocess
9 from contextlib import contextmanager
10 from pathlib import Path
11 from typing import TypedDict
12
13 REDIRECT_TEMPLATE = """
14 <!DOCTYPE HTML>
15 <html lang="en-US">
16 <head>
17 <title>Page Redirection</title>
18 <meta charset="UTF-8">
19 <meta http-equiv="refresh" content="0; url={target}">
20 <script type="text/javascript">window.location.href = "{target}"</script>
21 </head>
22 <body>
23 You are being redirected. If this does not work, click <a href='{target}'>this link</a>
24 </body>
25 </html>
26 """
27
28 parser = argparse.ArgumentParser()
29 parser.add_argument("--version", required=False)
30 parser.add_argument("--ignore-missing-examples-output", action="store_true", default=False)
31 parser.add_argument("output")
32
33
34 class VersionSpec(TypedDict):
35 versions: list[str]
36 latest: str
37
38
39 @contextmanager
40 def checkout(branch: str) -> None:
41 subprocess.run(["git", "checkout", branch], check=True) # noqa: S603 S607
42 yield
43 subprocess.run(["git", "checkout", "-"], check=True) # noqa: S603 S607
44
45
46 def load_version_spec() -> VersionSpec:
47 versions_file = Path("docs/_static/versions.json")
48 if versions_file.exists():
49 return json.loads(versions_file.read_text())
50 return {"versions": [], "latest": ""}
51
52
53 def build(output_dir: str, version: str | None, ignore_missing_output: bool) -> None:
54 if version is None:
55 version = importlib.metadata.version("litestar").rsplit(".")[0]
56 else:
57 os.environ["_LITESTAR_DOCS_BUILD_VERSION"] = version
58
59 if ignore_missing_output:
60 os.environ["_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT"] = "1"
61
62 subprocess.run(["make", "docs"], check=True) # noqa: S603 S607
63
64 output_dir = Path(output_dir)
65 output_dir.mkdir()
66 output_dir.joinpath(".nojekyll").touch(exist_ok=True)
67
68 version_spec = load_version_spec()
69 is_latest = version == version_spec["latest"]
70
71 docs_src_path = Path("docs/_build/html")
72
73 output_dir.joinpath("index.html").write_text(REDIRECT_TEMPLATE.format(target="latest"))
74
75 if is_latest:
76 shutil.copytree(docs_src_path, output_dir / "latest", dirs_exist_ok=True)
77 shutil.copytree(docs_src_path, output_dir / version, dirs_exist_ok=True)
78
79 # copy existing versions into our output dir to preserve them when cleaning the branch
80 with checkout("gh-pages"):
81 for other_version in [*version_spec["versions"], "latest"]:
82 other_version_path = Path(other_version)
83 other_version_target_path = output_dir / other_version
84 if other_version_path.exists() and not other_version_target_path.exists():
85 shutil.copytree(other_version_path, other_version_target_path)
86
87
88 def main() -> None:
89 args = parser.parse_args()
90 build(
91 output_dir=args.output,
92 version=args.version,
93 ignore_missing_output=args.ignore_missing_output,
94 )
95
96
97 if __name__ == "__main__":
98 main()
99
[end of tools/build_docs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/build_docs.py b/tools/build_docs.py
--- a/tools/build_docs.py
+++ b/tools/build_docs.py
@@ -90,7 +90,7 @@
build(
output_dir=args.output,
version=args.version,
- ignore_missing_output=args.ignore_missing_output,
+ ignore_missing_output=args.ignore_missing_examples_output,
)
|
{"golden_diff": "diff --git a/tools/build_docs.py b/tools/build_docs.py\n--- a/tools/build_docs.py\n+++ b/tools/build_docs.py\n@@ -90,7 +90,7 @@\n build(\n output_dir=args.output,\n version=args.version,\n- ignore_missing_output=args.ignore_missing_output,\n+ ignore_missing_output=args.ignore_missing_examples_output,\n )\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport importlib.metadata\nimport json\nimport os\nimport shutil\nimport subprocess\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import TypedDict\n\nREDIRECT_TEMPLATE = \"\"\"\n<!DOCTYPE HTML>\n<html lang=\"en-US\">\n <head>\n <title>Page Redirection</title>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"refresh\" content=\"0; url={target}\">\n <script type=\"text/javascript\">window.location.href = \"{target}\"</script>\n </head>\n <body>\n You are being redirected. If this does not work, click <a href='{target}'>this link</a>\n </body>\n</html>\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--version\", required=False)\nparser.add_argument(\"--ignore-missing-examples-output\", action=\"store_true\", default=False)\nparser.add_argument(\"output\")\n\n\nclass VersionSpec(TypedDict):\n versions: list[str]\n latest: str\n\n\n@contextmanager\ndef checkout(branch: str) -> None:\n subprocess.run([\"git\", \"checkout\", branch], check=True) # noqa: S603 S607\n yield\n subprocess.run([\"git\", \"checkout\", \"-\"], check=True) # noqa: S603 S607\n\n\ndef load_version_spec() -> VersionSpec:\n versions_file = Path(\"docs/_static/versions.json\")\n if versions_file.exists():\n return json.loads(versions_file.read_text())\n return {\"versions\": [], \"latest\": \"\"}\n\n\ndef build(output_dir: str, version: str | None, ignore_missing_output: bool) -> None:\n if version is None:\n version = importlib.metadata.version(\"litestar\").rsplit(\".\")[0]\n else:\n os.environ[\"_LITESTAR_DOCS_BUILD_VERSION\"] = version\n\n if ignore_missing_output:\n os.environ[\"_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT\"] = \"1\"\n\n subprocess.run([\"make\", \"docs\"], check=True) # noqa: S603 S607\n\n output_dir = Path(output_dir)\n output_dir.mkdir()\n output_dir.joinpath(\".nojekyll\").touch(exist_ok=True)\n\n version_spec = load_version_spec()\n is_latest = version == version_spec[\"latest\"]\n\n docs_src_path = Path(\"docs/_build/html\")\n\n output_dir.joinpath(\"index.html\").write_text(REDIRECT_TEMPLATE.format(target=\"latest\"))\n\n if is_latest:\n shutil.copytree(docs_src_path, output_dir / \"latest\", dirs_exist_ok=True)\n shutil.copytree(docs_src_path, output_dir / version, dirs_exist_ok=True)\n\n # copy existing versions into our output dir to preserve them when cleaning the branch\n with checkout(\"gh-pages\"):\n for other_version in [*version_spec[\"versions\"], \"latest\"]:\n 
other_version_path = Path(other_version)\n other_version_target_path = output_dir / other_version\n if other_version_path.exists() and not other_version_target_path.exists():\n shutil.copytree(other_version_path, other_version_target_path)\n\n\ndef main() -> None:\n args = parser.parse_args()\n build(\n output_dir=args.output,\n version=args.version,\n ignore_missing_output=args.ignore_missing_output,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/build_docs.py"}]}
| 1,619 | 77 |
gh_patches_debug_29896
|
rasdani/github-patches
|
git_diff
|
bentoml__BentoML-932
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add python version mismatch warning
**Is your feature request related to a problem? Please describe.**
Currently, when the user saves a BentoML bundle, the `bentoml.yml` file under the bundle directory contains the Python version in which the bundle was created.
When deploying with Docker, bentoml ensures that the python version matches the saved bundle python version. Although when the user is using a custom docker image, or when the user is loading directly with the `bentoml.load` API, there's no guarantee the python version matches.
This can cause issues in some cases, especially where PickleArtifact or PyTorchArtifact is being used.
We should add a warning when loading a BentoML saved bundle if the current Python version does not match the saved bundle's python version.
**Describe the solution you'd like**
Add a warning to tell the user about Python version mismatch
**Describe alternatives you've considered**
Instead of warning, raise an exception.
We can still allow user to specifically set a config to allow loading BentoML bundle of a different python version, e.g. 'BENTOML__CORE__PYPASS_BUNDLE_PY_VERSION_CHECK=true'
**Additional context**
n/a
</issue>
<code>
[start of bentoml/saved_bundle/config.py]
1 # Copyright 2019 Atalaya Tech, Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import logging
17 from datetime import datetime
18 from pathlib import Path
19
20 from ruamel.yaml import YAML
21
22 from bentoml import __version__ as BENTOML_VERSION
23 from bentoml import config
24 from bentoml.configuration import get_bentoml_deploy_version
25 from bentoml.utils import dump_to_yaml_str
26 from bentoml.exceptions import BentoMLConfigException
27
28 BENTOML_CONFIG_YAML_TEPMLATE = """\
29 version: {bentoml_version}
30 kind: {kind}
31 metadata:
32 created_at: {created_at}
33 """
34
35 logger = logging.getLogger(__name__)
36 DEFAULT_MAX_LATENCY = config("marshal_server").getint("default_max_latency")
37 DEFAULT_MAX_BATCH_SIZE = config("marshal_server").getint("default_max_batch_size")
38
39
40 def _get_apis_list(bento_service):
41 result = []
42 for api in bento_service.inference_apis:
43 api_obj = {
44 "name": api.name,
45 "docs": api.doc,
46 "input_type": api.handler.__class__.__name__,
47 "output_type": api.handler.output_adapter.__class__.__name__,
48 "mb_max_batch_size": api.mb_max_batch_size,
49 "mb_max_latency": api.mb_max_latency,
50 }
51 if api.handler.config:
52 api_obj["input_config"] = api.handler.config
53 if api.output_adapter.config:
54 api_obj["output_config"] = api.output_adapter.config
55 result.append(api_obj)
56 return result
57
58
59 def _get_artifacts_list(bento_service):
60 result = []
61 for artifact_name, artifact in bento_service.artifacts.items():
62 result.append(
63 {'name': artifact_name, 'artifact_type': artifact.__class__.__name__}
64 )
65 return result
66
67
68 class SavedBundleConfig(object):
69 def __init__(self, bento_service=None, kind="BentoService"):
70 self.kind = kind
71 self._yaml = YAML()
72 self._yaml.default_flow_style = False
73 self.config = self._yaml.load(
74 BENTOML_CONFIG_YAML_TEPMLATE.format(
75 kind=self.kind,
76 bentoml_version=get_bentoml_deploy_version(),
77 created_at=str(datetime.utcnow()),
78 )
79 )
80
81 if bento_service is not None:
82 self.config["metadata"].update(
83 {
84 "service_name": bento_service.name,
85 "service_version": bento_service.version,
86 }
87 )
88 self.config["env"] = bento_service.env.to_dict()
89 self.config['apis'] = _get_apis_list(bento_service)
90 self.config['artifacts'] = _get_artifacts_list(bento_service)
91
92 def write_to_path(self, path, filename="bentoml.yml"):
93 return self._yaml.dump(self.config, Path(os.path.join(path, filename)))
94
95 @classmethod
96 def load(cls, filepath):
97 conf = cls()
98 with open(filepath, "rb") as config_file:
99 yml_content = config_file.read()
100 conf.config = conf._yaml.load(yml_content)
101 ver = str(conf["version"])
102
103 if ver != BENTOML_VERSION:
104 msg = (
105 "Saved BentoService bundle version mismatch: loading BentoService "
106 "bundle create with BentoML version {}, but loading from BentoML "
107 "version {}".format(conf["version"], BENTOML_VERSION)
108 )
109
110 # If major version is different, then there could be incompatible API
111 # changes. Raise error in this case.
112 if ver.split(".")[0] != BENTOML_VERSION.split(".")[0]:
113 if not BENTOML_VERSION.startswith('0+untagged'):
114 raise BentoMLConfigException(msg)
115 else:
116 logger.warning(msg)
117 else: # Otherwise just show a warning.
118 logger.warning(msg)
119
120 return conf
121
122 def get_bento_service_metadata_pb(self):
123 from bentoml.yatai.proto.repository_pb2 import BentoServiceMetadata
124
125 bento_service_metadata = BentoServiceMetadata()
126 bento_service_metadata.name = self.config["metadata"]["service_name"]
127 bento_service_metadata.version = self.config["metadata"]["service_version"]
128 bento_service_metadata.created_at.FromDatetime(
129 self.config["metadata"]["created_at"]
130 )
131
132 if "env" in self.config:
133 if "setup_sh" in self.config["env"]:
134 bento_service_metadata.env.setup_sh = self.config["env"]["setup_sh"]
135
136 if "conda_env" in self.config["env"]:
137 bento_service_metadata.env.conda_env = dump_to_yaml_str(
138 self.config["env"]["conda_env"]
139 )
140
141 if "pip_dependencies" in self.config["env"]:
142 bento_service_metadata.env.pip_dependencies = "\n".join(
143 self.config["env"]["pip_dependencies"]
144 )
145 if "python_version" in self.config["env"]:
146 bento_service_metadata.env.python_version = self.config["env"][
147 "python_version"
148 ]
149 if "docker_base_image" in self.config["env"]:
150 bento_service_metadata.env.docker_base_image = self.config["env"][
151 "docker_base_image"
152 ]
153
154 if "apis" in self.config:
155 for api_config in self.config["apis"]:
156 if 'handler_type' in api_config:
157 # Convert handler type to input type for saved bundle created
158 # before version 0.8.0
159 input_type = api_config.get('handler_type')
160 elif 'input_type' in api_config:
161 input_type = api_config.get('input_type')
162 else:
163 input_type = "unknown"
164
165 if 'output_type' in api_config:
166 output_type = api_config.get('output_type')
167 else:
168 output_type = "DefaultOutput"
169
170 api_metadata = BentoServiceMetadata.BentoServiceApi(
171 name=api_config["name"],
172 docs=api_config["docs"],
173 input_type=input_type,
174 output_type=output_type,
175 )
176 if "handler_config" in api_config:
177 # Supports viewing API input config info for saved bundle created
178 # before version 0.8.0
179 for k, v in api_config["handler_config"].items():
180 if k in {'mb_max_latency', 'mb_max_batch_size'}:
181 setattr(api_metadata, k, v)
182 else:
183 api_metadata.input_config[k] = v
184 else:
185 if 'mb_max_latency' in api_config:
186 api_metadata.mb_max_latency = api_config["mb_max_latency"]
187 else:
188 api_metadata.mb_max_latency = DEFAULT_MAX_LATENCY
189
190 if 'mb_max_batch_size' in api_config:
191 api_metadata.mb_max_batch_size = api_config["mb_max_batch_size"]
192 else:
193 api_metadata.mb_max_batch_size = DEFAULT_MAX_BATCH_SIZE
194
195 if "input_config" in api_config:
196 for k, v in api_config["input_config"].items():
197 api_metadata.input_config[k] = v
198
199 if "output_config" in api_config:
200 for k, v in api_config["output_config"].items():
201 api_metadata.output_config[k] = v
202 bento_service_metadata.apis.extend([api_metadata])
203
204 if "artifacts" in self.config:
205 for artifact_config in self.config["artifacts"]:
206 artifact_metadata = BentoServiceMetadata.BentoArtifact()
207 if "name" in artifact_config:
208 artifact_metadata.name = artifact_config["name"]
209 if "artifact_type" in artifact_config:
210 artifact_metadata.artifact_type = artifact_config["artifact_type"]
211 bento_service_metadata.artifacts.extend([artifact_metadata])
212
213 return bento_service_metadata
214
215 def __getitem__(self, item):
216 return self.config[item]
217
218 def __setitem__(self, key, value):
219 self.config[key] = value
220
221 def __contains__(self, item):
222 return item in self.config
223
[end of bentoml/saved_bundle/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bentoml/saved_bundle/config.py b/bentoml/saved_bundle/config.py
--- a/bentoml/saved_bundle/config.py
+++ b/bentoml/saved_bundle/config.py
@@ -16,6 +16,7 @@
import logging
from datetime import datetime
from pathlib import Path
+from sys import version_info
from ruamel.yaml import YAML
@@ -35,6 +36,7 @@
logger = logging.getLogger(__name__)
DEFAULT_MAX_LATENCY = config("marshal_server").getint("default_max_latency")
DEFAULT_MAX_BATCH_SIZE = config("marshal_server").getint("default_max_batch_size")
+PYTHON_VERSION = f"{version_info.major}.{version_info.minor}.{version_info.micro}"
def _get_apis_list(bento_service):
@@ -99,6 +101,7 @@
yml_content = config_file.read()
conf.config = conf._yaml.load(yml_content)
ver = str(conf["version"])
+ py_ver = conf.config["env"]["python_version"]
if ver != BENTOML_VERSION:
msg = (
@@ -117,6 +120,13 @@
else: # Otherwise just show a warning.
logger.warning(msg)
+ if py_ver != PYTHON_VERSION:
+ logger.warning(
+ f"Saved BentoService Python version mismatch: loading "
+ f"BentoService bundle created with Python version {py_ver}, "
+ f"but current environment version is {PYTHON_VERSION}."
+ )
+
return conf
def get_bento_service_metadata_pb(self):
|
{"golden_diff": "diff --git a/bentoml/saved_bundle/config.py b/bentoml/saved_bundle/config.py\n--- a/bentoml/saved_bundle/config.py\n+++ b/bentoml/saved_bundle/config.py\n@@ -16,6 +16,7 @@\n import logging\n from datetime import datetime\n from pathlib import Path\n+from sys import version_info\n \n from ruamel.yaml import YAML\n \n@@ -35,6 +36,7 @@\n logger = logging.getLogger(__name__)\n DEFAULT_MAX_LATENCY = config(\"marshal_server\").getint(\"default_max_latency\")\n DEFAULT_MAX_BATCH_SIZE = config(\"marshal_server\").getint(\"default_max_batch_size\")\n+PYTHON_VERSION = f\"{version_info.major}.{version_info.minor}.{version_info.micro}\"\n \n \n def _get_apis_list(bento_service):\n@@ -99,6 +101,7 @@\n yml_content = config_file.read()\n conf.config = conf._yaml.load(yml_content)\n ver = str(conf[\"version\"])\n+ py_ver = conf.config[\"env\"][\"python_version\"]\n \n if ver != BENTOML_VERSION:\n msg = (\n@@ -117,6 +120,13 @@\n else: # Otherwise just show a warning.\n logger.warning(msg)\n \n+ if py_ver != PYTHON_VERSION:\n+ logger.warning(\n+ f\"Saved BentoService Python version mismatch: loading \"\n+ f\"BentoService bundle created with Python version {py_ver}, \"\n+ f\"but current environment version is {PYTHON_VERSION}.\"\n+ )\n+\n return conf\n \n def get_bento_service_metadata_pb(self):\n", "issue": "Add python version mismatch warning\n**Is your feature request related to a problem? Please describe.**\r\n\r\nCurrently, when the user saves a BentoML bundle, the `bentoml.yml` file under the bundle directory contains the Python version in which the bundle was created.\r\n\r\nWhen deploying with Docker, bentoml ensures that the python version matches the saved bundle python version. Although when the user is using a custom docker image, or when the user is loading directly with the `bentoml.load` API, there's no guarantee the python version matches.\r\n\r\nThis can cause issues in some cases, especially where PickleArtifact or PyTorchArtifact is being used.\r\n\r\nWe should add a warning when loading a BentoML saved bundle if the current Python version does not match the saved bundle's python version.\r\n\r\n\r\n**Describe the solution you'd like**\r\nAdd a warning to tell the user about Python version mismatch\r\n\r\n**Describe alternatives you've considered**\r\nInstead of warning, raise an exception. \r\n\r\nWe can still allow user to specifically set a config to allow loading BentoML bundle of a different python version, e.g. 
'BENTOML__CORE__PYPASS_BUNDLE_PY_VERSION_CHECK=true' \r\n\r\n**Additional context**\r\nn/a\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom ruamel.yaml import YAML\n\nfrom bentoml import __version__ as BENTOML_VERSION\nfrom bentoml import config\nfrom bentoml.configuration import get_bentoml_deploy_version\nfrom bentoml.utils import dump_to_yaml_str\nfrom bentoml.exceptions import BentoMLConfigException\n\nBENTOML_CONFIG_YAML_TEPMLATE = \"\"\"\\\nversion: {bentoml_version}\nkind: {kind}\nmetadata:\n created_at: {created_at}\n\"\"\"\n\nlogger = logging.getLogger(__name__)\nDEFAULT_MAX_LATENCY = config(\"marshal_server\").getint(\"default_max_latency\")\nDEFAULT_MAX_BATCH_SIZE = config(\"marshal_server\").getint(\"default_max_batch_size\")\n\n\ndef _get_apis_list(bento_service):\n result = []\n for api in bento_service.inference_apis:\n api_obj = {\n \"name\": api.name,\n \"docs\": api.doc,\n \"input_type\": api.handler.__class__.__name__,\n \"output_type\": api.handler.output_adapter.__class__.__name__,\n \"mb_max_batch_size\": api.mb_max_batch_size,\n \"mb_max_latency\": api.mb_max_latency,\n }\n if api.handler.config:\n api_obj[\"input_config\"] = api.handler.config\n if api.output_adapter.config:\n api_obj[\"output_config\"] = api.output_adapter.config\n result.append(api_obj)\n return result\n\n\ndef _get_artifacts_list(bento_service):\n result = []\n for artifact_name, artifact in bento_service.artifacts.items():\n result.append(\n {'name': artifact_name, 'artifact_type': artifact.__class__.__name__}\n )\n return result\n\n\nclass SavedBundleConfig(object):\n def __init__(self, bento_service=None, kind=\"BentoService\"):\n self.kind = kind\n self._yaml = YAML()\n self._yaml.default_flow_style = False\n self.config = self._yaml.load(\n BENTOML_CONFIG_YAML_TEPMLATE.format(\n kind=self.kind,\n bentoml_version=get_bentoml_deploy_version(),\n created_at=str(datetime.utcnow()),\n )\n )\n\n if bento_service is not None:\n self.config[\"metadata\"].update(\n {\n \"service_name\": bento_service.name,\n \"service_version\": bento_service.version,\n }\n )\n self.config[\"env\"] = bento_service.env.to_dict()\n self.config['apis'] = _get_apis_list(bento_service)\n self.config['artifacts'] = _get_artifacts_list(bento_service)\n\n def write_to_path(self, path, filename=\"bentoml.yml\"):\n return self._yaml.dump(self.config, Path(os.path.join(path, filename)))\n\n @classmethod\n def load(cls, filepath):\n conf = cls()\n with open(filepath, \"rb\") as config_file:\n yml_content = config_file.read()\n conf.config = conf._yaml.load(yml_content)\n ver = str(conf[\"version\"])\n\n if ver != BENTOML_VERSION:\n msg = (\n \"Saved BentoService bundle version mismatch: loading BentoService \"\n \"bundle create with BentoML version {}, but loading from BentoML \"\n \"version {}\".format(conf[\"version\"], BENTOML_VERSION)\n )\n\n # If major version is 
different, then there could be incompatible API\n # changes. Raise error in this case.\n if ver.split(\".\")[0] != BENTOML_VERSION.split(\".\")[0]:\n if not BENTOML_VERSION.startswith('0+untagged'):\n raise BentoMLConfigException(msg)\n else:\n logger.warning(msg)\n else: # Otherwise just show a warning.\n logger.warning(msg)\n\n return conf\n\n def get_bento_service_metadata_pb(self):\n from bentoml.yatai.proto.repository_pb2 import BentoServiceMetadata\n\n bento_service_metadata = BentoServiceMetadata()\n bento_service_metadata.name = self.config[\"metadata\"][\"service_name\"]\n bento_service_metadata.version = self.config[\"metadata\"][\"service_version\"]\n bento_service_metadata.created_at.FromDatetime(\n self.config[\"metadata\"][\"created_at\"]\n )\n\n if \"env\" in self.config:\n if \"setup_sh\" in self.config[\"env\"]:\n bento_service_metadata.env.setup_sh = self.config[\"env\"][\"setup_sh\"]\n\n if \"conda_env\" in self.config[\"env\"]:\n bento_service_metadata.env.conda_env = dump_to_yaml_str(\n self.config[\"env\"][\"conda_env\"]\n )\n\n if \"pip_dependencies\" in self.config[\"env\"]:\n bento_service_metadata.env.pip_dependencies = \"\\n\".join(\n self.config[\"env\"][\"pip_dependencies\"]\n )\n if \"python_version\" in self.config[\"env\"]:\n bento_service_metadata.env.python_version = self.config[\"env\"][\n \"python_version\"\n ]\n if \"docker_base_image\" in self.config[\"env\"]:\n bento_service_metadata.env.docker_base_image = self.config[\"env\"][\n \"docker_base_image\"\n ]\n\n if \"apis\" in self.config:\n for api_config in self.config[\"apis\"]:\n if 'handler_type' in api_config:\n # Convert handler type to input type for saved bundle created\n # before version 0.8.0\n input_type = api_config.get('handler_type')\n elif 'input_type' in api_config:\n input_type = api_config.get('input_type')\n else:\n input_type = \"unknown\"\n\n if 'output_type' in api_config:\n output_type = api_config.get('output_type')\n else:\n output_type = \"DefaultOutput\"\n\n api_metadata = BentoServiceMetadata.BentoServiceApi(\n name=api_config[\"name\"],\n docs=api_config[\"docs\"],\n input_type=input_type,\n output_type=output_type,\n )\n if \"handler_config\" in api_config:\n # Supports viewing API input config info for saved bundle created\n # before version 0.8.0\n for k, v in api_config[\"handler_config\"].items():\n if k in {'mb_max_latency', 'mb_max_batch_size'}:\n setattr(api_metadata, k, v)\n else:\n api_metadata.input_config[k] = v\n else:\n if 'mb_max_latency' in api_config:\n api_metadata.mb_max_latency = api_config[\"mb_max_latency\"]\n else:\n api_metadata.mb_max_latency = DEFAULT_MAX_LATENCY\n\n if 'mb_max_batch_size' in api_config:\n api_metadata.mb_max_batch_size = api_config[\"mb_max_batch_size\"]\n else:\n api_metadata.mb_max_batch_size = DEFAULT_MAX_BATCH_SIZE\n\n if \"input_config\" in api_config:\n for k, v in api_config[\"input_config\"].items():\n api_metadata.input_config[k] = v\n\n if \"output_config\" in api_config:\n for k, v in api_config[\"output_config\"].items():\n api_metadata.output_config[k] = v\n bento_service_metadata.apis.extend([api_metadata])\n\n if \"artifacts\" in self.config:\n for artifact_config in self.config[\"artifacts\"]:\n artifact_metadata = BentoServiceMetadata.BentoArtifact()\n if \"name\" in artifact_config:\n artifact_metadata.name = artifact_config[\"name\"]\n if \"artifact_type\" in artifact_config:\n artifact_metadata.artifact_type = artifact_config[\"artifact_type\"]\n bento_service_metadata.artifacts.extend([artifact_metadata])\n\n 
return bento_service_metadata\n\n def __getitem__(self, item):\n return self.config[item]\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, item):\n return item in self.config\n", "path": "bentoml/saved_bundle/config.py"}]}
| 3,169 | 349 |
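A hedged, standalone sketch of the version check described in the record above: it mirrors the idea in that record's golden diff (compare the running interpreter's `sys.version_info` against the bundle's saved `python_version` and warn on mismatch), but the function and variable names below are illustrative only and are not BentoML APIs.

```python
# Hedged sketch only -- warn_on_python_mismatch and PYTHON_VERSION are
# illustrative names, not part of BentoML.
import logging
from sys import version_info

logger = logging.getLogger(__name__)
PYTHON_VERSION = f"{version_info.major}.{version_info.minor}.{version_info.micro}"


def warn_on_python_mismatch(saved_python_version: str) -> None:
    """Warn when a saved bundle was created under a different interpreter."""
    if saved_python_version != PYTHON_VERSION:
        logger.warning(
            "Saved bundle was created with Python %s but is being loaded "
            "under Python %s; pickled artifacts may fail to deserialize.",
            saved_python_version,
            PYTHON_VERSION,
        )
```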
gh_patches_debug_32977
|
rasdani/github-patches
|
git_diff
|
Flexget__Flexget-293
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch to GitHub Actions for CI
### Motivation for changes:
Try out github actions as a replacement for circleci
Hopefully we don't need to maintain custom docker images this way
#### To Do:
- [ ] Get it working
- [ ] Verify we like it better than circleci
- [ ] Add a release workflow
</issue>
<code>
[start of flexget/plugins/output/pyload.py]
1 # -*- coding: utf-8 -*-
2
3 from __future__ import unicode_literals, division, absolute_import
4 from logging import getLogger
5 from urllib import quote
6
7 from requests.exceptions import RequestException
8
9 from flexget import plugin, validator
10 from flexget.event import event
11 from flexget.utils import json, requests
12
13 log = getLogger('pyload')
14
15
16 class PluginPyLoad(object):
17 """
18 Parse task content or url for hoster links and adds them to pyLoad.
19
20 Example::
21
22 pyload:
23 api: http://localhost:8000/api
24 queue: yes
25 username: my_username
26 password: my_password
27 folder: desired_folder
28 package: desired_package_name (jinja2 supported)
29 hoster:
30 - YoutubeCom
31 parse_url: no
32 multiple_hoster: yes
33 enabled: yes
34
35 Default values for the config elements::
36
37 pyload:
38 api: http://localhost:8000/api
39 queue: no
40 hoster: ALL
41 parse_url: no
42 multiple_hoster: yes
43 enabled: yes
44 """
45
46 __author__ = 'http://pyload.org'
47 __version__ = '0.4'
48
49 DEFAULT_API = 'http://localhost:8000/api'
50 DEFAULT_QUEUE = False
51 DEFAULT_FOLDER = ''
52 DEFAULT_HOSTER = []
53 DEFAULT_PARSE_URL = False
54 DEFAULT_MULTIPLE_HOSTER = True
55 DEFAULT_PREFERRED_HOSTER_ONLY = False
56 DEFAULT_HANDLE_NO_URL_AS_FAILURE = False
57
58 def validator(self):
59 """Return config validator"""
60 root = validator.factory()
61 root.accept('boolean')
62 advanced = root.accept('dict')
63 advanced.accept('text', key='api')
64 advanced.accept('text', key='username')
65 advanced.accept('text', key='password')
66 advanced.accept('text', key='folder')
67 advanced.accept('text', key='package')
68 advanced.accept('boolean', key='queue')
69 advanced.accept('boolean', key='parse_url')
70 advanced.accept('boolean', key='multiple_hoster')
71 advanced.accept('list', key='hoster').accept('text')
72 advanced.accept('boolean', key='preferred_hoster_only')
73 advanced.accept('boolean', key='handle_no_url_as_failure')
74 advanced.accept('boolean', key='enabled')
75 return root
76
77
78 def on_task_output(self, task, config):
79 if not config.get('enabled', True):
80 return
81 if not task.accepted:
82 return
83
84 self.add_entries(task, config)
85
86 def add_entries(self, task, config):
87 """Adds accepted entries"""
88
89 try:
90 session = self.get_session(config)
91 except IOError:
92 raise plugin.PluginError('pyLoad not reachable', log)
93 except plugin.PluginError:
94 raise
95 except Exception as e:
96 raise plugin.PluginError('Unknown error: %s' % str(e), log)
97
98 api = config.get('api', self.DEFAULT_API)
99 hoster = config.get('hoster', self.DEFAULT_HOSTER)
100 folder = config.get('folder', self.DEFAULT_FOLDER)
101
102 for entry in task.accepted:
103 # bunch of urls now going to check
104 content = entry.get('description', '') + ' ' + quote(entry['url'])
105 content = json.dumps(content.encode("utf8"))
106
107 url = json.dumps(entry['url']) if config.get('parse_url', self.DEFAULT_PARSE_URL) else "''"
108
109 log.debug("Parsing url %s" % url)
110
111 result = query_api(api, "parseURLs", {"html": content, "url": url, "session": session})
112
113 # parsed { plugins: [urls] }
114 parsed = result.json()
115
116 urls = []
117
118 # check for preferred hoster
119 for name in hoster:
120 if name in parsed:
121 urls.extend(parsed[name])
122 if not config.get('multiple_hoster', self.DEFAULT_MULTIPLE_HOSTER):
123 break
124
125 # no preferred hoster and not preferred hoster only - add all recognized plugins
126 if not urls and not config.get('preferred_hoster_only', self.DEFAULT_PREFERRED_HOSTER_ONLY):
127 for name, purls in parsed.iteritems():
128 if name != "BasePlugin":
129 urls.extend(purls)
130
131 if task.options.test:
132 log.info('Would add `%s` to pyload' % urls)
133 continue
134
135 # no urls found
136 if not urls:
137 if config.get('handle_no_url_as_failure', self.DEFAULT_HANDLE_NO_URL_AS_FAILURE):
138 entry.fail("No suited urls in entry %s" % entry['title'])
139 else:
140 log.info("No suited urls in entry %s" % entry['title'])
141 continue
142
143 log.debug("Add %d urls to pyLoad" % len(urls))
144
145 try:
146 dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0 # Destination.Queue = 1
147
148 # Use the title of the enty, if no naming schema for the package is defined.
149 name = config.get('package', entry['title'])
150
151 # If name has jinja template, render it
152 try:
153 name = entry.render(name)
154 except RenderError as e:
155 name = entry['title']
156 log.error('Error rendering jinja event: %s' % e)
157
158 post = {'name': "'%s'" % name.encode("ascii", "ignore"),
159 'links': str(urls),
160 'dest': dest,
161 'session': session}
162
163 pid = query_api(api, "addPackage", post).text
164 log.debug('added package pid: %s' % pid)
165
166 if folder:
167 # set folder with api
168 data = {'folder': folder}
169 query_api(api, "setPackageData", {'pid': pid, 'data': data, 'session': session})
170
171 except Exception as e:
172 entry.fail(str(e))
173
174 def get_session(self, config):
175 url = config.get('api', self.DEFAULT_API)
176
177 # Login
178 post = {'username': config['username'], 'password': config['password']}
179 result = query_api(url, "login", post)
180 response = result.json()
181 if not response:
182 raise plugin.PluginError('Login failed', log)
183 return response.replace('"', '')
184
185
186 def query_api(url, method, post=None):
187 try:
188 response = requests.request(
189 'post' if post is not None else 'get',
190 url.rstrip("/") + "/" + method.strip("/"),
191 data=post)
192 response.raise_for_status()
193 return response
194 except RequestException as e:
195 if e.response.status_code == 500:
196 raise plugin.PluginError('Internal API Error: <%s> <%s> <%s>' % (method, url, post), log)
197 raise
198
199
200 @event('plugin.register')
201 def register_plugin():
202 plugin.register(PluginPyLoad, 'pyload', api_ver=2)
203
[end of flexget/plugins/output/pyload.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/flexget/plugins/output/pyload.py b/flexget/plugins/output/pyload.py
--- a/flexget/plugins/output/pyload.py
+++ b/flexget/plugins/output/pyload.py
@@ -97,7 +97,6 @@
api = config.get('api', self.DEFAULT_API)
hoster = config.get('hoster', self.DEFAULT_HOSTER)
- folder = config.get('folder', self.DEFAULT_FOLDER)
for entry in task.accepted:
# bunch of urls now going to check
@@ -145,7 +144,7 @@
try:
dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0 # Destination.Queue = 1
- # Use the title of the enty, if no naming schema for the package is defined.
+ # Use the title of the entry, if no naming schema for the package is defined.
name = config.get('package', entry['title'])
# If name has jinja template, render it
@@ -163,9 +162,18 @@
pid = query_api(api, "addPackage", post).text
log.debug('added package pid: %s' % pid)
+ # Set Folder
+ folder = config.get('folder', self.DEFAULT_FOLDER)
+ folder = entry.get('path', folder)
if folder:
+ # If folder has jinja template, render it
+ try:
+ folder = entry.render(folder)
+ except RenderError as e:
+ folder = self.DEFAULT_FOLDER
+ log.error('Error rendering jinja event: %s' % e)
# set folder with api
- data = {'folder': folder}
+ data = json.dumps({'folder': folder})
query_api(api, "setPackageData", {'pid': pid, 'data': data, 'session': session})
except Exception as e:
|
{"golden_diff": "diff --git a/flexget/plugins/output/pyload.py b/flexget/plugins/output/pyload.py\n--- a/flexget/plugins/output/pyload.py\n+++ b/flexget/plugins/output/pyload.py\n@@ -97,7 +97,6 @@\n \n api = config.get('api', self.DEFAULT_API)\n hoster = config.get('hoster', self.DEFAULT_HOSTER)\n- folder = config.get('folder', self.DEFAULT_FOLDER)\n \n for entry in task.accepted:\n # bunch of urls now going to check\n@@ -145,7 +144,7 @@\n try:\n dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0 # Destination.Queue = 1\n \n- # Use the title of the enty, if no naming schema for the package is defined.\n+ # Use the title of the entry, if no naming schema for the package is defined.\n name = config.get('package', entry['title'])\n \n # If name has jinja template, render it\n@@ -163,9 +162,18 @@\n pid = query_api(api, \"addPackage\", post).text\n log.debug('added package pid: %s' % pid)\n \n+ # Set Folder\n+ folder = config.get('folder', self.DEFAULT_FOLDER)\n+ folder = entry.get('path', folder)\n if folder:\n+ # If folder has jinja template, render it\n+ try:\n+ folder = entry.render(folder)\n+ except RenderError as e:\n+ folder = self.DEFAULT_FOLDER\n+ log.error('Error rendering jinja event: %s' % e)\n # set folder with api\n- data = {'folder': folder}\n+ data = json.dumps({'folder': folder})\n query_api(api, \"setPackageData\", {'pid': pid, 'data': data, 'session': session})\n \n except Exception as e:\n", "issue": "Switch to GitHub Actions for CI\n### Motivation for changes:\r\nTry out github actions as a replacement for circleci\r\nHopefully we don't need to maintain custom docker images this way\r\n\r\n#### To Do:\r\n\r\n- [ ] Get it working\r\n- [ ] Verify we like it better than circleci\r\n- [ ] Add a release workflow\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, division, absolute_import\nfrom logging import getLogger\nfrom urllib import quote\n\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin, validator\nfrom flexget.event import event\nfrom flexget.utils import json, requests\n\nlog = getLogger('pyload')\n\n\nclass PluginPyLoad(object):\n \"\"\"\n Parse task content or url for hoster links and adds them to pyLoad.\n\n Example::\n\n pyload:\n api: http://localhost:8000/api\n queue: yes\n username: my_username\n password: my_password\n folder: desired_folder\n package: desired_package_name (jinja2 supported)\n hoster:\n - YoutubeCom\n parse_url: no\n multiple_hoster: yes\n enabled: yes\n\n Default values for the config elements::\n\n pyload:\n api: http://localhost:8000/api\n queue: no\n hoster: ALL\n parse_url: no\n multiple_hoster: yes\n enabled: yes\n \"\"\"\n\n __author__ = 'http://pyload.org'\n __version__ = '0.4'\n\n DEFAULT_API = 'http://localhost:8000/api'\n DEFAULT_QUEUE = False\n DEFAULT_FOLDER = ''\n DEFAULT_HOSTER = []\n DEFAULT_PARSE_URL = False\n DEFAULT_MULTIPLE_HOSTER = True\n DEFAULT_PREFERRED_HOSTER_ONLY = False\n DEFAULT_HANDLE_NO_URL_AS_FAILURE = False\n\n def validator(self):\n \"\"\"Return config validator\"\"\"\n root = validator.factory()\n root.accept('boolean')\n advanced = root.accept('dict')\n advanced.accept('text', key='api')\n advanced.accept('text', key='username')\n advanced.accept('text', key='password')\n advanced.accept('text', key='folder')\n advanced.accept('text', key='package')\n advanced.accept('boolean', key='queue')\n advanced.accept('boolean', key='parse_url')\n advanced.accept('boolean', key='multiple_hoster')\n advanced.accept('list', 
key='hoster').accept('text')\n advanced.accept('boolean', key='preferred_hoster_only')\n advanced.accept('boolean', key='handle_no_url_as_failure')\n advanced.accept('boolean', key='enabled')\n return root\n\n\n def on_task_output(self, task, config):\n if not config.get('enabled', True):\n return\n if not task.accepted:\n return\n\n self.add_entries(task, config)\n\n def add_entries(self, task, config):\n \"\"\"Adds accepted entries\"\"\"\n\n try:\n session = self.get_session(config)\n except IOError:\n raise plugin.PluginError('pyLoad not reachable', log)\n except plugin.PluginError:\n raise\n except Exception as e:\n raise plugin.PluginError('Unknown error: %s' % str(e), log)\n\n api = config.get('api', self.DEFAULT_API)\n hoster = config.get('hoster', self.DEFAULT_HOSTER)\n folder = config.get('folder', self.DEFAULT_FOLDER)\n\n for entry in task.accepted:\n # bunch of urls now going to check\n content = entry.get('description', '') + ' ' + quote(entry['url'])\n content = json.dumps(content.encode(\"utf8\"))\n\n url = json.dumps(entry['url']) if config.get('parse_url', self.DEFAULT_PARSE_URL) else \"''\"\n\n log.debug(\"Parsing url %s\" % url)\n\n result = query_api(api, \"parseURLs\", {\"html\": content, \"url\": url, \"session\": session})\n\n # parsed { plugins: [urls] }\n parsed = result.json()\n\n urls = []\n\n # check for preferred hoster\n for name in hoster:\n if name in parsed:\n urls.extend(parsed[name])\n if not config.get('multiple_hoster', self.DEFAULT_MULTIPLE_HOSTER):\n break\n\n # no preferred hoster and not preferred hoster only - add all recognized plugins\n if not urls and not config.get('preferred_hoster_only', self.DEFAULT_PREFERRED_HOSTER_ONLY):\n for name, purls in parsed.iteritems():\n if name != \"BasePlugin\":\n urls.extend(purls)\n\n if task.options.test:\n log.info('Would add `%s` to pyload' % urls)\n continue\n\n # no urls found\n if not urls:\n if config.get('handle_no_url_as_failure', self.DEFAULT_HANDLE_NO_URL_AS_FAILURE):\n entry.fail(\"No suited urls in entry %s\" % entry['title'])\n else:\n log.info(\"No suited urls in entry %s\" % entry['title'])\n continue\n\n log.debug(\"Add %d urls to pyLoad\" % len(urls))\n\n try:\n dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0 # Destination.Queue = 1\n\n # Use the title of the enty, if no naming schema for the package is defined.\n name = config.get('package', entry['title'])\n\n # If name has jinja template, render it\n try:\n name = entry.render(name)\n except RenderError as e:\n name = entry['title']\n log.error('Error rendering jinja event: %s' % e)\n\n post = {'name': \"'%s'\" % name.encode(\"ascii\", \"ignore\"),\n 'links': str(urls),\n 'dest': dest,\n 'session': session}\n\n pid = query_api(api, \"addPackage\", post).text\n log.debug('added package pid: %s' % pid)\n\n if folder:\n # set folder with api\n data = {'folder': folder}\n query_api(api, \"setPackageData\", {'pid': pid, 'data': data, 'session': session})\n\n except Exception as e:\n entry.fail(str(e))\n\n def get_session(self, config):\n url = config.get('api', self.DEFAULT_API)\n\n # Login\n post = {'username': config['username'], 'password': config['password']}\n result = query_api(url, \"login\", post)\n response = result.json()\n if not response:\n raise plugin.PluginError('Login failed', log)\n return response.replace('\"', '')\n\n\ndef query_api(url, method, post=None):\n try:\n response = requests.request(\n 'post' if post is not None else 'get',\n url.rstrip(\"/\") + \"/\" + method.strip(\"/\"),\n data=post)\n 
response.raise_for_status()\n return response\n except RequestException as e:\n if e.response.status_code == 500:\n raise plugin.PluginError('Internal API Error: <%s> <%s> <%s>' % (method, url, post), log)\n raise\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginPyLoad, 'pyload', api_ver=2)\n", "path": "flexget/plugins/output/pyload.py"}]}
| 2,608 | 421 |
gh_patches_debug_22748
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-2246
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Membership application uses email object instead of email-address
Same as #2219
https://sentry.io/organizations/dotkom/issues/890887549/?project=204971&referrer=github_plugin
```
TypeError: object of type 'Email' has no len()
(16 additional frame(s) were not displayed)
...
File "guardian/decorators.py", line 126, in _wrapped_view
return view_func(request, *args, **kwargs)
File "apps/approval/dashboard/views.py", line 105, in approve_application
app.save()
File "apps/approval/signals.py", line 50, in notify_membership_applicant_handler
send_approval_status_update(instance)
File "apps/approval/tasks.py", line 45, in send_approval_status_update
[approval.applicant.get_email()],
TypeError: object of type 'Email' has no len()
```
Membership application uses email object instead of email-address
Same as #2219
https://sentry.io/organizations/dotkom/issues/890887549/?project=204971&referrer=github_plugin
```
TypeError: object of type 'Email' has no len()
(16 additional frame(s) were not displayed)
...
File "guardian/decorators.py", line 126, in _wrapped_view
return view_func(request, *args, **kwargs)
File "apps/approval/dashboard/views.py", line 105, in approve_application
app.save()
File "apps/approval/signals.py", line 50, in notify_membership_applicant_handler
send_approval_status_update(instance)
File "apps/approval/tasks.py", line 45, in send_approval_status_update
[approval.applicant.get_email()],
TypeError: object of type 'Email' has no len()
```
</issue>
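A hedged illustration of what the traceback above implies, before the actual code listing: `get_email()` returns an `Email` model instance rather than a plain string, so the recipient list handed to `EmailMessage` needs the address attribute. The `Email` and `Applicant` classes below are simplified stand-ins (the `.email` attribute is assumed from the related fix in #2219), not the real onlineweb4 models.

```python
# Simplified stand-ins -- not the onlineweb4 models.
class Email:
    def __init__(self, address):
        self.email = address  # the plain address string


class Applicant:
    def __init__(self, address):
        self._primary = Email(address)

    def get_email(self):
        return self._primary  # returns the model instance, not a str


applicant = Applicant("ola.nordmann@example.com")

broken = [applicant.get_email()]         # list of Email objects; EmailMessage later calls len() on a non-string
working = [applicant.get_email().email]  # list of address strings, which is what EmailMessage expects
```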
<code>
[start of apps/approval/signals.py]
1 from django.conf import settings
2 from django.db.models.signals import post_save
3 from django.dispatch import receiver
4
5 from apps.approval.models import CommitteeApplication, MembershipApproval
6
7 from .tasks import (send_approval_notification, send_approval_status_update,
8 send_committee_application_notification)
9
10
11 @receiver(post_save, sender=MembershipApproval)
12 def new_membership_approval_handler(sender, instance, created, **kwargs):
13 """
14
15 :param sender: The sending model.
16 :type sender: MembershipApproval
17 :param instance: The MembershipApproval instance
18 :type instance: MembershipApproval
19 :param created: True or False, whether this instance is new or not.
20 :type created: bool
21 :param kwargs: Other parameters.
22 :type kwargs: dict
23 :return: Nothing
24 :rtype: None
25 """
26
27 if created and not instance.processed:
28 if settings.APPROVAL_SETTINGS.get('SEND_APPROVER_NOTIFICATION_EMAIL', False):
29 send_approval_notification(instance)
30
31
32 @receiver(post_save, sender=MembershipApproval)
33 def notify_membership_applicant_handler(sender, instance, created, **kwargs):
34 """
35
36 :param sender: The sending model.
37 :type sender: Approval
38 :param instance: The Approval instance
39 :type instance: Approval
40 :param approved: True or False, whether this instance is new or not.
41 :type created: bool
42 :param kwargs: Other parameters.
43 :type kwargs: dict
44 :return: Nothing
45 :rtype: None
46 """
47
48 if not created and instance.processed and instance.applicant.get_email():
49 if settings.APPROVAL_SETTINGS.get('SEND_APPLICANT_NOTIFICATION_EMAIL', False):
50 send_approval_status_update(instance)
51
52
53 @receiver(post_save, sender=CommitteeApplication)
54 def notify_new_committee_application(sender, instance, created, **kwargs):
55 if created:
56 send_committee_application_notification(instance, [settings.EMAIL_OPPTAK], link_to_admin=True)
57 if settings.APPROVAL_SETTINGS.get('SEND_COMMITTEEAPPLICATION_APPLICANT_EMAIL', False):
58 send_committee_application_notification(instance, [instance.get_email()], link_to_admin=False)
59
[end of apps/approval/signals.py]
[start of apps/approval/tasks.py]
1 import logging
2
3 from django.conf import settings
4 from django.core.exceptions import ImproperlyConfigured
5 from django.core.mail import EmailMessage, send_mail
6 from django.template.loader import render_to_string
7 from django.urls import reverse
8
9
10 def send_approval_notification(approval):
11 logger = logging.getLogger(__name__)
12 d = {
13 'approval': approval,
14 'approval_url': settings.BASE_URL + reverse('approvals')
15 }
16
17 to_emails = [settings.EMAIL_HS]
18 content = render_to_string('approval/email/approval_notification.txt', d)
19
20 try:
21 EmailMessage("[Medlemskapssøknad] %s" % approval.applicant.get_full_name(),
22 content, settings.DEFAULT_FROM_EMAIL, to_emails).send()
23 except ImproperlyConfigured:
24 logger.warn('Failed to send approval approver notification email for approval#{pk}.'.format(
25 {'pk': approval.pk}))
26
27
28 def send_approval_status_update(approval):
29 logger = logging.getLogger(__name__)
30
31 accepted = approval.approved
32 message = "Ditt medlemskap i Online er "
33 if accepted:
34 message += "godkjent."
35 else:
36 message += "ikke godkjent."
37 if len(approval.message) == 0:
38 message += " Ta kontakt med Online for begrunnelse."
39 else:
40 message += approval.message
41 try:
42 EmailMessage("Soknad om medlemskap i Online er vurdert",
43 message,
44 settings.DEFAULT_FROM_EMAIL,
45 [approval.applicant.get_email()],
46 ).send()
47 except ImproperlyConfigured:
48 logger.warn('Failed to notify applicant about updated status on membership for approval#{pk}.'.format(
49 {'pk': approval.pk}))
50
51
52 def send_committee_application_notification(application, to_emails, link_to_admin=False):
53 context = {
54 'link_to_admin': link_to_admin,
55 'absolute_url': settings.BASE_URL + application.get_absolute_url(),
56 'applicant_name': application.get_name(),
57 }
58 message = render_to_string('approval/email/committeeapplication_notification.txt', context)
59 send_mail('[opptak] Bekreftelse på komitesøknad', message, settings.DEFAULT_FROM_EMAIL, to_emails)
60
[end of apps/approval/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/approval/signals.py b/apps/approval/signals.py
--- a/apps/approval/signals.py
+++ b/apps/approval/signals.py
@@ -55,4 +55,4 @@
if created:
send_committee_application_notification(instance, [settings.EMAIL_OPPTAK], link_to_admin=True)
if settings.APPROVAL_SETTINGS.get('SEND_COMMITTEEAPPLICATION_APPLICANT_EMAIL', False):
- send_committee_application_notification(instance, [instance.get_email()], link_to_admin=False)
+ send_committee_application_notification(instance, [instance.get_email().email], link_to_admin=False)
diff --git a/apps/approval/tasks.py b/apps/approval/tasks.py
--- a/apps/approval/tasks.py
+++ b/apps/approval/tasks.py
@@ -42,7 +42,7 @@
EmailMessage("Soknad om medlemskap i Online er vurdert",
message,
settings.DEFAULT_FROM_EMAIL,
- [approval.applicant.get_email()],
+ [approval.applicant.get_email().email],
).send()
except ImproperlyConfigured:
logger.warn('Failed to notify applicant about updated status on membership for approval#{pk}.'.format(
|
{"golden_diff": "diff --git a/apps/approval/signals.py b/apps/approval/signals.py\n--- a/apps/approval/signals.py\n+++ b/apps/approval/signals.py\n@@ -55,4 +55,4 @@\n if created:\n send_committee_application_notification(instance, [settings.EMAIL_OPPTAK], link_to_admin=True)\n if settings.APPROVAL_SETTINGS.get('SEND_COMMITTEEAPPLICATION_APPLICANT_EMAIL', False):\n- send_committee_application_notification(instance, [instance.get_email()], link_to_admin=False)\n+ send_committee_application_notification(instance, [instance.get_email().email], link_to_admin=False)\ndiff --git a/apps/approval/tasks.py b/apps/approval/tasks.py\n--- a/apps/approval/tasks.py\n+++ b/apps/approval/tasks.py\n@@ -42,7 +42,7 @@\n EmailMessage(\"Soknad om medlemskap i Online er vurdert\",\n message,\n settings.DEFAULT_FROM_EMAIL,\n- [approval.applicant.get_email()],\n+ [approval.applicant.get_email().email],\n ).send()\n except ImproperlyConfigured:\n logger.warn('Failed to notify applicant about updated status on membership for approval#{pk}.'.format(\n", "issue": "Membership application uses email object instead of email-address\nSame as #2219\n\nhttps://sentry.io/organizations/dotkom/issues/890887549/?project=204971&referrer=github_plugin\n\n```\nTypeError: object of type 'Email' has no len()\n(16 additional frame(s) were not displayed)\n...\n File \"guardian/decorators.py\", line 126, in _wrapped_view\n return view_func(request, *args, **kwargs)\n File \"apps/approval/dashboard/views.py\", line 105, in approve_application\n app.save()\n File \"apps/approval/signals.py\", line 50, in notify_membership_applicant_handler\n send_approval_status_update(instance)\n File \"apps/approval/tasks.py\", line 45, in send_approval_status_update\n [approval.applicant.get_email()],\n\nTypeError: object of type 'Email' has no len()\n```\nMembership application uses email object instead of email-address\nSame as #2219\n\nhttps://sentry.io/organizations/dotkom/issues/890887549/?project=204971&referrer=github_plugin\n\n```\nTypeError: object of type 'Email' has no len()\n(16 additional frame(s) were not displayed)\n...\n File \"guardian/decorators.py\", line 126, in _wrapped_view\n return view_func(request, *args, **kwargs)\n File \"apps/approval/dashboard/views.py\", line 105, in approve_application\n app.save()\n File \"apps/approval/signals.py\", line 50, in notify_membership_applicant_handler\n send_approval_status_update(instance)\n File \"apps/approval/tasks.py\", line 45, in send_approval_status_update\n [approval.applicant.get_email()],\n\nTypeError: object of type 'Email' has no len()\n```\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom apps.approval.models import CommitteeApplication, MembershipApproval\n\nfrom .tasks import (send_approval_notification, send_approval_status_update,\n send_committee_application_notification)\n\n\n@receiver(post_save, sender=MembershipApproval)\ndef new_membership_approval_handler(sender, instance, created, **kwargs):\n \"\"\"\n\n :param sender: The sending model.\n :type sender: MembershipApproval\n :param instance: The MembershipApproval instance\n :type instance: MembershipApproval\n :param created: True or False, whether this instance is new or not.\n :type created: bool\n :param kwargs: Other parameters.\n :type kwargs: dict\n :return: Nothing\n :rtype: None\n \"\"\"\n\n if created and not instance.processed:\n if 
settings.APPROVAL_SETTINGS.get('SEND_APPROVER_NOTIFICATION_EMAIL', False):\n send_approval_notification(instance)\n\n\n@receiver(post_save, sender=MembershipApproval)\ndef notify_membership_applicant_handler(sender, instance, created, **kwargs):\n \"\"\"\n\n :param sender: The sending model.\n :type sender: Approval\n :param instance: The Approval instance\n :type instance: Approval\n :param approved: True or False, whether this instance is new or not.\n :type created: bool\n :param kwargs: Other parameters.\n :type kwargs: dict\n :return: Nothing\n :rtype: None\n \"\"\"\n\n if not created and instance.processed and instance.applicant.get_email():\n if settings.APPROVAL_SETTINGS.get('SEND_APPLICANT_NOTIFICATION_EMAIL', False):\n send_approval_status_update(instance)\n\n\n@receiver(post_save, sender=CommitteeApplication)\ndef notify_new_committee_application(sender, instance, created, **kwargs):\n if created:\n send_committee_application_notification(instance, [settings.EMAIL_OPPTAK], link_to_admin=True)\n if settings.APPROVAL_SETTINGS.get('SEND_COMMITTEEAPPLICATION_APPLICANT_EMAIL', False):\n send_committee_application_notification(instance, [instance.get_email()], link_to_admin=False)\n", "path": "apps/approval/signals.py"}, {"content": "import logging\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.mail import EmailMessage, send_mail\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\n\n\ndef send_approval_notification(approval):\n logger = logging.getLogger(__name__)\n d = {\n 'approval': approval,\n 'approval_url': settings.BASE_URL + reverse('approvals')\n }\n\n to_emails = [settings.EMAIL_HS]\n content = render_to_string('approval/email/approval_notification.txt', d)\n\n try:\n EmailMessage(\"[Medlemskapss\u00f8knad] %s\" % approval.applicant.get_full_name(),\n content, settings.DEFAULT_FROM_EMAIL, to_emails).send()\n except ImproperlyConfigured:\n logger.warn('Failed to send approval approver notification email for approval#{pk}.'.format(\n {'pk': approval.pk}))\n\n\ndef send_approval_status_update(approval):\n logger = logging.getLogger(__name__)\n\n accepted = approval.approved\n message = \"Ditt medlemskap i Online er \"\n if accepted:\n message += \"godkjent.\"\n else:\n message += \"ikke godkjent.\"\n if len(approval.message) == 0:\n message += \" Ta kontakt med Online for begrunnelse.\"\n else:\n message += approval.message\n try:\n EmailMessage(\"Soknad om medlemskap i Online er vurdert\",\n message,\n settings.DEFAULT_FROM_EMAIL,\n [approval.applicant.get_email()],\n ).send()\n except ImproperlyConfigured:\n logger.warn('Failed to notify applicant about updated status on membership for approval#{pk}.'.format(\n {'pk': approval.pk}))\n\n\ndef send_committee_application_notification(application, to_emails, link_to_admin=False):\n context = {\n 'link_to_admin': link_to_admin,\n 'absolute_url': settings.BASE_URL + application.get_absolute_url(),\n 'applicant_name': application.get_name(),\n }\n message = render_to_string('approval/email/committeeapplication_notification.txt', context)\n send_mail('[opptak] Bekreftelse p\u00e5 komites\u00f8knad', message, settings.DEFAULT_FROM_EMAIL, to_emails)\n", "path": "apps/approval/tasks.py"}]}
| 2,129 | 264 |
gh_patches_debug_32292
|
rasdani/github-patches
|
git_diff
|
CiviWiki__OpenCiviWiki-1088
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate threads urls to path
in the `threads` app, we need to replace the `url()` function with the `path()` function, as discussed in #1066
https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41
Conversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple.
For example,
```python
url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"),
```
should become
```python
path("thread_data/(<int:thread_id>/", get_thread, name="get thread"),
```
We need to change all usages of the `url()` function in the `threads` app.
Migrate threads urls to path
in the `threads` app, we need to replace the `url()` function with the `path()` function, as discussed in #1066
https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41
Conversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple.
For example,
```python
url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"),
```
should become
```python
path("thread_data/(<int:thread_id>/", get_thread, name="get thread"),
```
We need to change all usages of the `url()` function in the `threads` app.
</issue>
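A hedged sketch of the conversion the issue asks for, not the actual patch: it assumes Django 2.0 or later (where `django.urls.path` exists) and integer thread/civi ids, as the issue's own example implies; ids that may contain letters would need `<str:...>` or `re_path()` instead. The view imports are taken from the existing `project/threads/urls.py` shown below.

```python
# Hedged sketch -- a few representative routes, not the full urls.py rewrite.
from django.urls import path, re_path

from .api import create_civi, get_civi, get_thread

urlpatterns = [
    # url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread")
    path("thread_data/<int:thread_id>/", get_thread, name="get thread"),

    # url(r"^new_civi/$", create_civi, name="new civi") -- no captured args, converts mechanically
    path("new_civi/", create_civi, name="new civi"),

    # a route that genuinely needs a regex can stay on re_path()
    re_path(r"^civi_data/(?P<civi_id>\w+)$", get_civi, name="get civi"),
]
```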
<code>
[start of project/threads/urls.py]
1 from django.conf.urls import include, url
2 from rest_framework.routers import DefaultRouter
3
4 from .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi,
5 get_thread, rate_civi, upload_civi_image, new_thread, get_civis,
6 get_responses, upload_thread_image)
7
8 from .views import (
9 ThreadViewSet, CategoryViewSet,
10 CiviViewSet
11 )
12 from accounts.api import ProfileViewSet
13
14 router = DefaultRouter(trailing_slash=False)
15 router.register(r"threads", ThreadViewSet)
16 router.register(r"categories", CategoryViewSet)
17 router.register(r"civis", CiviViewSet)
18 router.register(r"accounts", ProfileViewSet)
19
20 urlpatterns = [
21 url(r"^v1/", include(router.urls)),
22 ]
23
24 urlpatterns += [
25 url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"),
26 url(r"^civi_data/(?P<civi_id>\w+)$", get_civi, name="get civi"),
27 url(r"^threads/(?P<thread_id>\w+)/civis$", get_civis, name="get civis"),
28 url(
29 r"^response_data/(?P<thread_id>\w+)/(?P<civi_id>\w+)/$",
30 get_responses,
31 name="get responses",
32 ),
33 url(r"^new_thread/$", new_thread, name="new thread"),
34 url(r"^edit_thread/$", edit_thread, name="edit thread"),
35 url(r"^new_civi/$", create_civi, name="new civi"),
36 url(r"^rate_civi/$", rate_civi, name="rate civi"),
37 url(r"^edit_civi/$", edit_civi, name="edit civi"),
38 url(r"^delete_civi/$", delete_civi, name="delete civi"),
39 url(r"^upload_images/$", upload_civi_image, name="upload images"),
40 url(r"^upload_image/$", upload_thread_image, name="upload image"),
41 ]
42
[end of project/threads/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/project/threads/urls.py b/project/threads/urls.py
--- a/project/threads/urls.py
+++ b/project/threads/urls.py
@@ -1,4 +1,5 @@
-from django.conf.urls import include, url
+from django.conf.urls import include
+from django.urls import path
from rest_framework.routers import DefaultRouter
from .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi,
@@ -18,24 +19,24 @@
router.register(r"accounts", ProfileViewSet)
urlpatterns = [
- url(r"^v1/", include(router.urls)),
+ path("v1/", include(router.urls)),
]
urlpatterns += [
- url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"),
- url(r"^civi_data/(?P<civi_id>\w+)$", get_civi, name="get civi"),
- url(r"^threads/(?P<thread_id>\w+)/civis$", get_civis, name="get civis"),
- url(
- r"^response_data/(?P<thread_id>\w+)/(?P<civi_id>\w+)/$",
+ path("thread_data/<int:thread_id>/", get_thread, name="get thread"),
+ path("civi_data/<int:civi_id>/", get_civi, name="get civi"),
+ path("threads/<int:thread_id>/civis", get_civis, name="get civis"),
+ path(
+ "response_data/<int:thread_id>/<int:civi_id>/",
get_responses,
name="get responses",
),
- url(r"^new_thread/$", new_thread, name="new thread"),
- url(r"^edit_thread/$", edit_thread, name="edit thread"),
- url(r"^new_civi/$", create_civi, name="new civi"),
- url(r"^rate_civi/$", rate_civi, name="rate civi"),
- url(r"^edit_civi/$", edit_civi, name="edit civi"),
- url(r"^delete_civi/$", delete_civi, name="delete civi"),
- url(r"^upload_images/$", upload_civi_image, name="upload images"),
- url(r"^upload_image/$", upload_thread_image, name="upload image"),
+ path("new_thread/", new_thread, name="new thread"),
+ path("edit_thread/", edit_thread, name="edit thread"),
+ path("new_civi/", create_civi, name="new civi"),
+ path("rate_civi/", rate_civi, name="rate civi"),
+ path("edit_civi/", edit_civi, name="edit civi"),
+ path("delete_civi/", delete_civi, name="delete civi"),
+ path("upload_images/", upload_civi_image, name="upload images"),
+ path("upload_image/", upload_thread_image, name="upload image"),
]
|
{"golden_diff": "diff --git a/project/threads/urls.py b/project/threads/urls.py\n--- a/project/threads/urls.py\n+++ b/project/threads/urls.py\n@@ -1,4 +1,5 @@\n-from django.conf.urls import include, url\r\n+from django.conf.urls import include\r\n+from django.urls import path\r\n from rest_framework.routers import DefaultRouter\r\n \r\n from .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi,\r\n@@ -18,24 +19,24 @@\n router.register(r\"accounts\", ProfileViewSet)\r\n \r\n urlpatterns = [\r\n- url(r\"^v1/\", include(router.urls)),\r\n+ path(\"v1/\", include(router.urls)),\r\n ]\r\n \r\n urlpatterns += [\r\n- url(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n- url(r\"^civi_data/(?P<civi_id>\\w+)$\", get_civi, name=\"get civi\"),\r\n- url(r\"^threads/(?P<thread_id>\\w+)/civis$\", get_civis, name=\"get civis\"),\r\n- url(\r\n- r\"^response_data/(?P<thread_id>\\w+)/(?P<civi_id>\\w+)/$\",\r\n+ path(\"thread_data/<int:thread_id>/\", get_thread, name=\"get thread\"),\r\n+ path(\"civi_data/<int:civi_id>/\", get_civi, name=\"get civi\"),\r\n+ path(\"threads/<int:thread_id>/civis\", get_civis, name=\"get civis\"),\r\n+ path(\r\n+ \"response_data/<int:thread_id>/<int:civi_id>/\",\r\n get_responses,\r\n name=\"get responses\",\r\n ),\r\n- url(r\"^new_thread/$\", new_thread, name=\"new thread\"),\r\n- url(r\"^edit_thread/$\", edit_thread, name=\"edit thread\"),\r\n- url(r\"^new_civi/$\", create_civi, name=\"new civi\"),\r\n- url(r\"^rate_civi/$\", rate_civi, name=\"rate civi\"),\r\n- url(r\"^edit_civi/$\", edit_civi, name=\"edit civi\"),\r\n- url(r\"^delete_civi/$\", delete_civi, name=\"delete civi\"),\r\n- url(r\"^upload_images/$\", upload_civi_image, name=\"upload images\"),\r\n- url(r\"^upload_image/$\", upload_thread_image, name=\"upload image\"),\r\n+ path(\"new_thread/\", new_thread, name=\"new thread\"),\r\n+ path(\"edit_thread/\", edit_thread, name=\"edit thread\"),\r\n+ path(\"new_civi/\", create_civi, name=\"new civi\"),\r\n+ path(\"rate_civi/\", rate_civi, name=\"rate civi\"),\r\n+ path(\"edit_civi/\", edit_civi, name=\"edit civi\"),\r\n+ path(\"delete_civi/\", delete_civi, name=\"delete civi\"),\r\n+ path(\"upload_images/\", upload_civi_image, name=\"upload images\"),\r\n+ path(\"upload_image/\", upload_thread_image, name=\"upload image\"),\r\n ]\n", "issue": "Migrate threads urls to path\nin `threads` app, we need to change `url()` function with `path()` function as discussed in #1066\r\n\r\nhttps://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41\r\n\r\nConversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple.\r\n\r\nFor example,\r\n\r\n```python\r\nurl(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nshould become\r\n\r\n```python\r\npath(\"thread_data/(<int:thread_id>/\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nWe need to be changed all usages of `url()` function in `threads` app.\nMigrate threads urls to path\nin `threads` app, we need to change `url()` function with `path()` function as discussed in #1066\r\n\r\nhttps://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41\r\n\r\nConversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple.\r\n\r\nFor 
example,\r\n\r\n```python\r\nurl(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nshould become\r\n\r\n```python\r\npath(\"thread_data/(<int:thread_id>/\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nWe need to be changed all usages of `url()` function in `threads` app.\n", "before_files": [{"content": "from django.conf.urls import include, url\r\nfrom rest_framework.routers import DefaultRouter\r\n\r\nfrom .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi,\r\n get_thread, rate_civi, upload_civi_image, new_thread, get_civis,\r\n get_responses, upload_thread_image)\r\n\r\nfrom .views import (\r\n ThreadViewSet, CategoryViewSet,\r\n CiviViewSet\r\n)\r\nfrom accounts.api import ProfileViewSet\r\n\r\nrouter = DefaultRouter(trailing_slash=False)\r\nrouter.register(r\"threads\", ThreadViewSet)\r\nrouter.register(r\"categories\", CategoryViewSet)\r\nrouter.register(r\"civis\", CiviViewSet)\r\nrouter.register(r\"accounts\", ProfileViewSet)\r\n\r\nurlpatterns = [\r\n url(r\"^v1/\", include(router.urls)),\r\n]\r\n\r\nurlpatterns += [\r\n url(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n url(r\"^civi_data/(?P<civi_id>\\w+)$\", get_civi, name=\"get civi\"),\r\n url(r\"^threads/(?P<thread_id>\\w+)/civis$\", get_civis, name=\"get civis\"),\r\n url(\r\n r\"^response_data/(?P<thread_id>\\w+)/(?P<civi_id>\\w+)/$\",\r\n get_responses,\r\n name=\"get responses\",\r\n ),\r\n url(r\"^new_thread/$\", new_thread, name=\"new thread\"),\r\n url(r\"^edit_thread/$\", edit_thread, name=\"edit thread\"),\r\n url(r\"^new_civi/$\", create_civi, name=\"new civi\"),\r\n url(r\"^rate_civi/$\", rate_civi, name=\"rate civi\"),\r\n url(r\"^edit_civi/$\", edit_civi, name=\"edit civi\"),\r\n url(r\"^delete_civi/$\", delete_civi, name=\"delete civi\"),\r\n url(r\"^upload_images/$\", upload_civi_image, name=\"upload images\"),\r\n url(r\"^upload_image/$\", upload_thread_image, name=\"upload image\"),\r\n]\r\n", "path": "project/threads/urls.py"}]}
| 1,522 | 660 |
gh_patches_debug_18327
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-3528
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a `region` arg to `OpenStackLatentWorker`
Would be nice to allow passing a region_name to novaclient in `OpenStackLatentWorker`.
</issue>
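A hedged sketch of how a region could reach novaclient, separate from the repository code below: the `make_nova` helper and its `region` parameter are illustrative names, and whether the region is set on the underlying client (as here) or passed some other way depends on the novaclient release in use.

```python
# Illustrative helper only -- not buildbot code.
from keystoneauth1 import loading, session
from novaclient import client


def make_nova(username, password, project_name, auth_url,
              region=None, client_version="2"):
    loader = loading.get_plugin_loader("password")
    auth = loader.load_from_options(auth_url=auth_url, username=username,
                                    password=password,
                                    project_name=project_name)
    sess = session.Session(auth=auth)
    nova = client.Client(client_version, session=sess)
    if region is not None:
        # restrict endpoint selection in the service catalog to this region
        nova.client.region_name = region
    return nova
```

A worker that wants per-region scheduling would then build its client through a helper like this instead of constructing the session and client inline.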
<code>
[start of master/buildbot/worker/openstack.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Portions Copyright Buildbot Team Members
15 # Portions Copyright 2013 Cray Inc.
16
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 import math
22 import time
23
24 from twisted.internet import defer
25 from twisted.internet import threads
26 from twisted.python import log
27
28 from buildbot import config
29 from buildbot.interfaces import LatentWorkerFailedToSubstantiate
30 from buildbot.worker import AbstractLatentWorker
31
32 try:
33 from keystoneauth1 import loading
34 from keystoneauth1 import session
35 from novaclient import client
36 from novaclient.exceptions import NotFound
37 _hush_pyflakes = [client]
38 except ImportError:
39 NotFound = Exception
40 client = None
41 loading = None
42 session = None
43
44
45 ACTIVE = 'ACTIVE'
46 BUILD = 'BUILD'
47 DELETED = 'DELETED'
48 UNKNOWN = 'UNKNOWN'
49
50
51 class OpenStackLatentWorker(AbstractLatentWorker):
52
53 instance = None
54 _poll_resolution = 5 # hook point for tests
55
56 def __init__(self, name, password,
57 flavor,
58 os_username,
59 os_password,
60 os_tenant_name,
61 os_auth_url,
62 block_devices=None,
63 image=None,
64 meta=None,
65 # Have a nova_args parameter to allow passing things directly
66 # to novaclient.
67 nova_args=None,
68 client_version='2',
69 **kwargs):
70
71 if not client:
72 config.error("The python module 'novaclient' is needed "
73 "to use a OpenStackLatentWorker. "
74 "Please install 'python-novaclient' package.")
75 if not loading or not session:
76 config.error("The python module 'keystoneauth1' is needed "
77 "to use a OpenStackLatentWorker. "
78 "Please install the 'keystoneauth1' package.")
79
80 if not block_devices and not image:
81 raise ValueError('One of block_devices or image must be given')
82
83 AbstractLatentWorker.__init__(self, name, password, **kwargs)
84
85 self.flavor = flavor
86 self.client_version = client_version
87 if client:
88 self.novaclient = self._constructClient(
89 client_version, os_username, os_password, os_tenant_name,
90 os_auth_url)
91
92 if block_devices is not None:
93 self.block_devices = [
94 self._parseBlockDevice(bd) for bd in block_devices]
95 else:
96 self.block_devices = None
97 self.image = image
98 self.meta = meta
99 self.nova_args = nova_args if nova_args is not None else {}
100
101 @staticmethod
102 def _constructClient(client_version, username, password, project_name,
103 auth_url):
104 """Return a novaclient from the given args."""
105 loader = loading.get_plugin_loader('password')
106 auth = loader.load_from_options(auth_url=auth_url, username=username,
107 password=password, project_name=project_name)
108 sess = session.Session(auth=auth)
109 return client.Client(client_version, session=sess)
110
111 def _parseBlockDevice(self, block_device):
112 """
113 Parse a higher-level view of the block device mapping into something
114 novaclient wants. This should be similar to how Horizon presents it.
115 Required keys:
116 device_name: The name of the device; e.g. vda or xda.
117 source_type: image, snapshot, volume, or blank/None.
118 destination_type: Destination of block device: volume or local.
119 delete_on_termination: True/False.
120 uuid: The image, snapshot, or volume id.
121 boot_index: Integer used for boot order.
122 volume_size: Size of the device in GiB.
123 """
124 client_block_device = {}
125 client_block_device['device_name'] = block_device.get(
126 'device_name', 'vda')
127 client_block_device['source_type'] = block_device.get(
128 'source_type', 'image')
129 client_block_device['destination_type'] = block_device.get(
130 'destination_type', 'volume')
131 client_block_device['delete_on_termination'] = bool(
132 block_device.get('delete_on_termination', True))
133 client_block_device['uuid'] = block_device['uuid']
134 client_block_device['boot_index'] = int(
135 block_device.get('boot_index', 0))
136 # Allow None here. It will be rendered later.
137 client_block_device['volume_size'] = block_device.get('volume_size')
138 return client_block_device
139
140 @defer.inlineCallbacks
141 def _renderBlockDevice(self, block_device, build):
142 """Render all of the block device's values."""
143 rendered_block_device = yield build.render(block_device)
144 if rendered_block_device['volume_size'] is None:
145 source_type = rendered_block_device['source_type']
146 source_uuid = rendered_block_device['uuid']
147 volume_size = self._determineVolumeSize(source_type, source_uuid)
148 rendered_block_device['volume_size'] = volume_size
149 defer.returnValue(rendered_block_device)
150
151 def _determineVolumeSize(self, source_type, source_uuid):
152 """
153 Determine the minimum size the volume needs to be for the source.
154 Returns the size in GiB.
155 """
156 nova = self.novaclient
157 if source_type == 'image':
158 # The size returned for an image is in bytes. Round up to the next
159 # integer GiB.
160 image = nova.images.get(source_uuid)
161 if hasattr(image, 'OS-EXT-IMG-SIZE:size'):
162 size = getattr(image, 'OS-EXT-IMG-SIZE:size')
163 size_gb = int(math.ceil(size / 1024.0**3))
164 return size_gb
165 elif source_type == 'volume':
166 # Volumes are easy because they are already in GiB.
167 volume = nova.volumes.get(source_uuid)
168 return volume.size
169 elif source_type == 'snapshot':
170 snap = nova.volume_snapshots.get(source_uuid)
171 return snap.size
172 else:
173 unknown_source = ("The source type '%s' for UUID '%s' is"
174 " unknown" % (source_type, source_uuid))
175 raise ValueError(unknown_source)
176
177 @defer.inlineCallbacks
178 def _getImage(self, build):
179 # If image is a callable, then pass it the list of images. The
180 # function should return the image's UUID to use.
181 image = self.image
182 if callable(image):
183 image_uuid = image(self.novaclient.images.list())
184 else:
185 image_uuid = yield build.render(image)
186 defer.returnValue(image_uuid)
187
188 @defer.inlineCallbacks
189 def start_instance(self, build):
190 if self.instance is not None:
191 raise ValueError('instance active')
192 image = yield self._getImage(build)
193 if self.block_devices is not None:
194 block_devices = []
195 for bd in self.block_devices:
196 rendered_block_device = yield self._renderBlockDevice(bd, build)
197 block_devices.append(rendered_block_device)
198 else:
199 block_devices = None
200 res = yield threads.deferToThread(self._start_instance, image,
201 block_devices)
202 defer.returnValue(res)
203
204 def _start_instance(self, image_uuid, block_devices):
205 boot_args = [self.workername, image_uuid, self.flavor]
206 boot_kwargs = dict(
207 meta=self.meta,
208 block_device_mapping_v2=block_devices,
209 **self.nova_args)
210 instance = self.novaclient.servers.create(*boot_args, **boot_kwargs)
211 # There is an issue when using sessions that the status is not
212 # available on the first try. Trying again will work fine. Fetch the
213 # instance to avoid that.
214 try:
215 instance = self.novaclient.servers.get(instance.id)
216 except NotFound:
217 log.msg('{class_name} {name} instance {instance.id} '
218 '({instance.name}) never found',
219 class_name=self.__class__.__name__, name=self.workername,
220 instance=instance)
221 raise LatentWorkerFailedToSubstantiate(
222 instance.id, BUILD)
223 self.instance = instance
224 log.msg('%s %s starting instance %s (image %s)' %
225 (self.__class__.__name__, self.workername, instance.id,
226 image_uuid))
227 duration = 0
228 interval = self._poll_resolution
229 while instance.status.startswith(BUILD):
230 time.sleep(interval)
231 duration += interval
232 if duration % 60 == 0:
233 log.msg('%s %s has waited %d minutes for instance %s' %
234 (self.__class__.__name__, self.workername, duration // 60,
235 instance.id))
236 try:
237 instance = self.novaclient.servers.get(instance.id)
238 except NotFound:
239 log.msg('%s %s instance %s (%s) went missing' %
240 (self.__class__.__name__, self.workername,
241 instance.id, instance.name))
242 raise LatentWorkerFailedToSubstantiate(
243 instance.id, instance.status)
244 if instance.status == ACTIVE:
245 minutes = duration // 60
246 seconds = duration % 60
247 log.msg('%s %s instance %s (%s) started '
248 'in about %d minutes %d seconds' %
249 (self.__class__.__name__, self.workername,
250 instance.id, instance.name, minutes, seconds))
251 return [instance.id, image_uuid,
252 '%02d:%02d:%02d' % (minutes // 60, minutes % 60, seconds)]
253 else:
254 self.failed_to_start(instance.id, instance.status)
255
256 def stop_instance(self, fast=False):
257 if self.instance is None:
258 # be gentle. Something may just be trying to alert us that an
259 # instance never attached, and it's because, somehow, we never
260 # started.
261 return defer.succeed(None)
262 instance = self.instance
263 self.instance = None
264 self._stop_instance(instance, fast)
265
266 def _stop_instance(self, instance, fast):
267 try:
268 instance = self.novaclient.servers.get(instance.id)
269 except NotFound:
270 # If can't find the instance, then it's already gone.
271 log.msg('%s %s instance %s (%s) already terminated' %
272 (self.__class__.__name__, self.workername, instance.id,
273 instance.name))
274 return
275 if instance.status not in (DELETED, UNKNOWN):
276 instance.delete()
277 log.msg('%s %s terminating instance %s (%s)' %
278 (self.__class__.__name__, self.workername, instance.id,
279 instance.name))
280
[end of master/buildbot/worker/openstack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/worker/openstack.py b/master/buildbot/worker/openstack.py
--- a/master/buildbot/worker/openstack.py
+++ b/master/buildbot/worker/openstack.py
@@ -60,6 +60,7 @@
os_tenant_name,
os_auth_url,
block_devices=None,
+ region=None,
image=None,
meta=None,
# Have a nova_args parameter to allow passing things directly
@@ -88,6 +89,8 @@
self.novaclient = self._constructClient(
client_version, os_username, os_password, os_tenant_name,
os_auth_url)
+ if region is not None:
+ self.novaclient.client.region_name = region
if block_devices is not None:
self.block_devices = [
|
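For context, the diff above only threads a new `region` keyword through to novaclient. A hedged usage sketch of the patched constructor (every value below is a placeholder, and it assumes the class is exposed via `buildbot.plugins.worker` as in stock Buildbot):

```python
# Hypothetical master.cfg fragment; assumes the patched OpenStackLatentWorker
# signature that accepts ``region``.
from buildbot.plugins import worker

openstack_worker = worker.OpenStackLatentWorker(
    "openstack-worker-1",
    "worker-password",
    flavor="m1.small",
    os_username="buildbot",
    os_password="secret",
    os_tenant_name="ci",
    os_auth_url="https://keystone.example.com:5000/v3",
    image="ubuntu-20.04",
    region="RegionTwo",  # the keyword added by the patch above
)
```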
{"golden_diff": "diff --git a/master/buildbot/worker/openstack.py b/master/buildbot/worker/openstack.py\n--- a/master/buildbot/worker/openstack.py\n+++ b/master/buildbot/worker/openstack.py\n@@ -60,6 +60,7 @@\n os_tenant_name,\n os_auth_url,\n block_devices=None,\n+ region=None,\n image=None,\n meta=None,\n # Have a nova_args parameter to allow passing things directly\n@@ -88,6 +89,8 @@\n self.novaclient = self._constructClient(\n client_version, os_username, os_password, os_tenant_name,\n os_auth_url)\n+ if region is not None:\n+ self.novaclient.client.region_name = region\n \n if block_devices is not None:\n self.block_devices = [\n", "issue": "Add a `region` arg to `OpenStackLatentWorker`\nWould be nice to allow passing a region_name to novaclient in `OpenStackLatentWorker`.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Portions Copyright Buildbot Team Members\n# Portions Copyright 2013 Cray Inc.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport time\n\nfrom twisted.internet import defer\nfrom twisted.internet import threads\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot.interfaces import LatentWorkerFailedToSubstantiate\nfrom buildbot.worker import AbstractLatentWorker\n\ntry:\n from keystoneauth1 import loading\n from keystoneauth1 import session\n from novaclient import client\n from novaclient.exceptions import NotFound\n _hush_pyflakes = [client]\nexcept ImportError:\n NotFound = Exception\n client = None\n loading = None\n session = None\n\n\nACTIVE = 'ACTIVE'\nBUILD = 'BUILD'\nDELETED = 'DELETED'\nUNKNOWN = 'UNKNOWN'\n\n\nclass OpenStackLatentWorker(AbstractLatentWorker):\n\n instance = None\n _poll_resolution = 5 # hook point for tests\n\n def __init__(self, name, password,\n flavor,\n os_username,\n os_password,\n os_tenant_name,\n os_auth_url,\n block_devices=None,\n image=None,\n meta=None,\n # Have a nova_args parameter to allow passing things directly\n # to novaclient.\n nova_args=None,\n client_version='2',\n **kwargs):\n\n if not client:\n config.error(\"The python module 'novaclient' is needed \"\n \"to use a OpenStackLatentWorker. \"\n \"Please install 'python-novaclient' package.\")\n if not loading or not session:\n config.error(\"The python module 'keystoneauth1' is needed \"\n \"to use a OpenStackLatentWorker. 
\"\n \"Please install the 'keystoneauth1' package.\")\n\n if not block_devices and not image:\n raise ValueError('One of block_devices or image must be given')\n\n AbstractLatentWorker.__init__(self, name, password, **kwargs)\n\n self.flavor = flavor\n self.client_version = client_version\n if client:\n self.novaclient = self._constructClient(\n client_version, os_username, os_password, os_tenant_name,\n os_auth_url)\n\n if block_devices is not None:\n self.block_devices = [\n self._parseBlockDevice(bd) for bd in block_devices]\n else:\n self.block_devices = None\n self.image = image\n self.meta = meta\n self.nova_args = nova_args if nova_args is not None else {}\n\n @staticmethod\n def _constructClient(client_version, username, password, project_name,\n auth_url):\n \"\"\"Return a novaclient from the given args.\"\"\"\n loader = loading.get_plugin_loader('password')\n auth = loader.load_from_options(auth_url=auth_url, username=username,\n password=password, project_name=project_name)\n sess = session.Session(auth=auth)\n return client.Client(client_version, session=sess)\n\n def _parseBlockDevice(self, block_device):\n \"\"\"\n Parse a higher-level view of the block device mapping into something\n novaclient wants. This should be similar to how Horizon presents it.\n Required keys:\n device_name: The name of the device; e.g. vda or xda.\n source_type: image, snapshot, volume, or blank/None.\n destination_type: Destination of block device: volume or local.\n delete_on_termination: True/False.\n uuid: The image, snapshot, or volume id.\n boot_index: Integer used for boot order.\n volume_size: Size of the device in GiB.\n \"\"\"\n client_block_device = {}\n client_block_device['device_name'] = block_device.get(\n 'device_name', 'vda')\n client_block_device['source_type'] = block_device.get(\n 'source_type', 'image')\n client_block_device['destination_type'] = block_device.get(\n 'destination_type', 'volume')\n client_block_device['delete_on_termination'] = bool(\n block_device.get('delete_on_termination', True))\n client_block_device['uuid'] = block_device['uuid']\n client_block_device['boot_index'] = int(\n block_device.get('boot_index', 0))\n # Allow None here. It will be rendered later.\n client_block_device['volume_size'] = block_device.get('volume_size')\n return client_block_device\n\n @defer.inlineCallbacks\n def _renderBlockDevice(self, block_device, build):\n \"\"\"Render all of the block device's values.\"\"\"\n rendered_block_device = yield build.render(block_device)\n if rendered_block_device['volume_size'] is None:\n source_type = rendered_block_device['source_type']\n source_uuid = rendered_block_device['uuid']\n volume_size = self._determineVolumeSize(source_type, source_uuid)\n rendered_block_device['volume_size'] = volume_size\n defer.returnValue(rendered_block_device)\n\n def _determineVolumeSize(self, source_type, source_uuid):\n \"\"\"\n Determine the minimum size the volume needs to be for the source.\n Returns the size in GiB.\n \"\"\"\n nova = self.novaclient\n if source_type == 'image':\n # The size returned for an image is in bytes. 
Round up to the next\n # integer GiB.\n image = nova.images.get(source_uuid)\n if hasattr(image, 'OS-EXT-IMG-SIZE:size'):\n size = getattr(image, 'OS-EXT-IMG-SIZE:size')\n size_gb = int(math.ceil(size / 1024.0**3))\n return size_gb\n elif source_type == 'volume':\n # Volumes are easy because they are already in GiB.\n volume = nova.volumes.get(source_uuid)\n return volume.size\n elif source_type == 'snapshot':\n snap = nova.volume_snapshots.get(source_uuid)\n return snap.size\n else:\n unknown_source = (\"The source type '%s' for UUID '%s' is\"\n \" unknown\" % (source_type, source_uuid))\n raise ValueError(unknown_source)\n\n @defer.inlineCallbacks\n def _getImage(self, build):\n # If image is a callable, then pass it the list of images. The\n # function should return the image's UUID to use.\n image = self.image\n if callable(image):\n image_uuid = image(self.novaclient.images.list())\n else:\n image_uuid = yield build.render(image)\n defer.returnValue(image_uuid)\n\n @defer.inlineCallbacks\n def start_instance(self, build):\n if self.instance is not None:\n raise ValueError('instance active')\n image = yield self._getImage(build)\n if self.block_devices is not None:\n block_devices = []\n for bd in self.block_devices:\n rendered_block_device = yield self._renderBlockDevice(bd, build)\n block_devices.append(rendered_block_device)\n else:\n block_devices = None\n res = yield threads.deferToThread(self._start_instance, image,\n block_devices)\n defer.returnValue(res)\n\n def _start_instance(self, image_uuid, block_devices):\n boot_args = [self.workername, image_uuid, self.flavor]\n boot_kwargs = dict(\n meta=self.meta,\n block_device_mapping_v2=block_devices,\n **self.nova_args)\n instance = self.novaclient.servers.create(*boot_args, **boot_kwargs)\n # There is an issue when using sessions that the status is not\n # available on the first try. Trying again will work fine. Fetch the\n # instance to avoid that.\n try:\n instance = self.novaclient.servers.get(instance.id)\n except NotFound:\n log.msg('{class_name} {name} instance {instance.id} '\n '({instance.name}) never found',\n class_name=self.__class__.__name__, name=self.workername,\n instance=instance)\n raise LatentWorkerFailedToSubstantiate(\n instance.id, BUILD)\n self.instance = instance\n log.msg('%s %s starting instance %s (image %s)' %\n (self.__class__.__name__, self.workername, instance.id,\n image_uuid))\n duration = 0\n interval = self._poll_resolution\n while instance.status.startswith(BUILD):\n time.sleep(interval)\n duration += interval\n if duration % 60 == 0:\n log.msg('%s %s has waited %d minutes for instance %s' %\n (self.__class__.__name__, self.workername, duration // 60,\n instance.id))\n try:\n instance = self.novaclient.servers.get(instance.id)\n except NotFound:\n log.msg('%s %s instance %s (%s) went missing' %\n (self.__class__.__name__, self.workername,\n instance.id, instance.name))\n raise LatentWorkerFailedToSubstantiate(\n instance.id, instance.status)\n if instance.status == ACTIVE:\n minutes = duration // 60\n seconds = duration % 60\n log.msg('%s %s instance %s (%s) started '\n 'in about %d minutes %d seconds' %\n (self.__class__.__name__, self.workername,\n instance.id, instance.name, minutes, seconds))\n return [instance.id, image_uuid,\n '%02d:%02d:%02d' % (minutes // 60, minutes % 60, seconds)]\n else:\n self.failed_to_start(instance.id, instance.status)\n\n def stop_instance(self, fast=False):\n if self.instance is None:\n # be gentle. 
Something may just be trying to alert us that an\n # instance never attached, and it's because, somehow, we never\n # started.\n return defer.succeed(None)\n instance = self.instance\n self.instance = None\n self._stop_instance(instance, fast)\n\n def _stop_instance(self, instance, fast):\n try:\n instance = self.novaclient.servers.get(instance.id)\n except NotFound:\n # If can't find the instance, then it's already gone.\n log.msg('%s %s instance %s (%s) already terminated' %\n (self.__class__.__name__, self.workername, instance.id,\n instance.name))\n return\n if instance.status not in (DELETED, UNKNOWN):\n instance.delete()\n log.msg('%s %s terminating instance %s (%s)' %\n (self.__class__.__name__, self.workername, instance.id,\n instance.name))\n", "path": "master/buildbot/worker/openstack.py"}]}
| 3,724 | 175 |
gh_patches_debug_19561
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-6503
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Join operation in trees---not handling label_attribute
<!-- If you have a general question about NetworkX, please use the discussions tab to create a new discussion -->
<!--- Provide a general summary of the issue in the Title above -->
[https://github.com/networkx/networkx/blob/main/networkx/algorithms/tree/operations.py](https://github.com/networkx/networkx/blob/main/networkx/algorithms/tree/operations.py)
1. The resulting graph of the join operation on trees does not include the old labels of the input nodes.
2. The case where `label_attribute` is passed as an argument is not handled.
### Current Behavior

<!--- Tell us what happens instead of the expected behavior -->
### Expected Behavior

<!--- Tell us what should happen -->
### Steps to Reproduce
As shown above
<!--- Provide a minimal example that reproduces the bug -->
### Environment
<!--- Please provide details about your local environment -->
Python version: 3.10.6
NetworkX version: 3.0
### Additional context
[https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.operations.join.html](https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.operations.join.html)
<!--- Add any other context about the problem here, screenshots, etc. -->
Improve test coverage for operations.py (join)
<!-- If you have a general question about NetworkX, please use the discussions tab to create a new discussion -->
<!--- Provide a general summary of the issue in the Title above -->
### Current Behavior
https://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/tree/operations.py the current test coverage is 92.8%. There are still some cases needed to be handled.
<!--- Tell us what happens instead of the expected behavior -->
### Expected Behavior
<!--- Tell us what should happen -->
https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.operations.join.html
1. A test case checking `label_attribute` should be added.
2. In the documentation it is written that the inputs must be trees, but this function works for graphs too. Could you tell me whether it is meant only for trees or for general graphs as well? (See the short demo below.)
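A short demo of point 2 — a sketch against NetworkX 3.0, not taken from the docs:

```python
# join() does not verify that its inputs are trees, so a cycle is accepted
# and survives in the result (whether that is intended is the question).
import networkx as nx

cycle = nx.cycle_graph(3)            # not a tree
joined = nx.join([(cycle, 0)])
print(nx.is_tree(joined))            # False
```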
### Steps to Reproduce
<!--- Provide a minimal example that reproduces the bug -->
### Environment
<!--- Please provide details about your local environment -->
Python version:3.10.6
NetworkX version:3.0
### Additional context
<!--- Add any other context about the problem here, screenshots, etc. -->
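For the missing coverage mentioned above, a hedged sketch of the kind of test that could exercise `label_attribute` — it assumes the fixed behaviour in which the original labels are stored under the chosen attribute name:

```python
import networkx as nx


def test_join_respects_label_attribute():
    # Hypothetical test sketch; assumes join() stores each original node label
    # under the attribute name passed as ``label_attribute``.
    left = nx.path_graph(2)    # a two-node tree rooted at 0
    right = nx.path_graph(2)
    joined = nx.join([(left, 0), (right, 0)], label_attribute="origin")
    old_labels = nx.get_node_attributes(joined, "origin")
    assert set(old_labels.values()) == {0, 1}   # labels from both input trees
```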
</issue>
<code>
[start of networkx/algorithms/tree/operations.py]
1 """Operations on trees."""
2 from functools import partial
3 from itertools import accumulate, chain
4
5 import networkx as nx
6
7 __all__ = ["join"]
8
9
10 def join(rooted_trees, label_attribute=None):
11 """Returns a new rooted tree with a root node joined with the roots
12 of each of the given rooted trees.
13
14 Parameters
15 ----------
16 rooted_trees : list
17 A list of pairs in which each left element is a NetworkX graph
18 object representing a tree and each right element is the root
19 node of that tree. The nodes of these trees will be relabeled to
20 integers.
21
22 label_attribute : str
23 If provided, the old node labels will be stored in the new tree
24 under this node attribute. If not provided, the node attribute
25 ``'_old'`` will store the original label of the node in the
26 rooted trees given in the input.
27
28 Returns
29 -------
30 NetworkX graph
31 The rooted tree whose subtrees are the given rooted trees. The
32 new root node is labeled 0. Each non-root node has an attribute,
33 as described under the keyword argument ``label_attribute``,
34 that indicates the label of the original node in the input tree.
35
36 Notes
37 -----
38 Graph, edge, and node attributes are propagated from the given
39 rooted trees to the created tree. If there are any overlapping graph
40 attributes, those from later trees will overwrite those from earlier
41 trees in the tuple of positional arguments.
42
43 Examples
44 --------
45 Join two full balanced binary trees of height *h* to get a full
46 balanced binary tree of depth *h* + 1::
47
48 >>> h = 4
49 >>> left = nx.balanced_tree(2, h)
50 >>> right = nx.balanced_tree(2, h)
51 >>> joined_tree = nx.join([(left, 0), (right, 0)])
52 >>> nx.is_isomorphic(joined_tree, nx.balanced_tree(2, h + 1))
53 True
54
55 """
56 if len(rooted_trees) == 0:
57 return nx.empty_graph(1)
58
59 # Unzip the zipped list of (tree, root) pairs.
60 trees, roots = zip(*rooted_trees)
61
62 # The join of the trees has the same type as the type of the first
63 # tree.
64 R = type(trees[0])()
65
66 # Relabel the nodes so that their union is the integers starting at 1.
67 if label_attribute is None:
68 label_attribute = "_old"
69 relabel = partial(
70 nx.convert_node_labels_to_integers, label_attribute=label_attribute
71 )
72 lengths = (len(tree) for tree in trees[:-1])
73 first_labels = chain([0], accumulate(lengths))
74 trees = [
75 relabel(tree, first_label=first_label + 1)
76 for tree, first_label in zip(trees, first_labels)
77 ]
78
79 # Get the relabeled roots.
80 roots = [
81 next(v for v, d in tree.nodes(data=True) if d.get("_old") == root)
82 for tree, root in zip(trees, roots)
83 ]
84
85 # Remove the old node labels.
86 for tree in trees:
87 for v in tree:
88 tree.nodes[v].pop("_old")
89
90 # Add all sets of nodes and edges, with data.
91 nodes = (tree.nodes(data=True) for tree in trees)
92 edges = (tree.edges(data=True) for tree in trees)
93 R.add_nodes_from(chain.from_iterable(nodes))
94 R.add_edges_from(chain.from_iterable(edges))
95
96 # Add graph attributes; later attributes take precedent over earlier
97 # attributes.
98 for tree in trees:
99 R.graph.update(tree.graph)
100
101 # Finally, join the subtrees at the root. We know 0 is unused by the
102 # way we relabeled the subtrees.
103 R.add_node(0)
104 R.add_edges_from((0, root) for root in roots)
105
106 return R
107
[end of networkx/algorithms/tree/operations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/networkx/algorithms/tree/operations.py b/networkx/algorithms/tree/operations.py
--- a/networkx/algorithms/tree/operations.py
+++ b/networkx/algorithms/tree/operations.py
@@ -78,25 +78,13 @@
# Get the relabeled roots.
roots = [
- next(v for v, d in tree.nodes(data=True) if d.get("_old") == root)
+ next(v for v, d in tree.nodes(data=True) if d.get(label_attribute) == root)
for tree, root in zip(trees, roots)
]
- # Remove the old node labels.
+ # Add all sets of nodes and edges, attributes
for tree in trees:
- for v in tree:
- tree.nodes[v].pop("_old")
-
- # Add all sets of nodes and edges, with data.
- nodes = (tree.nodes(data=True) for tree in trees)
- edges = (tree.edges(data=True) for tree in trees)
- R.add_nodes_from(chain.from_iterable(nodes))
- R.add_edges_from(chain.from_iterable(edges))
-
- # Add graph attributes; later attributes take precedent over earlier
- # attributes.
- for tree in trees:
- R.graph.update(tree.graph)
+ R.update(tree)
# Finally, join the subtrees at the root. We know 0 is unused by the
# way we relabeled the subtrees.
|
{"golden_diff": "diff --git a/networkx/algorithms/tree/operations.py b/networkx/algorithms/tree/operations.py\n--- a/networkx/algorithms/tree/operations.py\n+++ b/networkx/algorithms/tree/operations.py\n@@ -78,25 +78,13 @@\n \n # Get the relabeled roots.\n roots = [\n- next(v for v, d in tree.nodes(data=True) if d.get(\"_old\") == root)\n+ next(v for v, d in tree.nodes(data=True) if d.get(label_attribute) == root)\n for tree, root in zip(trees, roots)\n ]\n \n- # Remove the old node labels.\n+ # Add all sets of nodes and edges, attributes\n for tree in trees:\n- for v in tree:\n- tree.nodes[v].pop(\"_old\")\n-\n- # Add all sets of nodes and edges, with data.\n- nodes = (tree.nodes(data=True) for tree in trees)\n- edges = (tree.edges(data=True) for tree in trees)\n- R.add_nodes_from(chain.from_iterable(nodes))\n- R.add_edges_from(chain.from_iterable(edges))\n-\n- # Add graph attributes; later attributes take precedent over earlier\n- # attributes.\n- for tree in trees:\n- R.graph.update(tree.graph)\n+ R.update(tree)\n \n # Finally, join the subtrees at the root. We know 0 is unused by the\n # way we relabeled the subtrees.\n", "issue": "Join operation in trees---not handling label_attribute\n<!-- If you have a general question about NetworkX, please use the discussions tab to create a new discussion -->\r\n\r\n<!--- Provide a general summary of the issue in the Title above -->\r\n[https://github.com/networkx/networkx/blob/main/networkx/algorithms/tree/operations.py](https://github.com/networkx/networkx/blob/main/networkx/algorithms/tree/operations.py)\r\n1. The resulting graph of join operation in trees isn't including the old labels of inputs.\r\n2. Not handling the cases where label_attribute is passed as an argument.\r\n\r\n### Current Behavior\r\n\r\n\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n\r\n### Expected Behavior\r\n\r\n\r\n\r\n\r\n<!--- Tell us what should happen -->\r\n\r\n### Steps to Reproduce\r\nAs shown above\r\n<!--- Provide a minimal example that reproduces the bug -->\r\n\r\n### Environment\r\n\r\n<!--- Please provide details about your local environment -->\r\n\r\nPython version: 3.10.6\r\nNetworkX version: 3.0\r\n\r\n### Additional context\r\n[https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.operations.join.html](https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.operations.join.html)\r\n\r\n<!--- Add any other context about the problem here, screenshots, etc. -->\nImprove test coverage for operations.py (join)\n<!-- If you have a general question about NetworkX, please use the discussions tab to create a new discussion -->\r\n\r\n<!--- Provide a general summary of the issue in the Title above -->\r\n\r\n### Current Behavior\r\nhttps://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/tree/operations.py the current test coverage is 92.8%. There are still some cases needed to be handled.\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n\r\n### Expected Behavior\r\n\r\n<!--- Tell us what should happen -->\r\nhttps://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.operations.join.html\r\n1. Test case to check label_attribute should be added \r\n2. In the documentation its written that the inputs must be tree. But this function works for graphs too. 
Could you tell me if its for trees or graphs as well?\r\n### Steps to Reproduce\r\n\r\n<!--- Provide a minimal example that reproduces the bug -->\r\n\r\n### Environment\r\n\r\n<!--- Please provide details about your local environment -->\r\n\r\nPython version:3.10.6\r\nNetworkX version:3.0\r\n\r\n### Additional context\r\n\r\n<!--- Add any other context about the problem here, screenshots, etc. -->\r\n\n", "before_files": [{"content": "\"\"\"Operations on trees.\"\"\"\nfrom functools import partial\nfrom itertools import accumulate, chain\n\nimport networkx as nx\n\n__all__ = [\"join\"]\n\n\ndef join(rooted_trees, label_attribute=None):\n \"\"\"Returns a new rooted tree with a root node joined with the roots\n of each of the given rooted trees.\n\n Parameters\n ----------\n rooted_trees : list\n A list of pairs in which each left element is a NetworkX graph\n object representing a tree and each right element is the root\n node of that tree. The nodes of these trees will be relabeled to\n integers.\n\n label_attribute : str\n If provided, the old node labels will be stored in the new tree\n under this node attribute. If not provided, the node attribute\n ``'_old'`` will store the original label of the node in the\n rooted trees given in the input.\n\n Returns\n -------\n NetworkX graph\n The rooted tree whose subtrees are the given rooted trees. The\n new root node is labeled 0. Each non-root node has an attribute,\n as described under the keyword argument ``label_attribute``,\n that indicates the label of the original node in the input tree.\n\n Notes\n -----\n Graph, edge, and node attributes are propagated from the given\n rooted trees to the created tree. If there are any overlapping graph\n attributes, those from later trees will overwrite those from earlier\n trees in the tuple of positional arguments.\n\n Examples\n --------\n Join two full balanced binary trees of height *h* to get a full\n balanced binary tree of depth *h* + 1::\n\n >>> h = 4\n >>> left = nx.balanced_tree(2, h)\n >>> right = nx.balanced_tree(2, h)\n >>> joined_tree = nx.join([(left, 0), (right, 0)])\n >>> nx.is_isomorphic(joined_tree, nx.balanced_tree(2, h + 1))\n True\n\n \"\"\"\n if len(rooted_trees) == 0:\n return nx.empty_graph(1)\n\n # Unzip the zipped list of (tree, root) pairs.\n trees, roots = zip(*rooted_trees)\n\n # The join of the trees has the same type as the type of the first\n # tree.\n R = type(trees[0])()\n\n # Relabel the nodes so that their union is the integers starting at 1.\n if label_attribute is None:\n label_attribute = \"_old\"\n relabel = partial(\n nx.convert_node_labels_to_integers, label_attribute=label_attribute\n )\n lengths = (len(tree) for tree in trees[:-1])\n first_labels = chain([0], accumulate(lengths))\n trees = [\n relabel(tree, first_label=first_label + 1)\n for tree, first_label in zip(trees, first_labels)\n ]\n\n # Get the relabeled roots.\n roots = [\n next(v for v, d in tree.nodes(data=True) if d.get(\"_old\") == root)\n for tree, root in zip(trees, roots)\n ]\n\n # Remove the old node labels.\n for tree in trees:\n for v in tree:\n tree.nodes[v].pop(\"_old\")\n\n # Add all sets of nodes and edges, with data.\n nodes = (tree.nodes(data=True) for tree in trees)\n edges = (tree.edges(data=True) for tree in trees)\n R.add_nodes_from(chain.from_iterable(nodes))\n R.add_edges_from(chain.from_iterable(edges))\n\n # Add graph attributes; later attributes take precedent over earlier\n # attributes.\n for tree in trees:\n R.graph.update(tree.graph)\n\n # Finally, join the 
subtrees at the root. We know 0 is unused by the\n # way we relabeled the subtrees.\n R.add_node(0)\n R.add_edges_from((0, root) for root in roots)\n\n return R\n", "path": "networkx/algorithms/tree/operations.py"}]}
| 2,288 | 319 |
gh_patches_debug_26658
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-2779
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Append of chunked upload processes raw data
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
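Although the template above is unfilled, the `Upload.append()` shown in the code below hints at a plausible reading of the title: the chunk's raw bytes are hashed even when no `sha256` was supplied, and the stored size comes from `len(chunk)` on the file object rather than from the bytes actually read. A hedged sketch of a more defensive `append()` (an assumption about the intent, reusing the module's existing imports, not a confirmed fix):

```python
def append(self, chunk, offset, sha256=None):
    # Sketch only: hash the raw data only when a checksum was actually
    # supplied, and size the chunk from the bytes that were read.
    data = chunk.read()
    if sha256 is not None and hashlib.sha256(data).hexdigest() != sha256:
        raise serializers.ValidationError("Checksum does not match chunk upload.")
    upload_chunk = UploadChunk(upload=self, offset=offset, size=len(data))
    filename = os.path.basename(upload_chunk.storage_path(""))
    upload_chunk.file.save(filename, ContentFile(data))
```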
</issue>
<code>
[start of pulpcore/app/models/upload.py]
1 import hashlib
2 import os
3
4 from django.core.files.base import ContentFile
5 from django.db import models
6 from django.db.models.signals import post_delete
7 from django.dispatch import receiver
8 from rest_framework import serializers
9
10 from pulpcore.app.models import BaseModel, fields, storage
11
12
13 class Upload(BaseModel):
14 """
15 A chunked upload. Stores chunks until used to create an artifact, etc.
16
17 Fields:
18
19 size (models.BigIntegerField): The size of the file in bytes.
20 """
21
22 size = models.BigIntegerField()
23
24 def append(self, chunk, offset, sha256=None):
25 """
26 Append a chunk to an upload.
27
28 Args:
29 chunk (File): Binary file to append to the upload file.
30 offset (int): First byte position to write chunk to.
31 """
32 chunk_read = chunk.read()
33 current_sha256 = hashlib.sha256(chunk_read).hexdigest()
34 if sha256 and sha256 != current_sha256:
35 raise serializers.ValidationError("Checksum does not match chunk upload.")
36
37 upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))
38 filename = os.path.basename(upload_chunk.storage_path(""))
39 upload_chunk.file.save(filename, ContentFile(chunk_read))
40
41
42 class UploadChunk(BaseModel):
43 """
44 A chunk for an uploaded file.
45
46 Fields:
47
48 file (fields.FileField): A file where the uploaded chunk is stored.
49 upload (models.ForeignKey): Upload this chunk belongs to.
50 offset (models.BigIntegerField): Start of the chunk in bytes.
51 size (models.BigIntegerField): Size of the chunk in bytes.
52 """
53
54 def storage_path(self, name):
55 """
56 Callable used by FileField to determine where the uploaded file should be stored.
57
58 Args:
59 name (str): Original name of uploaded file. It is ignored by this method because the
60 pulp_id is used to determine a file path instead.
61 """
62 return storage.get_upload_chunk_file_path(self.pulp_id)
63
64 file = fields.FileField(null=False, upload_to=storage_path, max_length=255)
65 upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name="chunks")
66 offset = models.BigIntegerField()
67 size = models.BigIntegerField()
68
69
70 @receiver(post_delete, sender=UploadChunk)
71 def upload_chunk_delete(instance, **kwargs):
72 instance.file.delete(save=False)
73
[end of pulpcore/app/models/upload.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py
--- a/pulpcore/app/models/upload.py
+++ b/pulpcore/app/models/upload.py
@@ -1,6 +1,8 @@
import hashlib
import os
+from gettext import gettext as _
+
from django.core.files.base import ContentFile
from django.db import models
from django.db.models.signals import post_delete
@@ -26,17 +28,18 @@
Append a chunk to an upload.
Args:
- chunk (File): Binary file to append to the upload file.
+ chunk (File): Binary data to append to the upload file.
offset (int): First byte position to write chunk to.
"""
- chunk_read = chunk.read()
- current_sha256 = hashlib.sha256(chunk_read).hexdigest()
- if sha256 and sha256 != current_sha256:
- raise serializers.ValidationError("Checksum does not match chunk upload.")
+ chunk = chunk.read()
+ if sha256:
+ current_sha256 = hashlib.sha256(chunk).hexdigest()
+ if sha256 != current_sha256:
+ raise serializers.ValidationError(_("Checksum does not match chunk upload."))
upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))
filename = os.path.basename(upload_chunk.storage_path(""))
- upload_chunk.file.save(filename, ContentFile(chunk_read))
+ upload_chunk.file.save(filename, ContentFile(chunk))
class UploadChunk(BaseModel):
|
{"golden_diff": "diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py\n--- a/pulpcore/app/models/upload.py\n+++ b/pulpcore/app/models/upload.py\n@@ -1,6 +1,8 @@\n import hashlib\n import os\n \n+from gettext import gettext as _\n+\n from django.core.files.base import ContentFile\n from django.db import models\n from django.db.models.signals import post_delete\n@@ -26,17 +28,18 @@\n Append a chunk to an upload.\n \n Args:\n- chunk (File): Binary file to append to the upload file.\n+ chunk (File): Binary data to append to the upload file.\n offset (int): First byte position to write chunk to.\n \"\"\"\n- chunk_read = chunk.read()\n- current_sha256 = hashlib.sha256(chunk_read).hexdigest()\n- if sha256 and sha256 != current_sha256:\n- raise serializers.ValidationError(\"Checksum does not match chunk upload.\")\n+ chunk = chunk.read()\n+ if sha256:\n+ current_sha256 = hashlib.sha256(chunk).hexdigest()\n+ if sha256 != current_sha256:\n+ raise serializers.ValidationError(_(\"Checksum does not match chunk upload.\"))\n \n upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))\n filename = os.path.basename(upload_chunk.storage_path(\"\"))\n- upload_chunk.file.save(filename, ContentFile(chunk_read))\n+ upload_chunk.file.save(filename, ContentFile(chunk))\n \n \n class UploadChunk(BaseModel):\n", "issue": "Append of chunked upload processes raw data\n**Version**\r\nPlease provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "import hashlib\nimport os\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\nfrom django.db.models.signals import post_delete\nfrom django.dispatch import receiver\nfrom rest_framework import serializers\n\nfrom pulpcore.app.models import BaseModel, fields, storage\n\n\nclass Upload(BaseModel):\n \"\"\"\n A chunked upload. 
Stores chunks until used to create an artifact, etc.\n\n Fields:\n\n size (models.BigIntegerField): The size of the file in bytes.\n \"\"\"\n\n size = models.BigIntegerField()\n\n def append(self, chunk, offset, sha256=None):\n \"\"\"\n Append a chunk to an upload.\n\n Args:\n chunk (File): Binary file to append to the upload file.\n offset (int): First byte position to write chunk to.\n \"\"\"\n chunk_read = chunk.read()\n current_sha256 = hashlib.sha256(chunk_read).hexdigest()\n if sha256 and sha256 != current_sha256:\n raise serializers.ValidationError(\"Checksum does not match chunk upload.\")\n\n upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))\n filename = os.path.basename(upload_chunk.storage_path(\"\"))\n upload_chunk.file.save(filename, ContentFile(chunk_read))\n\n\nclass UploadChunk(BaseModel):\n \"\"\"\n A chunk for an uploaded file.\n\n Fields:\n\n file (fields.FileField): A file where the uploaded chunk is stored.\n upload (models.ForeignKey): Upload this chunk belongs to.\n offset (models.BigIntegerField): Start of the chunk in bytes.\n size (models.BigIntegerField): Size of the chunk in bytes.\n \"\"\"\n\n def storage_path(self, name):\n \"\"\"\n Callable used by FileField to determine where the uploaded file should be stored.\n\n Args:\n name (str): Original name of uploaded file. It is ignored by this method because the\n pulp_id is used to determine a file path instead.\n \"\"\"\n return storage.get_upload_chunk_file_path(self.pulp_id)\n\n file = fields.FileField(null=False, upload_to=storage_path, max_length=255)\n upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name=\"chunks\")\n offset = models.BigIntegerField()\n size = models.BigIntegerField()\n\n\n@receiver(post_delete, sender=UploadChunk)\ndef upload_chunk_delete(instance, **kwargs):\n instance.file.delete(save=False)\n", "path": "pulpcore/app/models/upload.py"}]}
| 1,305 | 338 |
gh_patches_debug_16493
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2808
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invited from queue email missing some stuff
### Describe the bug

### How to reproduce
Steps to reproduce the behaviour:
1. Be in queue
2. Get invited (by people deregistering)
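Judging from the screenshot, the plain-text template appears to be rendered with some context missing (the organiser contact line comes out empty). One plausible cause, sketched against `notify_first_waiting()` in the code below — the `contact_address` attribute and the `organisers` context key are assumptions here:

```python
# Hedged sketch: pass the organisers' contact addresses into the template
# context so the template has something to render for them.
organiser_emails = [
    organiser.contact_address
    for organiser in event.organisers.all()
    if organiser.contact_address is not None
]
text_message = text_template.render(
    {
        "event": event,
        "registration": first_waiting,
        "name": first_waiting.name or first_waiting.member.first_name,
        "base_url": settings.BASE_URL,
        "organisers": organiser_emails,
    }
)
```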
</issue>
<code>
[start of website/events/emails.py]
1 """The emails defined by the events package."""
2 from django.conf import settings
3 from django.core.mail import EmailMessage
4 from django.template.loader import get_template
5 from django.utils.translation import gettext_lazy as _
6
7
8 def notify_first_waiting(event):
9 """Send an email to the first person on the waiting list when someone cancels their registration.
10
11 :param event: the event
12 """
13 if (
14 event.max_participants is not None
15 and event.eventregistration_set.filter(date_cancelled=None).count()
16 > event.max_participants
17 ):
18 # Prepare email to send to the first person on the waiting list
19 first_waiting = event.eventregistration_set.filter(
20 date_cancelled=None
21 ).order_by("date")[event.max_participants]
22
23 text_template = get_template("events/member_email.txt")
24
25 subject = _("[THALIA] Notification about your registration for '{}'").format(
26 event.title
27 )
28 text_message = text_template.render(
29 {
30 "event": event,
31 "registration": first_waiting,
32 "name": first_waiting.name or first_waiting.member.first_name,
33 "base_url": settings.BASE_URL,
34 }
35 )
36
37 EmailMessage(subject, text_message, to=[first_waiting.email]).send()
38
39
40 def notify_organiser(event, registration):
41 """Send an email to the organiser of the event if someone cancels their registration.
42
43 :param event: the event
44 :param registration: the registration that was cancelled
45 """
46 if not event.organisers.exists():
47 return
48
49 text_template = get_template("events/organiser_email.txt")
50 subject = f"Registration for {event.title} cancelled by member"
51 text_message = text_template.render({"event": event, "registration": registration})
52
53 EmailMessage(
54 subject,
55 text_message,
56 to=[
57 organiser.contact_mailinglist.name + "@" + settings.SITE_DOMAIN
58 for organiser in event.organisers.all()
59 ],
60 ).send()
61
62
63 def notify_waiting(event, registration):
64 text_template = get_template("events/more_places_email.txt")
65 subject = _("[THALIA] Notification about your registration for '{}'").format(
66 event.title
67 )
68 text_message = text_template.render(
69 {
70 "event": event,
71 "registration": registration,
72 "name": registration.name or registration.member.first_name,
73 "base_url": settings.BASE_URL,
74 }
75 )
76 EmailMessage(subject, text_message, to=[registration.email]).send()
77
[end of website/events/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/events/emails.py b/website/events/emails.py
--- a/website/events/emails.py
+++ b/website/events/emails.py
@@ -25,12 +25,19 @@
subject = _("[THALIA] Notification about your registration for '{}'").format(
event.title
)
+
+ organiser_emails = [
+ organiser.contact_address
+ for organiser in event.organisers.all()
+ if organiser.contact_address is not None
+ ]
text_message = text_template.render(
{
"event": event,
"registration": first_waiting,
"name": first_waiting.name or first_waiting.member.first_name,
"base_url": settings.BASE_URL,
+ "organisers": organiser_emails,
}
)
|
{"golden_diff": "diff --git a/website/events/emails.py b/website/events/emails.py\n--- a/website/events/emails.py\n+++ b/website/events/emails.py\n@@ -25,12 +25,19 @@\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n+\n+ organiser_emails = [\n+ organiser.contact_address\n+ for organiser in event.organisers.all()\n+ if organiser.contact_address is not None\n+ ]\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": first_waiting,\n \"name\": first_waiting.name or first_waiting.member.first_name,\n \"base_url\": settings.BASE_URL,\n+ \"organisers\": organiser_emails,\n }\n )\n", "issue": "Invited from queue email missing some stuff\n### Describe the bug\n\n\n\n### How to reproduce\nSteps to reproduce the behaviour:\n1. Be in queue\n2. Get invited (by people deregistering)\n", "before_files": [{"content": "\"\"\"The emails defined by the events package.\"\"\"\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef notify_first_waiting(event):\n \"\"\"Send an email to the first person on the waiting list when someone cancels their registration.\n\n :param event: the event\n \"\"\"\n if (\n event.max_participants is not None\n and event.eventregistration_set.filter(date_cancelled=None).count()\n > event.max_participants\n ):\n # Prepare email to send to the first person on the waiting list\n first_waiting = event.eventregistration_set.filter(\n date_cancelled=None\n ).order_by(\"date\")[event.max_participants]\n\n text_template = get_template(\"events/member_email.txt\")\n\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": first_waiting,\n \"name\": first_waiting.name or first_waiting.member.first_name,\n \"base_url\": settings.BASE_URL,\n }\n )\n\n EmailMessage(subject, text_message, to=[first_waiting.email]).send()\n\n\ndef notify_organiser(event, registration):\n \"\"\"Send an email to the organiser of the event if someone cancels their registration.\n\n :param event: the event\n :param registration: the registration that was cancelled\n \"\"\"\n if not event.organisers.exists():\n return\n\n text_template = get_template(\"events/organiser_email.txt\")\n subject = f\"Registration for {event.title} cancelled by member\"\n text_message = text_template.render({\"event\": event, \"registration\": registration})\n\n EmailMessage(\n subject,\n text_message,\n to=[\n organiser.contact_mailinglist.name + \"@\" + settings.SITE_DOMAIN\n for organiser in event.organisers.all()\n ],\n ).send()\n\n\ndef notify_waiting(event, registration):\n text_template = get_template(\"events/more_places_email.txt\")\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": registration,\n \"name\": registration.name or registration.member.first_name,\n \"base_url\": settings.BASE_URL,\n }\n )\n EmailMessage(subject, text_message, to=[registration.email]).send()\n", "path": "website/events/emails.py"}]}
| 1,311 | 174 |
gh_patches_debug_32786
|
rasdani/github-patches
|
git_diff
|
celery__kombu-882
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Gathering data_files in setup.py is broken (affects current wheel on PyPI)
The `setup.py` file in this package uses the `data_files` argument to install "data files" on setup. Looking at the code that's responsible for gathering those files in
https://github.com/celery/kombu/blob/v4.2.0/setup.py#L71-L80
one can assume that the intention was to add every file whose name does not end with `.py` under the `kombu` directory. But the code there actually appends a list of _every_ file in the given directory as soon as it encounters at least one file that does not end with `.py`. It can get tricky if the environment is not clean - for example when there are some `.pyc` files.
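For illustration, a minimal sketch of the quoted loop body with made-up file names (not taken from the repository):

```python
import os

# Suppose os.walk() yields this directory listing:
dirpath = "kombu/utils"
filenames = ["functional.py", "functional.pyc", "uuid.py"]

data_files = []
for filename in filenames:
    if not filename.endswith(".py"):          # triggers once, for the .pyc
        data_files.append(
            [dirpath, [os.path.join(dirpath, f) for f in filenames]],
        )

# data_files now lists *every* file in kombu/utils, .py modules included,
# whereas the presumable intent was to collect only the non-.py files.
```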
Now, when creating packages from the source files we have a discrepancy between the wheel format and the tar.gz one (the actual cause for it is not yet clear to me). 
I have tried to install both wheel and source packages on two separate virtualenvs from the following "unclean" state of this repo:
```sh
$ git status --short
$ git clean -nx
Would remove kombu/utils/functional.pyc
```
I have done it like that:
```sh
$ python setup.py sdist
$ python setup.py bdist_wheel
$ /tmp/kombutests-wheel/bin/pip install dist/kombu-4.2.0-py2.py3-none-any.whl
$ /tmp/kombutests-targz/bin/pip install dist/kombu-4.2.0.tar.gz
$ tree /tmp/kombutests-wheel/kombu/
/tmp/kombutests-wheel/kombu/
└── utils
├── __init__.py
├── __init__.pyc
├── amq_manager.py
├── amq_manager.pyc
...
├── uuid.py
└── uuid.pyc
$ tree /tmp/kombutests-targz/kombu
/tmp/kombutests-targz/kombu [error opening dir]
0 directories, 0 files
```
where `/tmp/kombutests-targz` and `/tmp/kombutests-wheel` are the virtualenvs I've mentioned.
This shows that there's an additional `kombu/utils` directory "installed" in the virtualenv's base directory only because we had one additional `.pyc` file in the repo (`kombu/utils/functional.pyc`).
Now, on top of that, it seems that the wheel that's published on PyPI is broken because (I suspect) it was created from a non-clean repo clone.
When downloading the wheel from https://files.pythonhosted.org/packages/2f/0d/416d396ee75b7e47e620005ef81a5b8a36c53c85bb148bd82459e8e61b6b/kombu-4.2.0-py2.py3-none-any.whl (found on https://pypi.org/project/kombu/#files) and unpacking it with `wheel unpack` we can find `kombu-4.2.0/kombu-4.2.0.data/data/kombu` directory. This directory gets copied into the virtualenv base directory. The contents of that directory look similar to what actually gets installed inside `lib/python2.7/site-packages/kombu`, but there are some substantial differences.
For example there's still an old `async` directory there (it was renamed in https://github.com/celery/kombu/commit/75695205f6e7af8e7e9178e010debc3871b19106) and `asynchronous` dir has only `.pyc` files inside `__pycache__` directories.
I've pasted the contents of that directory in http://dpaste.com/1C2C7D5
This has bitten us quite hard since we run our processes from the virtualenv base directory so we've used that incomplete version of kombu from that data directory and encountered some "unexplainable" `ImportError`s.
As I've mentioned, I'm not sure what the original reason is for the difference between installing from the wheel and from the .tar.gz package, although I'm pretty sure the `data_files` argument to setup is not used correctly here. 
Our current workaround for this issue is to put
```
--no-binary kombu
```
on top of our `requirements.txt` file.
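If shipping non-`.py` resources inside the package really is the goal, the conventional route would presumably be `package_data`/`include_package_data` rather than `data_files` (which wheels install under the environment prefix instead of inside `site-packages`). A hedged sketch, not the project's actual fix:

```python
import setuptools

setuptools.setup(
    name="kombu",
    packages=setuptools.find_packages(exclude=["t", "t.*"]),
    include_package_data=True,   # pick up non-.py files listed in MANIFEST.in
    # or explicitly, e.g.: package_data={"kombu": ["py.typed"]},
)
```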
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import os
4 import re
5 import sys
6 import codecs
7
8 import setuptools
9 import setuptools.command.test
10
11 from distutils.command.install import INSTALL_SCHEMES
12
13 if sys.version_info < (2, 7):
14 raise Exception('Kombu 4.0 requires Python 2.7 or higher.')
15
16 try:
17 from setuptools import setup
18 except ImportError:
19 from distutils.core import setup # noqa
20
21 # -- Parse meta
22 re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
23 re_doc = re.compile(r'^"""(.+?)"""')
24
25
26 def add_default(m):
27 attr_name, attr_value = m.groups()
28 return ((attr_name, attr_value.strip("\"'")),)
29
30
31 def add_doc(m):
32 return (('doc', m.groups()[0]),)
33
34 pats = {re_meta: add_default, re_doc: add_doc}
35 here = os.path.abspath(os.path.dirname(__file__))
36 meta_fh = open(os.path.join(here, 'kombu/__init__.py'))
37 try:
38 meta = {}
39 for line in meta_fh:
40 if line.strip() == '# -eof meta-':
41 break
42 for pattern, handler in pats.items():
43 m = pattern.match(line.strip())
44 if m:
45 meta.update(handler(m))
46 finally:
47 meta_fh.close()
48 # --
49
50 data_files = []
51 root_dir = os.path.dirname(__file__)
52 if root_dir != '':
53 os.chdir(root_dir)
54 src_dir = 'kombu'
55
56
57 def fullsplit(path, result=None):
58 if result is None:
59 result = []
60 head, tail = os.path.split(path)
61 if head == '':
62 return [tail] + result
63 if head == path:
64 return result
65 return fullsplit(head, [tail] + result)
66
67
68 for scheme in list(INSTALL_SCHEMES.values()):
69 scheme['data'] = scheme['purelib']
70
71 for dirpath, dirnames, filenames in os.walk(src_dir):
72 # Ignore dirnames that start with '.'
73 for i, dirname in enumerate(dirnames):
74 if dirname.startswith('.'):
75 del dirnames[i]
76 for filename in filenames:
77 if not filename.endswith('.py'):
78 data_files.append(
79 [dirpath, [os.path.join(dirpath, f) for f in filenames]],
80 )
81
82 if os.path.exists('README.rst'):
83 long_description = codecs.open('README.rst', 'r', 'utf-8').read()
84 else:
85 long_description = 'See https://pypi.python.org/pypi/kombu'
86
87 # -*- Installation Requires -*-
88 py_version = sys.version_info
89 is_jython = sys.platform.startswith('java')
90 is_pypy = hasattr(sys, 'pypy_version_info')
91
92
93 def strip_comments(l):
94 return l.split('#', 1)[0].strip()
95
96
97 def reqs(*f):
98 return [
99 r for r in (
100 strip_comments(l) for l in open(
101 os.path.join(os.getcwd(), 'requirements', *f)).readlines()
102 ) if r]
103
104
105 def extras(*p):
106 return reqs('extras', *p)
107
108
109 class pytest(setuptools.command.test.test):
110 user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
111
112 def initialize_options(self):
113 setuptools.command.test.test.initialize_options(self)
114 self.pytest_args = []
115
116 def run_tests(self):
117 import pytest
118 sys.exit(pytest.main(self.pytest_args))
119
120 setup(
121 name='kombu',
122 packages=setuptools.find_packages(exclude=['t', 't.*']),
123 version=meta['version'],
124 description=meta['doc'],
125 long_description=long_description,
126 keywords='messaging message amqp rabbitmq redis actor producer consumer',
127 author=meta['author'],
128 author_email=meta['contact'],
129 url=meta['homepage'],
130 platforms=['any'],
131 data_files=data_files,
132 zip_safe=False,
133 license='BSD',
134 cmdclass={'test': pytest},
135 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
136 install_requires=reqs('default.txt'),
137 tests_require=reqs('test.txt'),
138 extras_require={
139 'msgpack': extras('msgpack.txt'),
140 'yaml': extras('yaml.txt'),
141 'redis': extras('redis.txt'),
142 'mongodb': extras('mongodb.txt'),
143 'sqs': extras('sqs.txt'),
144 'zookeeper': extras('zookeeper.txt'),
145 'sqlalchemy': extras('sqlalchemy.txt'),
146 'librabbitmq': extras('librabbitmq.txt'),
147 'pyro': extras('pyro.txt'),
148 'slmq': extras('slmq.txt'),
149 'qpid': extras('qpid.txt'),
150 'consul': extras('consul.txt'),
151 },
152 classifiers=[
153 'Development Status :: 5 - Production/Stable',
154 'License :: OSI Approved :: BSD License',
155 'Operating System :: OS Independent',
156 'Programming Language :: Python',
157 'Programming Language :: Python :: 3',
158 'Programming Language :: Python :: 3.4',
159 'Programming Language :: Python :: 3.5',
160 'Programming Language :: Python :: 3.6',
161 'Programming Language :: Python :: 2.7',
162 'Programming Language :: Python :: 2',
163 'Programming Language :: Python :: Implementation :: CPython',
164 'Programming Language :: Python :: Implementation :: PyPy',
165 'Programming Language :: Python :: Implementation :: Jython',
166 'Intended Audience :: Developers',
167 'Topic :: Communications',
168 'Topic :: System :: Distributed Computing',
169 'Topic :: System :: Networking',
170 'Topic :: Software Development :: Libraries :: Python Modules',
171 ],
172 )
173
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,7 @@
def add_doc(m):
return (('doc', m.groups()[0]),)
+
pats = {re_meta: add_default, re_doc: add_doc}
here = os.path.abspath(os.path.dirname(__file__))
meta_fh = open(os.path.join(here, 'kombu/__init__.py'))
@@ -47,12 +48,6 @@
meta_fh.close()
# --
-data_files = []
-root_dir = os.path.dirname(__file__)
-if root_dir != '':
- os.chdir(root_dir)
-src_dir = 'kombu'
-
def fullsplit(path, result=None):
if result is None:
@@ -68,17 +63,6 @@
for scheme in list(INSTALL_SCHEMES.values()):
scheme['data'] = scheme['purelib']
-for dirpath, dirnames, filenames in os.walk(src_dir):
- # Ignore dirnames that start with '.'
- for i, dirname in enumerate(dirnames):
- if dirname.startswith('.'):
- del dirnames[i]
- for filename in filenames:
- if not filename.endswith('.py'):
- data_files.append(
- [dirpath, [os.path.join(dirpath, f) for f in filenames]],
- )
-
if os.path.exists('README.rst'):
long_description = codecs.open('README.rst', 'r', 'utf-8').read()
else:
@@ -117,6 +101,7 @@
import pytest
sys.exit(pytest.main(self.pytest_args))
+
setup(
name='kombu',
packages=setuptools.find_packages(exclude=['t', 't.*']),
@@ -128,7 +113,6 @@
author_email=meta['contact'],
url=meta['homepage'],
platforms=['any'],
- data_files=data_files,
zip_safe=False,
license='BSD',
cmdclass={'test': pytest},
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,6 +31,7 @@\n def add_doc(m):\n return (('doc', m.groups()[0]),)\n \n+\n pats = {re_meta: add_default, re_doc: add_doc}\n here = os.path.abspath(os.path.dirname(__file__))\n meta_fh = open(os.path.join(here, 'kombu/__init__.py'))\n@@ -47,12 +48,6 @@\n meta_fh.close()\n # --\n \n-data_files = []\n-root_dir = os.path.dirname(__file__)\n-if root_dir != '':\n- os.chdir(root_dir)\n-src_dir = 'kombu'\n-\n \n def fullsplit(path, result=None):\n if result is None:\n@@ -68,17 +63,6 @@\n for scheme in list(INSTALL_SCHEMES.values()):\n scheme['data'] = scheme['purelib']\n \n-for dirpath, dirnames, filenames in os.walk(src_dir):\n- # Ignore dirnames that start with '.'\n- for i, dirname in enumerate(dirnames):\n- if dirname.startswith('.'):\n- del dirnames[i]\n- for filename in filenames:\n- if not filename.endswith('.py'):\n- data_files.append(\n- [dirpath, [os.path.join(dirpath, f) for f in filenames]],\n- )\n-\n if os.path.exists('README.rst'):\n long_description = codecs.open('README.rst', 'r', 'utf-8').read()\n else:\n@@ -117,6 +101,7 @@\n import pytest\n sys.exit(pytest.main(self.pytest_args))\n \n+\n setup(\n name='kombu',\n packages=setuptools.find_packages(exclude=['t', 't.*']),\n@@ -128,7 +113,6 @@\n author_email=meta['contact'],\n url=meta['homepage'],\n platforms=['any'],\n- data_files=data_files,\n zip_safe=False,\n license='BSD',\n cmdclass={'test': pytest},\n", "issue": "Gathering data_files in setup.py is broken (affects current wheel on PyPI)\nThe `setup.py` file in this package uses the `data_files` argument to install \"data files\" on setup. Looking at the code that's responsible for gathering those files in\r\nhttps://github.com/celery/kombu/blob/v4.2.0/setup.py#L71-L80\r\none can assume that the intention was to add every file which name is not ending with `.py` under the `kombu` directory. But the code there actually generates a list of _every_ file in the given directory when it encounters at least one file that's not ending with `.py`. It can get tricky if the environment is not clean - for example when there are some `.pyc` files.\r\n\r\nNow, when creating packages from the source files we have a discrepancy when it comes to wheel format and tar.gz one (the actual cause for it is not yet clear to me). 
\r\n\r\nI have tried to install both wheel and source packages on two separate virtualenvs from the following \"unclean\" state of this repo:\r\n```sh\r\n$ git status --short\r\n$ git clean -nx\r\nWould remove kombu/utils/functional.pyc\r\n``` \r\n\r\nI have done it like that:\r\n```sh\r\n$ python setup.py sdist\r\n$ python setup.py bdist_wheel\r\n$ /tmp/kombutests-wheel/bin/pip install dist/kombu-4.2.0-py2.py3-none-any.whl\r\n$ /tmp/kombutests-targz/bin/pip install dist/kombu-4.2.0.tar.gz\r\n$ tree /tmp/kombutests-wheel/kombu/\r\n/tmp/kombutests-wheel/kombu/\r\n\u2514\u2500\u2500 utils\r\n \u251c\u2500\u2500 __init__.py\r\n \u251c\u2500\u2500 __init__.pyc\r\n \u251c\u2500\u2500 amq_manager.py\r\n \u251c\u2500\u2500 amq_manager.pyc\r\n ...\r\n \u251c\u2500\u2500 uuid.py\r\n \u2514\u2500\u2500 uuid.pyc\r\n$ tree /tmp/kombutests-targz/kombu\r\n/tmp/kombutests-targz/kombu [error opening dir]\r\n\r\n0 directories, 0 files\r\n```\r\nwhere `/tmp/kombutests-targz` and `/tmp/kombutests-wheel` are the virtualenvs I've mentioned.\r\nThis shows that there's an additional `kombu/utils` directory \"installed\" in the virtualenv's base directory only because we had one additional `.pyc` file in the repo (`kombu/utils/functional.pyc`).\r\n\r\nNow, on top of that, it seems that the wheel that's published on PyPi is broken because (I suspect) it was created from a non-clean repo clone.\r\n\r\nWhen downloading the wheel from https://files.pythonhosted.org/packages/2f/0d/416d396ee75b7e47e620005ef81a5b8a36c53c85bb148bd82459e8e61b6b/kombu-4.2.0-py2.py3-none-any.whl (found on https://pypi.org/project/kombu/#files) and unpacking it with `wheel unpack` we can find `kombu-4.2.0/kombu-4.2.0.data/data/kombu` directory. This directory gets copied into the virtualenv base directory. The contents of that directory look similar to what actually gets installed inside `lib/python2.7/site-packages/kombu`, but there are some substantial differences. \r\nFor example there's still an old `async` directory there (it was renamed in https://github.com/celery/kombu/commit/75695205f6e7af8e7e9178e010debc3871b19106) and `asynchronous` dir has only `.pyc` files inside `__pycache__` directories. \r\nI've pasted the contents of that directory in http://dpaste.com/1C2C7D5\r\n\r\nThis has bitten us quite hard since we run our processes from the virtualenv base directory so we've used that incomplete version of kombu from that data directory and encountered some \"unexplainable\" `ImportError`s. \r\n\r\nAs I've mentioned I'm not sure what's the original reason for such difference when installing from wheel and from .tar.gz package. Although I'm pretty sure the `data_files` argument from setup is not used correctly here. 
\r\n\r\nOur current workaround for this issue is to put\r\n```\r\n--no-binary kombu\r\n```\r\non top of our `requirements.txt` file.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport sys\nimport codecs\n\nimport setuptools\nimport setuptools.command.test\n\nfrom distutils.command.install import INSTALL_SCHEMES\n\nif sys.version_info < (2, 7):\n raise Exception('Kombu 4.0 requires Python 2.7 or higher.')\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup # noqa\n\n# -- Parse meta\nre_meta = re.compile(r'__(\\w+?)__\\s*=\\s*(.*)')\nre_doc = re.compile(r'^\"\"\"(.+?)\"\"\"')\n\n\ndef add_default(m):\n attr_name, attr_value = m.groups()\n return ((attr_name, attr_value.strip(\"\\\"'\")),)\n\n\ndef add_doc(m):\n return (('doc', m.groups()[0]),)\n\npats = {re_meta: add_default, re_doc: add_doc}\nhere = os.path.abspath(os.path.dirname(__file__))\nmeta_fh = open(os.path.join(here, 'kombu/__init__.py'))\ntry:\n meta = {}\n for line in meta_fh:\n if line.strip() == '# -eof meta-':\n break\n for pattern, handler in pats.items():\n m = pattern.match(line.strip())\n if m:\n meta.update(handler(m))\nfinally:\n meta_fh.close()\n# --\n\ndata_files = []\nroot_dir = os.path.dirname(__file__)\nif root_dir != '':\n os.chdir(root_dir)\nsrc_dir = 'kombu'\n\n\ndef fullsplit(path, result=None):\n if result is None:\n result = []\n head, tail = os.path.split(path)\n if head == '':\n return [tail] + result\n if head == path:\n return result\n return fullsplit(head, [tail] + result)\n\n\nfor scheme in list(INSTALL_SCHEMES.values()):\n scheme['data'] = scheme['purelib']\n\nfor dirpath, dirnames, filenames in os.walk(src_dir):\n # Ignore dirnames that start with '.'\n for i, dirname in enumerate(dirnames):\n if dirname.startswith('.'):\n del dirnames[i]\n for filename in filenames:\n if not filename.endswith('.py'):\n data_files.append(\n [dirpath, [os.path.join(dirpath, f) for f in filenames]],\n )\n\nif os.path.exists('README.rst'):\n long_description = codecs.open('README.rst', 'r', 'utf-8').read()\nelse:\n long_description = 'See https://pypi.python.org/pypi/kombu'\n\n# -*- Installation Requires -*-\npy_version = sys.version_info\nis_jython = sys.platform.startswith('java')\nis_pypy = hasattr(sys, 'pypy_version_info')\n\n\ndef strip_comments(l):\n return l.split('#', 1)[0].strip()\n\n\ndef reqs(*f):\n return [\n r for r in (\n strip_comments(l) for l in open(\n os.path.join(os.getcwd(), 'requirements', *f)).readlines()\n ) if r]\n\n\ndef extras(*p):\n return reqs('extras', *p)\n\n\nclass pytest(setuptools.command.test.test):\n user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]\n\n def initialize_options(self):\n setuptools.command.test.test.initialize_options(self)\n self.pytest_args = []\n\n def run_tests(self):\n import pytest\n sys.exit(pytest.main(self.pytest_args))\n\nsetup(\n name='kombu',\n packages=setuptools.find_packages(exclude=['t', 't.*']),\n version=meta['version'],\n description=meta['doc'],\n long_description=long_description,\n keywords='messaging message amqp rabbitmq redis actor producer consumer',\n author=meta['author'],\n author_email=meta['contact'],\n url=meta['homepage'],\n platforms=['any'],\n data_files=data_files,\n zip_safe=False,\n license='BSD',\n cmdclass={'test': pytest},\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n install_requires=reqs('default.txt'),\n tests_require=reqs('test.txt'),\n extras_require={\n 'msgpack': 
extras('msgpack.txt'),\n 'yaml': extras('yaml.txt'),\n 'redis': extras('redis.txt'),\n 'mongodb': extras('mongodb.txt'),\n 'sqs': extras('sqs.txt'),\n 'zookeeper': extras('zookeeper.txt'),\n 'sqlalchemy': extras('sqlalchemy.txt'),\n 'librabbitmq': extras('librabbitmq.txt'),\n 'pyro': extras('pyro.txt'),\n 'slmq': extras('slmq.txt'),\n 'qpid': extras('qpid.txt'),\n 'consul': extras('consul.txt'),\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Intended Audience :: Developers',\n 'Topic :: Communications',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: System :: Networking',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n)\n", "path": "setup.py"}]}
| 3,180 | 444 |
gh_patches_debug_2509
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-3901
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle invalid data while changing data types during migrations
**Related error:**
```
sqlalchemy.exc.IntegrityError: (psycopg2.IntegrityError) column "pdf_url" contains null values
[SQL: 'ALTER TABLE ticket_holders ADD COLUMN pdf_url VARCHAR NOT NULL']
```
Due to this, migrations are not running on `api.eventyay.com`.
Ensure existing data (valid or not) is handled properly, and also ensure columns have the proper null setting (for example, here, make `pdf_url` nullable).
Similarly, check and ensure the other columns as well.
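One possible way to handle this, sketched below for the `pdf_url` case, is to add the column as nullable, backfill existing rows, and only then tighten the constraint if a NOT NULL column is really wanted (the empty-string backfill value is only an assumption for illustration; the simplest fix is to just leave the column nullable):
```
from alembic import op
import sqlalchemy as sa


def upgrade():
    # sketch: add the column as nullable first, backfill existing rows,
    # then optionally tighten the constraint once no NULLs remain
    op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=True))
    op.execute("UPDATE ticket_holders SET pdf_url = '' WHERE pdf_url IS NULL")
    op.alter_column('ticket_holders', 'pdf_url', nullable=False)
```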
**Full log attached for reference:**
```
2017-06-28T05:49:05.921467228Z run_migrations_online()
2017-06-28T05:49:05.921470119Z File "migrations/env.py", line 151, in run_migrations_online
2017-06-28T05:49:05.921473075Z context.run_migrations()
2017-06-28T05:49:05.921475912Z File "<string>", line 8, in run_migrations
2017-06-28T05:49:05.921479088Z File "/usr/local/lib/python2.7/site-packages/alembic/runtime/environment.py", line 817, in run_migrations
2017-06-28T05:49:05.922610188Z self.get_context().run_migrations(**kw)
2017-06-28T05:49:05.922642643Z File "/usr/local/lib/python2.7/site-packages/alembic/runtime/migration.py", line 329, in run_migrations
2017-06-28T05:49:05.923243091Z step.migration_fn(**kw)
2017-06-28T05:49:05.923274018Z File "/opev/open_event/migrations/versions/c6b183975be9_.py", line 20, in upgrade
2017-06-28T05:49:05.923338965Z op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=False))
2017-06-28T05:49:05.923364414Z File "<string>", line 8, in add_column
2017-06-28T05:49:05.923427981Z File "<string>", line 3, in add_column
2017-06-28T05:49:05.923478561Z File "/usr/local/lib/python2.7/site-packages/alembic/operations/ops.py", line 1551, in add_column
2017-06-28T05:49:05.926653441Z return operations.invoke(op)
2017-06-28T05:49:05.926712969Z File "/usr/local/lib/python2.7/site-packages/alembic/operations/base.py", line 318, in invoke
2017-06-28T05:49:05.927353504Z return fn(self, operation)
2017-06-28T05:49:05.927386093Z File "/usr/local/lib/python2.7/site-packages/alembic/operations/toimpl.py", line 123, in add_column
2017-06-28T05:49:05.927969743Z schema=schema
2017-06-28T05:49:05.927998827Z File "/usr/local/lib/python2.7/site-packages/alembic/ddl/impl.py", line 172, in add_column
2017-06-28T05:49:05.92861825Z self._exec(base.AddColumn(table_name, column, schema=schema))
2017-06-28T05:49:05.92864919Z File "/usr/local/lib/python2.7/site-packages/alembic/ddl/impl.py", line 118, in _exec
2017-06-28T05:49:05.928742734Z return conn.execute(construct, *multiparams, **params)
2017-06-28T05:49:05.928786028Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 945, in execute
2017-06-28T05:49:05.929323557Z return meth(self, multiparams, params)
2017-06-28T05:49:05.929333376Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
2017-06-28T05:49:05.929336744Z return connection._execute_ddl(self, multiparams, params)
2017-06-28T05:49:05.929339861Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1002, in _execute_ddl
2017-06-28T05:49:05.929512493Z compiled
2017-06-28T05:49:05.929538983Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
2017-06-28T05:49:05.929768208Z context)
2017-06-28T05:49:05.929810133Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
2017-06-28T05:49:05.930067186Z exc_info
2017-06-28T05:49:05.930093798Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
2017-06-28T05:49:05.930194023Z reraise(type(exception), exception, tb=exc_tb, cause=cause)
2017-06-28T05:49:05.930237111Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
2017-06-28T05:49:05.931176835Z context)
2017-06-28T05:49:05.931187737Z File "/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 470, in do_execute
2017-06-28T05:49:05.931191369Z cursor.execute(statement, parameters)
2017-06-28T05:49:05.931334485Z sqlalchemy.exc.IntegrityError: (psycopg2.IntegrityError) column "pdf_url" contains null values
2017-06-28T05:49:05.931342263Z [SQL: 'ALTER TABLE ticket_holders ADD COLUMN pdf_url VARCHAR NOT NULL']
```
</issue>
<code>
[start of migrations/versions/c6b183975be9_.py]
1 """empty message
2
3 Revision ID: c6b183975be9
4 Revises: ccd80550c01f
5 Create Date: 2017-06-12 00:42:29.329727
6
7 """
8
9 # revision identifiers, used by Alembic.
10 revision = 'c6b183975be9'
11 down_revision = 'ccd80550c01f'
12
13 from alembic import op
14 import sqlalchemy as sa
15 import sqlalchemy_utils
16
17
18 def upgrade():
19 # ### commands auto generated by Alembic - please adjust! ###
20 op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=False))
21 # ### end Alembic commands ###
22
23
24 def downgrade():
25 # ### commands auto generated by Alembic - please adjust! ###
26 op.drop_column('ticket_holders', 'pdf_url')
27 # ### end Alembic commands ###
28
[end of migrations/versions/c6b183975be9_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/migrations/versions/c6b183975be9_.py b/migrations/versions/c6b183975be9_.py
--- a/migrations/versions/c6b183975be9_.py
+++ b/migrations/versions/c6b183975be9_.py
@@ -17,7 +17,7 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
- op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=False))
+ op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=True))
# ### end Alembic commands ###
|
{"golden_diff": "diff --git a/migrations/versions/c6b183975be9_.py b/migrations/versions/c6b183975be9_.py\n--- a/migrations/versions/c6b183975be9_.py\n+++ b/migrations/versions/c6b183975be9_.py\n@@ -17,7 +17,7 @@\n \n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n- op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=False))\n+ op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=True))\n # ### end Alembic commands ###\n", "issue": "Handle invalid data while change data types during migrations\n**Related error:**\r\n\r\n```\r\nsqlalchemy.exc.IntegrityError: (psycopg2.IntegrityError) column \"pdf_url\" contains null values\r\n[SQL: 'ALTER TABLE ticket_holders ADD COLUMN pdf_url VARCHAR NOT NULL']\r\n```\r\n\r\nDue to this, migration are not running on `api.eventyay.com`.\r\n\r\nEnsure existing data (valid or not) is handled properly and also ensure columns have proper null setting. (for example here, make pdf_url nullable.)\r\nSimilarly check and ensure other columns as well\r\n\r\n\r\n**Full log attached for reference:**\r\n\r\n```\r\n2017-06-28T05:49:05.921467228Z run_migrations_online()\r\n2017-06-28T05:49:05.921470119Z File \"migrations/env.py\", line 151, in run_migrations_online\r\n2017-06-28T05:49:05.921473075Z context.run_migrations()\r\n2017-06-28T05:49:05.921475912Z File \"<string>\", line 8, in run_migrations\r\n2017-06-28T05:49:05.921479088Z File \"/usr/local/lib/python2.7/site-packages/alembic/runtime/environment.py\", line 817, in run_migrations\r\n2017-06-28T05:49:05.922610188Z self.get_context().run_migrations(**kw)\r\n2017-06-28T05:49:05.922642643Z File \"/usr/local/lib/python2.7/site-packages/alembic/runtime/migration.py\", line 329, in run_migrations\r\n2017-06-28T05:49:05.923243091Z step.migration_fn(**kw)\r\n2017-06-28T05:49:05.923274018Z File \"/opev/open_event/migrations/versions/c6b183975be9_.py\", line 20, in upgrade\r\n2017-06-28T05:49:05.923338965Z op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=False))\r\n2017-06-28T05:49:05.923364414Z File \"<string>\", line 8, in add_column\r\n2017-06-28T05:49:05.923427981Z File \"<string>\", line 3, in add_column\r\n2017-06-28T05:49:05.923478561Z File \"/usr/local/lib/python2.7/site-packages/alembic/operations/ops.py\", line 1551, in add_column\r\n2017-06-28T05:49:05.926653441Z return operations.invoke(op)\r\n2017-06-28T05:49:05.926712969Z File \"/usr/local/lib/python2.7/site-packages/alembic/operations/base.py\", line 318, in invoke\r\n2017-06-28T05:49:05.927353504Z return fn(self, operation)\r\n2017-06-28T05:49:05.927386093Z File \"/usr/local/lib/python2.7/site-packages/alembic/operations/toimpl.py\", line 123, in add_column\r\n2017-06-28T05:49:05.927969743Z schema=schema\r\n2017-06-28T05:49:05.927998827Z File \"/usr/local/lib/python2.7/site-packages/alembic/ddl/impl.py\", line 172, in add_column\r\n2017-06-28T05:49:05.92861825Z self._exec(base.AddColumn(table_name, column, schema=schema))\r\n2017-06-28T05:49:05.92864919Z File \"/usr/local/lib/python2.7/site-packages/alembic/ddl/impl.py\", line 118, in _exec\r\n2017-06-28T05:49:05.928742734Z return conn.execute(construct, *multiparams, **params)\r\n2017-06-28T05:49:05.928786028Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 945, in execute\r\n2017-06-28T05:49:05.929323557Z return meth(self, multiparams, params)\r\n2017-06-28T05:49:05.929333376Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/sql/ddl.py\", line 68, in 
_execute_on_connection\r\n2017-06-28T05:49:05.929336744Z return connection._execute_ddl(self, multiparams, params)\r\n2017-06-28T05:49:05.929339861Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1002, in _execute_ddl\r\n2017-06-28T05:49:05.929512493Z compiled\r\n2017-06-28T05:49:05.929538983Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1189, in _execute_context\r\n2017-06-28T05:49:05.929768208Z context)\r\n2017-06-28T05:49:05.929810133Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1402, in _handle_dbapi_exception\r\n2017-06-28T05:49:05.930067186Z exc_info\r\n2017-06-28T05:49:05.930093798Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py\", line 203, in raise_from_cause\r\n2017-06-28T05:49:05.930194023Z reraise(type(exception), exception, tb=exc_tb, cause=cause)\r\n2017-06-28T05:49:05.930237111Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1182, in _execute_context\r\n2017-06-28T05:49:05.931176835Z context)\r\n2017-06-28T05:49:05.931187737Z File \"/usr/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py\", line 470, in do_execute\r\n2017-06-28T05:49:05.931191369Z cursor.execute(statement, parameters)\r\n2017-06-28T05:49:05.931334485Z sqlalchemy.exc.IntegrityError: (psycopg2.IntegrityError) column \"pdf_url\" contains null values\r\n2017-06-28T05:49:05.931342263Z [SQL: 'ALTER TABLE ticket_holders ADD COLUMN pdf_url VARCHAR NOT NULL']\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: c6b183975be9\nRevises: ccd80550c01f\nCreate Date: 2017-06-12 00:42:29.329727\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'c6b183975be9'\ndown_revision = 'ccd80550c01f'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('ticket_holders', sa.Column('pdf_url', sa.String(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('ticket_holders', 'pdf_url')\n # ### end Alembic commands ###\n", "path": "migrations/versions/c6b183975be9_.py"}]}
| 2,911 | 164 |
gh_patches_debug_34640
|
rasdani/github-patches
|
git_diff
|
freqtrade__freqtrade-2451
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strategy analysis -> how many max_open_trades will I use for this strategy?
As an algo trader
I want to compute statistics like maximum drawdown and profit, and find an optimum between stake_amount and max_open_trade, so I run and export the backtest with the --disable-max-market-positions flag and run simulations in Jupyter.
So that I can find a compromise between risk and profit.
To achieve that I need to find trades that happen at the same time, flag them (open_trade_number), and then filter them (see the sketch below).
I don't know whether it would be useful to have this directly as a column in the dataframe returned by load_backtest_data()?
Maybe it is too specific a need.
Have a nice day.
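For reference, a minimal sketch of this kind of post-processing, assuming a trades dataframe as returned by `load_backtest_data()` with `open_time`/`close_time` columns (the helper name and the 5-minute frequency are illustrative assumptions, not freqtrade API):
```
import pandas as pd


def count_parallel_trades(trades: pd.DataFrame, freq: str = "5min") -> pd.Series:
    # expand every trade over the candles it was open for ...
    spans = [pd.Series(pd.date_range(t.open_time, t.close_time, freq=freq))
             for t in trades.itertuples()]
    # ... then count how many trades are open in each period
    return pd.concat(spans).value_counts().sort_index()
```
Filtering the result for values above a candidate `max_open_trades` then shows how often that limit would have been exceeded.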
</issue>
<code>
[start of freqtrade/data/btanalysis.py]
1 """
2 Helpers when analyzing backtest data
3 """
4 import logging
5 from pathlib import Path
6 from typing import Dict
7
8 import numpy as np
9 import pandas as pd
10 import pytz
11
12 from freqtrade import persistence
13 from freqtrade.misc import json_load
14 from freqtrade.persistence import Trade
15
16 logger = logging.getLogger(__name__)
17
18 # must align with columns in backtest.py
19 BT_DATA_COLUMNS = ["pair", "profitperc", "open_time", "close_time", "index", "duration",
20 "open_rate", "close_rate", "open_at_end", "sell_reason"]
21
22
23 def load_backtest_data(filename) -> pd.DataFrame:
24 """
25 Load backtest data file.
26 :param filename: pathlib.Path object, or string pointing to the file.
27 :return: a dataframe with the analysis results
28 """
29 if isinstance(filename, str):
30 filename = Path(filename)
31
32 if not filename.is_file():
33 raise ValueError(f"File {filename} does not exist.")
34
35 with filename.open() as file:
36 data = json_load(file)
37
38 df = pd.DataFrame(data, columns=BT_DATA_COLUMNS)
39
40 df['open_time'] = pd.to_datetime(df['open_time'],
41 unit='s',
42 utc=True,
43 infer_datetime_format=True
44 )
45 df['close_time'] = pd.to_datetime(df['close_time'],
46 unit='s',
47 utc=True,
48 infer_datetime_format=True
49 )
50 df['profitabs'] = df['close_rate'] - df['open_rate']
51 df = df.sort_values("open_time").reset_index(drop=True)
52 return df
53
54
55 def evaluate_result_multi(results: pd.DataFrame, freq: str, max_open_trades: int) -> pd.DataFrame:
56 """
57 Find overlapping trades by expanding each trade once per period it was open
58 and then counting overlaps
59 :param results: Results Dataframe - can be loaded
60 :param freq: Frequency used for the backtest
61 :param max_open_trades: parameter max_open_trades used during backtest run
62 :return: dataframe with open-counts per time-period in freq
63 """
64 dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time, freq=freq))
65 for row in results[['open_time', 'close_time']].iterrows()]
66 deltas = [len(x) for x in dates]
67 dates = pd.Series(pd.concat(dates).values, name='date')
68 df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)
69
70 df2 = pd.concat([dates, df2], axis=1)
71 df2 = df2.set_index('date')
72 df_final = df2.resample(freq)[['pair']].count()
73 return df_final[df_final['pair'] > max_open_trades]
74
75
76 def load_trades_from_db(db_url: str) -> pd.DataFrame:
77 """
78 Load trades from a DB (using dburl)
79 :param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)
80 :return: Dataframe containing Trades
81 """
82 trades: pd.DataFrame = pd.DataFrame([], columns=BT_DATA_COLUMNS)
83 persistence.init(db_url, clean_open_orders=False)
84
85 columns = ["pair", "open_time", "close_time", "profit", "profitperc",
86 "open_rate", "close_rate", "amount", "duration", "sell_reason",
87 "fee_open", "fee_close", "open_rate_requested", "close_rate_requested",
88 "stake_amount", "max_rate", "min_rate", "id", "exchange",
89 "stop_loss", "initial_stop_loss", "strategy", "ticker_interval"]
90
91 trades = pd.DataFrame([(t.pair,
92 t.open_date.replace(tzinfo=pytz.UTC),
93 t.close_date.replace(tzinfo=pytz.UTC) if t.close_date else None,
94 t.calc_profit(), t.calc_profit_percent(),
95 t.open_rate, t.close_rate, t.amount,
96 (round((t.close_date.timestamp() - t.open_date.timestamp()) / 60, 2)
97 if t.close_date else None),
98 t.sell_reason,
99 t.fee_open, t.fee_close,
100 t.open_rate_requested,
101 t.close_rate_requested,
102 t.stake_amount,
103 t.max_rate,
104 t.min_rate,
105 t.id, t.exchange,
106 t.stop_loss, t.initial_stop_loss,
107 t.strategy, t.ticker_interval
108 )
109 for t in Trade.query.all()],
110 columns=columns)
111
112 return trades
113
114
115 def load_trades(source: str, db_url: str, exportfilename: str) -> pd.DataFrame:
116 """
117 Based on configuration option "trade_source":
118 * loads data from DB (using `db_url`)
119 * loads data from backtestfile (using `exportfilename`)
120 """
121 if source == "DB":
122 return load_trades_from_db(db_url)
123 elif source == "file":
124 return load_backtest_data(Path(exportfilename))
125
126
127 def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame) -> pd.DataFrame:
128 """
129 Compare trades and backtested pair DataFrames to get trades performed on backtested period
130 :return: the DataFrame of a trades of period
131 """
132 trades = trades.loc[(trades['open_time'] >= dataframe.iloc[0]['date']) &
133 (trades['close_time'] <= dataframe.iloc[-1]['date'])]
134 return trades
135
136
137 def combine_tickers_with_mean(tickers: Dict[str, pd.DataFrame], column: str = "close"):
138 """
139 Combine multiple dataframes "column"
140 :param tickers: Dict of Dataframes, dict key should be pair.
141 :param column: Column in the original dataframes to use
142 :return: DataFrame with the column renamed to the dict key, and a column
143 named mean, containing the mean of all pairs.
144 """
145 df_comb = pd.concat([tickers[pair].set_index('date').rename(
146 {column: pair}, axis=1)[pair] for pair in tickers], axis=1)
147
148 df_comb['mean'] = df_comb.mean(axis=1)
149
150 return df_comb
151
152
153 def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
154 timeframe: str) -> pd.DataFrame:
155 """
156 Adds a column `col_name` with the cumulative profit for the given trades array.
157 :param df: DataFrame with date index
158 :param trades: DataFrame containing trades (requires columns close_time and profitperc)
159 :param col_name: Column name that will be assigned the results
160 :param timeframe: Timeframe used during the operations
161 :return: Returns df with one additional column, col_name, containing the cumulative profit.
162 """
163 from freqtrade.exchange import timeframe_to_minutes
164 ticker_minutes = timeframe_to_minutes(timeframe)
165 # Resample to ticker_interval to make sure trades match candles
166 _trades_sum = trades.resample(f'{ticker_minutes}min', on='close_time')[['profitperc']].sum()
167 df.loc[:, col_name] = _trades_sum.cumsum()
168 # Set first value to 0
169 df.loc[df.iloc[0].name, col_name] = 0
170 # FFill to get continuous
171 df[col_name] = df[col_name].ffill()
172 return df
173
[end of freqtrade/data/btanalysis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py
--- a/freqtrade/data/btanalysis.py
+++ b/freqtrade/data/btanalysis.py
@@ -52,16 +52,18 @@
return df
-def evaluate_result_multi(results: pd.DataFrame, freq: str, max_open_trades: int) -> pd.DataFrame:
+def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataFrame:
"""
Find overlapping trades by expanding each trade once per period it was open
- and then counting overlaps
+ and then counting overlaps.
:param results: Results Dataframe - can be loaded
- :param freq: Frequency used for the backtest
- :param max_open_trades: parameter max_open_trades used during backtest run
- :return: dataframe with open-counts per time-period in freq
+ :param timeframe: Timeframe used for backtest
+ :return: dataframe with open-counts per time-period in timeframe
"""
- dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time, freq=freq))
+ from freqtrade.exchange import timeframe_to_minutes
+ timeframe_min = timeframe_to_minutes(timeframe)
+ dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time,
+ freq=f"{timeframe_min}min"))
for row in results[['open_time', 'close_time']].iterrows()]
deltas = [len(x) for x in dates]
dates = pd.Series(pd.concat(dates).values, name='date')
@@ -69,8 +71,23 @@
df2 = pd.concat([dates, df2], axis=1)
df2 = df2.set_index('date')
- df_final = df2.resample(freq)[['pair']].count()
- return df_final[df_final['pair'] > max_open_trades]
+ df_final = df2.resample(f"{timeframe_min}min")[['pair']].count()
+ df_final = df_final.rename({'pair': 'open_trades'}, axis=1)
+ return df_final
+
+
+def evaluate_result_multi(results: pd.DataFrame, timeframe: str,
+ max_open_trades: int) -> pd.DataFrame:
+ """
+ Find overlapping trades by expanding each trade once per period it was open
+ and then counting overlaps
+ :param results: Results Dataframe - can be loaded
+ :param timeframe: Frequency used for the backtest
+ :param max_open_trades: parameter max_open_trades used during backtest run
+ :return: dataframe with open-counts per time-period in freq
+ """
+ df_final = analyze_trade_parallelism(results, timeframe)
+ return df_final[df_final['open_trades'] > max_open_trades]
def load_trades_from_db(db_url: str) -> pd.DataFrame:
|
{"golden_diff": "diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py\n--- a/freqtrade/data/btanalysis.py\n+++ b/freqtrade/data/btanalysis.py\n@@ -52,16 +52,18 @@\n return df\n \n \n-def evaluate_result_multi(results: pd.DataFrame, freq: str, max_open_trades: int) -> pd.DataFrame:\n+def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataFrame:\n \"\"\"\n Find overlapping trades by expanding each trade once per period it was open\n- and then counting overlaps\n+ and then counting overlaps.\n :param results: Results Dataframe - can be loaded\n- :param freq: Frequency used for the backtest\n- :param max_open_trades: parameter max_open_trades used during backtest run\n- :return: dataframe with open-counts per time-period in freq\n+ :param timeframe: Timeframe used for backtest\n+ :return: dataframe with open-counts per time-period in timeframe\n \"\"\"\n- dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time, freq=freq))\n+ from freqtrade.exchange import timeframe_to_minutes\n+ timeframe_min = timeframe_to_minutes(timeframe)\n+ dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time,\n+ freq=f\"{timeframe_min}min\"))\n for row in results[['open_time', 'close_time']].iterrows()]\n deltas = [len(x) for x in dates]\n dates = pd.Series(pd.concat(dates).values, name='date')\n@@ -69,8 +71,23 @@\n \n df2 = pd.concat([dates, df2], axis=1)\n df2 = df2.set_index('date')\n- df_final = df2.resample(freq)[['pair']].count()\n- return df_final[df_final['pair'] > max_open_trades]\n+ df_final = df2.resample(f\"{timeframe_min}min\")[['pair']].count()\n+ df_final = df_final.rename({'pair': 'open_trades'}, axis=1)\n+ return df_final\n+\n+\n+def evaluate_result_multi(results: pd.DataFrame, timeframe: str,\n+ max_open_trades: int) -> pd.DataFrame:\n+ \"\"\"\n+ Find overlapping trades by expanding each trade once per period it was open\n+ and then counting overlaps\n+ :param results: Results Dataframe - can be loaded\n+ :param timeframe: Frequency used for the backtest\n+ :param max_open_trades: parameter max_open_trades used during backtest run\n+ :return: dataframe with open-counts per time-period in freq\n+ \"\"\"\n+ df_final = analyze_trade_parallelism(results, timeframe)\n+ return df_final[df_final['open_trades'] > max_open_trades]\n \n \n def load_trades_from_db(db_url: str) -> pd.DataFrame:\n", "issue": "Strategy analysis -> how many max_open_trades i will use for this strategy ?\nAs an algo trade\r\nI want to perform statistics like maximum drawdown, profit and find an optimum between stake_amount and max_open_trade so i run and export the backtest with --disable-max-market-positions flag and make simulations in jupyter.\r\nSo that i can find a comprise between risk and profit.\r\n\r\nTo archive that i need to find trades that happens at the same time and flag them (open_trade_number) and then filter then.\r\n\r\nI don't know if it can be useful to have this straight in a column of the dataframe of load_backtest_data() ?\r\n\r\nMaybe it is a too specific need.\r\n\r\nHave a nice day.\n", "before_files": [{"content": "\"\"\"\nHelpers when analyzing backtest data\n\"\"\"\nimport logging\nfrom pathlib import Path\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\nimport pytz\n\nfrom freqtrade import persistence\nfrom freqtrade.misc import json_load\nfrom freqtrade.persistence import Trade\n\nlogger = logging.getLogger(__name__)\n\n# must align with columns in backtest.py\nBT_DATA_COLUMNS = [\"pair\", \"profitperc\", 
\"open_time\", \"close_time\", \"index\", \"duration\",\n \"open_rate\", \"close_rate\", \"open_at_end\", \"sell_reason\"]\n\n\ndef load_backtest_data(filename) -> pd.DataFrame:\n \"\"\"\n Load backtest data file.\n :param filename: pathlib.Path object, or string pointing to the file.\n :return: a dataframe with the analysis results\n \"\"\"\n if isinstance(filename, str):\n filename = Path(filename)\n\n if not filename.is_file():\n raise ValueError(f\"File {filename} does not exist.\")\n\n with filename.open() as file:\n data = json_load(file)\n\n df = pd.DataFrame(data, columns=BT_DATA_COLUMNS)\n\n df['open_time'] = pd.to_datetime(df['open_time'],\n unit='s',\n utc=True,\n infer_datetime_format=True\n )\n df['close_time'] = pd.to_datetime(df['close_time'],\n unit='s',\n utc=True,\n infer_datetime_format=True\n )\n df['profitabs'] = df['close_rate'] - df['open_rate']\n df = df.sort_values(\"open_time\").reset_index(drop=True)\n return df\n\n\ndef evaluate_result_multi(results: pd.DataFrame, freq: str, max_open_trades: int) -> pd.DataFrame:\n \"\"\"\n Find overlapping trades by expanding each trade once per period it was open\n and then counting overlaps\n :param results: Results Dataframe - can be loaded\n :param freq: Frequency used for the backtest\n :param max_open_trades: parameter max_open_trades used during backtest run\n :return: dataframe with open-counts per time-period in freq\n \"\"\"\n dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time, freq=freq))\n for row in results[['open_time', 'close_time']].iterrows()]\n deltas = [len(x) for x in dates]\n dates = pd.Series(pd.concat(dates).values, name='date')\n df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)\n\n df2 = pd.concat([dates, df2], axis=1)\n df2 = df2.set_index('date')\n df_final = df2.resample(freq)[['pair']].count()\n return df_final[df_final['pair'] > max_open_trades]\n\n\ndef load_trades_from_db(db_url: str) -> pd.DataFrame:\n \"\"\"\n Load trades from a DB (using dburl)\n :param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)\n :return: Dataframe containing Trades\n \"\"\"\n trades: pd.DataFrame = pd.DataFrame([], columns=BT_DATA_COLUMNS)\n persistence.init(db_url, clean_open_orders=False)\n\n columns = [\"pair\", \"open_time\", \"close_time\", \"profit\", \"profitperc\",\n \"open_rate\", \"close_rate\", \"amount\", \"duration\", \"sell_reason\",\n \"fee_open\", \"fee_close\", \"open_rate_requested\", \"close_rate_requested\",\n \"stake_amount\", \"max_rate\", \"min_rate\", \"id\", \"exchange\",\n \"stop_loss\", \"initial_stop_loss\", \"strategy\", \"ticker_interval\"]\n\n trades = pd.DataFrame([(t.pair,\n t.open_date.replace(tzinfo=pytz.UTC),\n t.close_date.replace(tzinfo=pytz.UTC) if t.close_date else None,\n t.calc_profit(), t.calc_profit_percent(),\n t.open_rate, t.close_rate, t.amount,\n (round((t.close_date.timestamp() - t.open_date.timestamp()) / 60, 2)\n if t.close_date else None),\n t.sell_reason,\n t.fee_open, t.fee_close,\n t.open_rate_requested,\n t.close_rate_requested,\n t.stake_amount,\n t.max_rate,\n t.min_rate,\n t.id, t.exchange,\n t.stop_loss, t.initial_stop_loss,\n t.strategy, t.ticker_interval\n )\n for t in Trade.query.all()],\n columns=columns)\n\n return trades\n\n\ndef load_trades(source: str, db_url: str, exportfilename: str) -> pd.DataFrame:\n \"\"\"\n Based on configuration option \"trade_source\":\n * loads data from DB (using `db_url`)\n * loads data from backtestfile (using `exportfilename`)\n \"\"\"\n if source 
== \"DB\":\n return load_trades_from_db(db_url)\n elif source == \"file\":\n return load_backtest_data(Path(exportfilename))\n\n\ndef extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Compare trades and backtested pair DataFrames to get trades performed on backtested period\n :return: the DataFrame of a trades of period\n \"\"\"\n trades = trades.loc[(trades['open_time'] >= dataframe.iloc[0]['date']) &\n (trades['close_time'] <= dataframe.iloc[-1]['date'])]\n return trades\n\n\ndef combine_tickers_with_mean(tickers: Dict[str, pd.DataFrame], column: str = \"close\"):\n \"\"\"\n Combine multiple dataframes \"column\"\n :param tickers: Dict of Dataframes, dict key should be pair.\n :param column: Column in the original dataframes to use\n :return: DataFrame with the column renamed to the dict key, and a column\n named mean, containing the mean of all pairs.\n \"\"\"\n df_comb = pd.concat([tickers[pair].set_index('date').rename(\n {column: pair}, axis=1)[pair] for pair in tickers], axis=1)\n\n df_comb['mean'] = df_comb.mean(axis=1)\n\n return df_comb\n\n\ndef create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,\n timeframe: str) -> pd.DataFrame:\n \"\"\"\n Adds a column `col_name` with the cumulative profit for the given trades array.\n :param df: DataFrame with date index\n :param trades: DataFrame containing trades (requires columns close_time and profitperc)\n :param col_name: Column name that will be assigned the results\n :param timeframe: Timeframe used during the operations\n :return: Returns df with one additional column, col_name, containing the cumulative profit.\n \"\"\"\n from freqtrade.exchange import timeframe_to_minutes\n ticker_minutes = timeframe_to_minutes(timeframe)\n # Resample to ticker_interval to make sure trades match candles\n _trades_sum = trades.resample(f'{ticker_minutes}min', on='close_time')[['profitperc']].sum()\n df.loc[:, col_name] = _trades_sum.cumsum()\n # Set first value to 0\n df.loc[df.iloc[0].name, col_name] = 0\n # FFill to get continuous\n df[col_name] = df[col_name].ffill()\n return df\n", "path": "freqtrade/data/btanalysis.py"}]}
| 2,674 | 648 |
gh_patches_debug_7108
|
rasdani/github-patches
|
git_diff
|
vas3k__vas3k.club-858
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: it is possible to mute yourself
## Checklist
- [x] I searched the tracker for similar issues, including closed ones
- [x] The bug reproduces consistently and I know how to trigger it
## Bug description
Technically, a user can mute themselves, after which they will no longer see their own posts and comments.
A user can also unmute themselves on their own (this works in both directions).
## Expected result
- When a user tries to mute themselves, the system shows a stub template: you cannot mute yourself (a minimal sketch of such a guard is given after the reproduction steps below).
- Muting and unmuting yourself becomes impossible.
## Steps to reproduce
1. **Mute**: go to the URL `vas3k.club/user/%USERNAME%/mute/`, where `%USERNAME%` is the user's nickname (slug)
2. Follow the mute wizard.
3. **Unmute**: go to the URL `vas3k.club/user/%USERNAME%/mute/`, where `%USERNAME%` is the user's nickname (slug)
4. Follow the mute wizard.
Screenshot of the mute template page (production):

</issue>
<code>
[start of users/views/muted.py]
1 from django.conf import settings
2 from django.http import HttpResponseForbidden
3 from django.shortcuts import get_object_or_404, render
4
5 from auth.helpers import auth_required
6 from club.exceptions import AccessDenied
7 from notifications.telegram.users import notify_admin_user_on_mute
8 from users.models.mute import Muted
9 from users.models.user import User
10
11
12 @auth_required
13 def toggle_mute(request, user_slug):
14 user_to = get_object_or_404(User, slug=user_slug)
15 if user_to.is_curator or user_to.is_moderator:
16 raise AccessDenied(title="У этого юзера иммунитет от мьюта")
17
18 total_user_muted_count = Muted.objects.filter(user_from=request.me).count()
19
20 # show form on GET
21 if request.method != "POST":
22 is_muted = Muted.is_muted(
23 user_from=request.me,
24 user_to=user_to,
25 )
26 if is_muted:
27 return render(request, "users/mute/unmute.html", {
28 "user": user_to,
29 })
30 else:
31 return render(request, "users/mute/mute.html", {
32 "user": user_to,
33 "mutes_left": settings.MAX_MUTE_COUNT - total_user_muted_count,
34 })
35
36 # else — process POST
37 if total_user_muted_count > settings.MAX_MUTE_COUNT:
38 raise AccessDenied(
39 title="Вы замьютили слишком много людей",
40 message="Рекомендуем притормозить и поговорить с кем-нибудь..."
41 )
42
43 comment = request.POST.get("comment") or ""
44 mute, is_created = Muted.mute(
45 user_from=request.me,
46 user_to=user_to,
47 comment=comment,
48 )
49
50 if is_created:
51 # notify admins
52 notify_admin_user_on_mute(
53 user_from=request.me,
54 user_to=user_to,
55 comment=comment,
56 )
57
58 return render(request, "users/messages/muted.html", {
59 "user": user_to,
60 })
61 else:
62 # unmute this user
63 Muted.unmute(
64 user_from=request.me,
65 user_to=user_to,
66 )
67
68 return render(request, "users/messages/unmuted.html", {
69 "user": user_to,
70 })
71
72
73 @auth_required
74 def muted(request, user_slug):
75 if request.me.slug != user_slug:
76 return HttpResponseForbidden()
77
78 user = get_object_or_404(User, slug=user_slug)
79 muted_users = Muted.muted_by_user(user)
80
81 return render(request, "users/mute/index.html", {
82 "user": user,
83 "muted": muted_users,
84 })
85
[end of users/views/muted.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/users/views/muted.py b/users/views/muted.py
--- a/users/views/muted.py
+++ b/users/views/muted.py
@@ -15,6 +15,10 @@
if user_to.is_curator or user_to.is_moderator:
raise AccessDenied(title="У этого юзера иммунитет от мьюта")
+ if user_to == request.me:
+ raise AccessDenied(title='Хорошая попытка, но мьютить себя нельзя. Кожаный мешок, ты прекрасен!',
+ message='')
+
total_user_muted_count = Muted.objects.filter(user_from=request.me).count()
# show form on GET
|
{"golden_diff": "diff --git a/users/views/muted.py b/users/views/muted.py\n--- a/users/views/muted.py\n+++ b/users/views/muted.py\n@@ -15,6 +15,10 @@\n if user_to.is_curator or user_to.is_moderator:\n raise AccessDenied(title=\"\u0423 \u044d\u0442\u043e\u0433\u043e \u044e\u0437\u0435\u0440\u0430 \u0438\u043c\u043c\u0443\u043d\u0438\u0442\u0435\u0442 \u043e\u0442 \u043c\u044c\u044e\u0442\u0430\")\n \n+ if user_to == request.me:\n+ raise AccessDenied(title='\u0425\u043e\u0440\u043e\u0448\u0430\u044f \u043f\u043e\u043f\u044b\u0442\u043a\u0430, \u043d\u043e \u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0435\u0431\u044f \u043d\u0435\u043b\u044c\u0437\u044f. \u041a\u043e\u0436\u0430\u043d\u044b\u0439 \u043c\u0435\u0448\u043e\u043a, \u0442\u044b \u043f\u0440\u0435\u043a\u0440\u0430\u0441\u0435\u043d!',\n+ message='')\n+\n total_user_muted_count = Muted.objects.filter(user_from=request.me).count()\n \n # show form on GET\n", "issue": "Bug: \u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u044c \u0437\u0430\u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f\n## \u0427\u0435\u043a\u043b\u0438\u0441\u0442\r\n\r\n- [x] \u042f \u043f\u043e\u0438\u0441\u043a\u0430\u043b \u043f\u043e\u0438\u0441\u043a\u043e\u043c \u043f\u043e \u0442\u0440\u0435\u043a\u0435\u0440\u0443 \u043f\u043e\u0445\u043e\u0436\u0438\u0435 \u043f\u0440\u043e\u0431\u043b\u0435\u043c\u044b, \u0432 \u0442\u043e\u043c \u0447\u0438\u0441\u043b\u0435 \u0432 \u0437\u0430\u043a\u0440\u044b\u0442\u044b\u0445 Issues\r\n- [x] \u0411\u0430\u0433 \u0441\u0442\u0430\u0431\u0438\u043b\u044c\u043d\u043e \u0432\u043e\u0441\u043f\u0440\u043e\u0438\u0437\u0432\u043e\u0434\u0438\u0442\u0441\u044f \u0438 \u044f \u0437\u043d\u0430\u044e \u043a\u0430\u043a \u044d\u0442\u043e \u0441\u0434\u0435\u043b\u0430\u0442\u044c\r\n\r\n## \u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 \u0431\u0430\u0433\u0430\r\n\r\n\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0442\u0435\u0445\u043d\u0438\u0447\u0435\u0441\u043a\u0438 \u043c\u043e\u0436\u0435\u0442 \u0437\u0430\u043c\u044c\u044e\u0438\u0442\u044c \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f, \u043f\u043e\u0441\u043b\u0435 \u0447\u0435\u0433\u043e \u043d\u0435 \u0441\u043c\u043e\u0436\u0435\u0442 \u0443\u0432\u0438\u0434\u0435\u0442\u044c \u0441\u0432\u043e\u0438 \u043f\u043e\u0441\u0442\u044b \u0438 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0438.\r\n\u041e\u0442\u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0435\u0431\u044f \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0442\u043e\u0436\u0435 \u043c\u043e\u0436\u0435\u0442 \u0441\u0430\u043c\u043e\u0441\u0442\u043e\u044f\u0442\u0435\u043b\u044c\u043d\u043e (\u044d\u0442\u043e \u0440\u0430\u0431\u043e\u0442\u0430\u0435\u0442 \u0432 \u043e\u0431\u0435 \u0441\u0442\u043e\u0440\u043e\u043d\u044b).\r\n\r\n## \u041e\u0436\u0438\u0434\u0430\u0435\u043c\u044b\u0439 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\r\n\r\n- \u041f\u0440\u0438 \u0432\u044b\u0437\u043e\u0432\u0435 \u043c\u044c\u044e\u0442\u0430 \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f \u0441\u0438\u0441\u0442\u0435\u043c\u0430 \u043f\u043e\u043a\u0430\u0437\u044b\u0432\u0430\u0435\u0442 \u0448\u0430\u0431\u043b\u043e\u043d \u0441 \u0437\u0430\u0433\u043b\u0443\u0448\u043a\u043e\u0439: \u043d\u0435\u043b\u044c\u0437\u044f \u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0435\u0431\u044f.\r\n- 
\u041d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u044c \u043c\u044c\u044e\u0442\u0430 \u0438 \u0440\u0430\u0437\u043c\u044c\u044e\u0442\u0430 \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f.\r\n\r\n## \u0428\u0430\u0433\u0438 \u043a \u0432\u043e\u0441\u043f\u0440\u043e\u0438\u0437\u0432\u0435\u0434\u0435\u043d\u0438\u044e\r\n\r\n1. **Mute**: \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043d\u0430 URL `vas3k.club/user/%USERNAME%/mute/`, \u0433\u0434\u0435 `%USERNAME%` \u2014 \u043f\u0441\u0435\u0432\u0434\u043e\u043d\u0438\u043c \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f (slug) \r\n2. \u0421\u043b\u0435\u0434\u043e\u0432\u0430\u0442\u044c \u043c\u0430\u0441\u0442\u0435\u0440\u0443 \u043c\u044c\u044e\u0442\u0430.\r\n3. **Unmute**: \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043d\u0430 URL `vas3k.club/user/%USERNAME%/mute/`, \u0433\u0434\u0435 `%USERNAME%` \u2014 \u043f\u0441\u0435\u0432\u0434\u043e\u043d\u0438\u043c \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f (slug) \r\n4. \u0421\u043b\u0435\u0434\u043e\u0432\u0430\u0442\u044c \u043c\u0430\u0441\u0442\u0435\u0440\u0443 \u043c\u044c\u044e\u0442\u0430.\r\n\r\n\u0421\u043a\u0440\u0438\u043d\u0448\u043e\u0442 \u0441\u043e \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b \u0448\u0430\u0431\u043b\u043e\u043d\u0430 \u043c\u044c\u044e\u0442\u0430 (\u043f\u0440\u043e\u0434\u0430\u043a\u0448\u043d):\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, render\n\nfrom auth.helpers import auth_required\nfrom club.exceptions import AccessDenied\nfrom notifications.telegram.users import notify_admin_user_on_mute\nfrom users.models.mute import Muted\nfrom users.models.user import User\n\n\n@auth_required\ndef toggle_mute(request, user_slug):\n user_to = get_object_or_404(User, slug=user_slug)\n if user_to.is_curator or user_to.is_moderator:\n raise AccessDenied(title=\"\u0423 \u044d\u0442\u043e\u0433\u043e \u044e\u0437\u0435\u0440\u0430 \u0438\u043c\u043c\u0443\u043d\u0438\u0442\u0435\u0442 \u043e\u0442 \u043c\u044c\u044e\u0442\u0430\")\n\n total_user_muted_count = Muted.objects.filter(user_from=request.me).count()\n\n # show form on GET\n if request.method != \"POST\":\n is_muted = Muted.is_muted(\n user_from=request.me,\n user_to=user_to,\n )\n if is_muted:\n return render(request, \"users/mute/unmute.html\", {\n \"user\": user_to,\n })\n else:\n return render(request, \"users/mute/mute.html\", {\n \"user\": user_to,\n \"mutes_left\": settings.MAX_MUTE_COUNT - total_user_muted_count,\n })\n\n # else \u2014 process POST\n if total_user_muted_count > settings.MAX_MUTE_COUNT:\n raise AccessDenied(\n title=\"\u0412\u044b \u0437\u0430\u043c\u044c\u044e\u0442\u0438\u043b\u0438 \u0441\u043b\u0438\u0448\u043a\u043e\u043c \u043c\u043d\u043e\u0433\u043e \u043b\u044e\u0434\u0435\u0439\",\n message=\"\u0420\u0435\u043a\u043e\u043c\u0435\u043d\u0434\u0443\u0435\u043c \u043f\u0440\u0438\u0442\u043e\u0440\u043c\u043e\u0437\u0438\u0442\u044c \u0438 \u043f\u043e\u0433\u043e\u0432\u043e\u0440\u0438\u0442\u044c \u0441 \u043a\u0435\u043c-\u043d\u0438\u0431\u0443\u0434\u044c...\"\n )\n\n comment = request.POST.get(\"comment\") or \"\"\n mute, is_created = Muted.mute(\n user_from=request.me,\n user_to=user_to,\n comment=comment,\n )\n\n if is_created:\n # notify admins\n notify_admin_user_on_mute(\n user_from=request.me,\n user_to=user_to,\n comment=comment,\n )\n\n 
return render(request, \"users/messages/muted.html\", {\n \"user\": user_to,\n })\n else:\n # unmute this user\n Muted.unmute(\n user_from=request.me,\n user_to=user_to,\n )\n\n return render(request, \"users/messages/unmuted.html\", {\n \"user\": user_to,\n })\n\n\n@auth_required\ndef muted(request, user_slug):\n if request.me.slug != user_slug:\n return HttpResponseForbidden()\n\n user = get_object_or_404(User, slug=user_slug)\n muted_users = Muted.muted_by_user(user)\n\n return render(request, \"users/mute/index.html\", {\n \"user\": user,\n \"muted\": muted_users,\n })\n", "path": "users/views/muted.py"}]}
| 1,645 | 157 |
gh_patches_debug_15840
|
rasdani/github-patches
|
git_diff
|
napari__napari-1494
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CTRL-C should exit napari gracefully
## 🐛 Bug
After #1476 napari just prints `KeyboardInterrupt` when CTRL-C is pressed in the system terminal window that was used to launch napari. Prior to #1476 it exited with a crash, which got the job done but was not great.
Ideally napari would exit gracefully when you hit CTRL-C in the system terminal window.
## To Reproduce
Steps to reproduce the behavior:
1. From a system terminal (e.g. Terminal program on mac)
2. Run "napari" or a script that uses `napari.gui_qt()`
3. Switch back to the terminal window and type CTRL-C
## Expected behavior
Napari exits gracefully.
## Environment
```
napari: not-installed
Platform: macOS-10.15.3-x86_64-i386-64bit
Python: 3.8.1 (default, Jan 8 2020, 16:15:59) [Clang 4.0.1 (tags/RELEASE_401/final)]
Qt: 5.14.2
PyQt5: 5.14.2
NumPy: 1.18.4
SciPy: 1.4.1
Dask: 2.17.2
VisPy: 0.6.5.dev111+g8387ea1a.d20200424
GL version: 2.1 ATI-3.5.5
MAX_TEXTURE_SIZE: 16384
Plugins:
- napari-plugin-engine: 0.1.6
- svg: 0.1.3
```
## Additional context
This is low priority since you can exit with the Quit command, or from the system terminal hit CTRL-Z and `kill %1` the app if necessary. However it seems like exiting gracefully is the right behavior long term.
I tried adding this to our new `ExceptionHandler` class:
```
# Interpret CTRL-C as a request to quit.
if isinstance(value, KeyboardInterrupt):
QApplication.instance().quit()
return
```
but while it exits cleanly sometimes, sometimes it bus errors or seg faults.
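For comparison, a commonly used Qt pattern (only a sketch, wired up before starting the event loop; not the napari implementation) is to install a Python-level SIGINT handler and keep a no-op timer running so the interpreter gets a chance to deliver the signal:
```
import signal

from qtpy.QtCore import QTimer
from qtpy.QtWidgets import QApplication


def install_sigint_quit(interval_ms: int = 200) -> QTimer:
    # ask Qt to quit from the main thread when CTRL-C arrives
    signal.signal(signal.SIGINT, lambda *args: QApplication.instance().quit())
    # a no-op timer periodically returns control to the Python interpreter;
    # without it the handler never runs while the Qt event loop is blocking
    timer = QTimer()
    timer.timeout.connect(lambda: None)
    timer.start(interval_ms)
    return timer  # keep a reference so the timer is not garbage collected
```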
</issue>
<code>
[start of napari/_qt/exceptions.py]
1 import logging
2 import os
3 import traceback
4 from types import TracebackType
5 from typing import Optional, Type
6
7 from qtpy.QtCore import QObject, Signal
8
9 from .qt_error_notification import NapariNotification
10
11
12 class ExceptionHandler(QObject):
13 """General class to handle all uncaught exceptions in the Qt event loop.
14
15 Parameters
16 ----------
17 parent : QObject, optional
18 parent object, by default None
19 gui_exceptions : bool, optional
20 Whether to show exceptions as, by default True. May be overriden by
21 environment variable: ``NAPARI_CATCH_ERRORS=1`
22 Note: this ``False`` by default in ``gui_qt()`` (the main
23 instantiator of this class), but it is ``True`` in ``napari.__main__``.
24 As a result, exceptions will be shown in the GUI only (mostly) when
25 running napari as ``napari`` or ``python -m napari`` from the command
26 line.
27 """
28
29 error = Signal(tuple)
30 message: Optional[NapariNotification] = None
31
32 def __init__(self, parent=None, *, gui_exceptions=True):
33 super().__init__(parent)
34 if os.getenv("NAPARI_CATCH_ERRORS") in ('0', 'False'):
35 self.gui_exceptions = False
36 else:
37 self.gui_exceptions = gui_exceptions
38
39 def handle(
40 self,
41 etype: Type[BaseException],
42 value: BaseException,
43 tb: TracebackType,
44 ):
45 """Our sys.excepthook override.
46
47 This function handles uncaught exceptions and can delegate to a
48 secondary handler, whether it be a GUI dialog, or an IPython traceback
49 printout. The override to ``sys.excepthook`` happens in
50 :func:`napari.gui_qt`, and therefore this is only active when the qt
51 event loop has been started by napari.
52
53 The three parameters here are what would be returned from
54 :func:`sys.exc_info()`.
55
56 Parameters
57 ----------
58 etype : Type[BaseException]
59 The type of error raised
60 value : BaseException
61 The error instance
62 tb : TracebackType
63 The traceback object associated with the error.
64 """
65 if self.gui_exceptions:
66 self._show_error_dialog(value)
67 else:
68 text = "".join(traceback.format_exception(etype, value, tb))
69 logging.error("Unhandled exception:\n%s", text)
70 self.error.emit((etype, value, tb))
71
72 def _show_error_dialog(self, exception: BaseException):
73 self.message = NapariNotification.from_exception(exception)
74 self.message.show()
75
[end of napari/_qt/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/napari/_qt/exceptions.py b/napari/_qt/exceptions.py
--- a/napari/_qt/exceptions.py
+++ b/napari/_qt/exceptions.py
@@ -1,5 +1,6 @@
import logging
import os
+import sys
import traceback
from types import TracebackType
from typing import Optional, Type
@@ -62,6 +63,12 @@
tb : TracebackType
The traceback object associated with the error.
"""
+ # etype.__module__ contains the module raising the error
+ # Custom exception classes can have different behavior
+ # can add custom exception handlers here ...
+ if isinstance(value, KeyboardInterrupt):
+ print("Closed by KeyboardInterrupt", file=sys.stderr)
+ sys.exit(1)
if self.gui_exceptions:
self._show_error_dialog(value)
else:
|
{"golden_diff": "diff --git a/napari/_qt/exceptions.py b/napari/_qt/exceptions.py\n--- a/napari/_qt/exceptions.py\n+++ b/napari/_qt/exceptions.py\n@@ -1,5 +1,6 @@\n import logging\n import os\n+import sys\n import traceback\n from types import TracebackType\n from typing import Optional, Type\n@@ -62,6 +63,12 @@\n tb : TracebackType\n The traceback object associated with the error.\n \"\"\"\n+ # etype.__module__ contains the module raising the error\n+ # Custom exception classes can have different behavior\n+ # can add custom exception handlers here ...\n+ if isinstance(value, KeyboardInterrupt):\n+ print(\"Closed by KeyboardInterrupt\", file=sys.stderr)\n+ sys.exit(1)\n if self.gui_exceptions:\n self._show_error_dialog(value)\n else:\n", "issue": "CTRL-C should exit napari gracefully\n## \ud83d\udc1b Bug\r\n\r\nAfter #1476 napari just prints `KeyboardInterrupt` when CTRL-C is pressed in the system terminal window that used to launch napari. Prior to 1476 it exited with a crash, which got the job done but was not great. \r\n\r\nIdeally napari would exit gracefully when you hit CTRL-C in the system terminal window.\r\n\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. From a system terminal (e.g. Terminal program on mac)\r\n2. Run \"napari\" or a script that uses `napari.gui_qt()`\r\n3. Switch back to the terminal window and type CTRL-C\r\n\r\n## Expected behavior\r\n\r\nNapari exits gracefully.\r\n\r\n## Environment\r\n\r\n```\r\nnapari: not-installed\r\nPlatform: macOS-10.15.3-x86_64-i386-64bit\r\nPython: 3.8.1 (default, Jan 8 2020, 16:15:59) [Clang 4.0.1 (tags/RELEASE_401/final)]\r\nQt: 5.14.2\r\nPyQt5: 5.14.2\r\nNumPy: 1.18.4\r\nSciPy: 1.4.1\r\nDask: 2.17.2\r\nVisPy: 0.6.5.dev111+g8387ea1a.d20200424\r\n\r\nGL version: 2.1 ATI-3.5.5\r\nMAX_TEXTURE_SIZE: 16384\r\n\r\nPlugins:\r\n- napari-plugin-engine: 0.1.6\r\n- svg: 0.1.3\r\n```\r\n\r\n## Additional context\r\n\r\nThis is low priority since you can exit with the Quit command, or from the system terminal hit CTRL-Z and `kill %1` the app if necessary. However it seems like exiting gracefully is the right behavior long term.\r\n\r\nI tried adding this to our new `ExceptionHandler` class:\r\n```\r\n # Interpret CTRL-C as a request to quit.\r\n if isinstance(value, KeyboardInterrupt):\r\n QApplication.instance().quit()\r\n return\r\n```\r\nbut while it exits cleanly sometimes, sometimes it bus errors or seg faults.\n", "before_files": [{"content": "import logging\nimport os\nimport traceback\nfrom types import TracebackType\nfrom typing import Optional, Type\n\nfrom qtpy.QtCore import QObject, Signal\n\nfrom .qt_error_notification import NapariNotification\n\n\nclass ExceptionHandler(QObject):\n \"\"\"General class to handle all uncaught exceptions in the Qt event loop.\n\n Parameters\n ----------\n parent : QObject, optional\n parent object, by default None\n gui_exceptions : bool, optional\n Whether to show exceptions as, by default True. 
May be overriden by\n environment variable: ``NAPARI_CATCH_ERRORS=1`\n Note: this ``False`` by default in ``gui_qt()`` (the main\n instantiator of this class), but it is ``True`` in ``napari.__main__``.\n As a result, exceptions will be shown in the GUI only (mostly) when\n running napari as ``napari`` or ``python -m napari`` from the command\n line.\n \"\"\"\n\n error = Signal(tuple)\n message: Optional[NapariNotification] = None\n\n def __init__(self, parent=None, *, gui_exceptions=True):\n super().__init__(parent)\n if os.getenv(\"NAPARI_CATCH_ERRORS\") in ('0', 'False'):\n self.gui_exceptions = False\n else:\n self.gui_exceptions = gui_exceptions\n\n def handle(\n self,\n etype: Type[BaseException],\n value: BaseException,\n tb: TracebackType,\n ):\n \"\"\"Our sys.excepthook override.\n\n This function handles uncaught exceptions and can delegate to a\n secondary handler, whether it be a GUI dialog, or an IPython traceback\n printout. The override to ``sys.excepthook`` happens in\n :func:`napari.gui_qt`, and therefore this is only active when the qt\n event loop has been started by napari.\n\n The three parameters here are what would be returned from\n :func:`sys.exc_info()`.\n\n Parameters\n ----------\n etype : Type[BaseException]\n The type of error raised\n value : BaseException\n The error instance\n tb : TracebackType\n The traceback object associated with the error.\n \"\"\"\n if self.gui_exceptions:\n self._show_error_dialog(value)\n else:\n text = \"\".join(traceback.format_exception(etype, value, tb))\n logging.error(\"Unhandled exception:\\n%s\", text)\n self.error.emit((etype, value, tb))\n\n def _show_error_dialog(self, exception: BaseException):\n self.message = NapariNotification.from_exception(exception)\n self.message.show()\n", "path": "napari/_qt/exceptions.py"}]}
| 1,748 | 192 |
gh_patches_debug_18602
|
rasdani/github-patches
|
git_diff
|
getmoto__moto-2193
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Several KMS operations are broken on Windows in 1.3.8
The change from https://github.com/spulec/moto/pull/2073 breaks several KMS operations on Windows because the `%s` format specifier for `strftime` is platform-specific (a glibc extension) and is not supported by Python on Windows.
This can be easily fixed by changing the timestamp generation to use:
> `(datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()`
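For illustration, a minimal sketch of that platform-independent computation (`unix_timestamp` is a name introduced here, not moto's API, and the real fix may format the value differently):

```
from datetime import datetime

def unix_timestamp():
    # strftime("%s") is a glibc extension and raises ValueError on Windows,
    # so compute seconds since the Unix epoch explicitly instead.
    return (datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()

print("%d" % unix_timestamp())  # e.g. 1546300800
```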
An example stack trace of a test that throws is included below.
```
tests\test_kms.py:33: in _create_kms_key
key = kms_client.create_key(Policy="Policy", KeyUsage='ENCRYPT_DECRYPT')
wenv\lib\site-packages\botocore\client.py:357: in _api_call
return self._make_api_call(operation_name, kwargs)
wenv\lib\site-packages\botocore\client.py:648: in _make_api_call
operation_model, request_dict, request_context)
wenv\lib\site-packages\botocore\client.py:667: in _make_request
return self._endpoint.make_request(operation_model, request_dict)
wenv\lib\site-packages\botocore\endpoint.py:102: in make_request
return self._send_request(request_dict, operation_model)
wenv\lib\site-packages\botocore\endpoint.py:137: in _send_request
success_response, exception):
wenv\lib\site-packages\botocore\endpoint.py:231: in _needs_retry
caught_exception=caught_exception, request_dict=request_dict)
wenv\lib\site-packages\botocore\hooks.py:356: in emit
return self._emitter.emit(aliased_event_name, **kwargs)
wenv\lib\site-packages\botocore\hooks.py:228: in emit
return self._emit(event_name, kwargs)
wenv\lib\site-packages\botocore\hooks.py:211: in _emit
response = handler(**kwargs)
wenv\lib\site-packages\botocore\retryhandler.py:183: in __call__
if self._checker(attempts, response, caught_exception):
wenv\lib\site-packages\botocore\retryhandler.py:251: in __call__
caught_exception)
wenv\lib\site-packages\botocore\retryhandler.py:269: in _should_retry
return self._checker(attempt_number, response, caught_exception)
wenv\lib\site-packages\botocore\retryhandler.py:317: in __call__
caught_exception)
wenv\lib\site-packages\botocore\retryhandler.py:223: in __call__
attempt_number, caught_exception)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <botocore.retryhandler.ExceptionRaiser object at 0x0000000011A51A58>
attempt_number = 1, caught_exception = ValueError('Invalid format string',)
def _check_caught_exception(self, attempt_number, caught_exception):
# This is implementation specific, but this class is useful by
# coordinating with the MaxAttemptsDecorator.
# The MaxAttemptsDecorator has a list of exceptions it should catch
# and retry, but something needs to come along and actually raise the
# caught_exception. That's what this class is being used for. If
# the MaxAttemptsDecorator is not interested in retrying the exception
# then this exception just propogates out past the retry code.
print(dir(caught_exception))
> raise caught_exception
E ValueError: Invalid format string
```
</issue>
<code>
[start of moto/kms/models.py]
1 from __future__ import unicode_literals
2
3 import os
4 import boto.kms
5 from moto.core import BaseBackend, BaseModel
6 from moto.core.utils import iso_8601_datetime_without_milliseconds
7 from .utils import generate_key_id
8 from collections import defaultdict
9 from datetime import datetime, timedelta
10
11
12 class Key(BaseModel):
13
14 def __init__(self, policy, key_usage, description, region):
15 self.id = generate_key_id()
16 self.policy = policy
17 self.key_usage = key_usage
18 self.key_state = "Enabled"
19 self.description = description
20 self.enabled = True
21 self.region = region
22 self.account_id = "0123456789012"
23 self.key_rotation_status = False
24 self.deletion_date = None
25 self.tags = {}
26
27 @property
28 def physical_resource_id(self):
29 return self.id
30
31 @property
32 def arn(self):
33 return "arn:aws:kms:{0}:{1}:key/{2}".format(self.region, self.account_id, self.id)
34
35 def to_dict(self):
36 key_dict = {
37 "KeyMetadata": {
38 "AWSAccountId": self.account_id,
39 "Arn": self.arn,
40 "CreationDate": datetime.strftime(datetime.utcnow(), "%s"),
41 "Description": self.description,
42 "Enabled": self.enabled,
43 "KeyId": self.id,
44 "KeyUsage": self.key_usage,
45 "KeyState": self.key_state,
46 }
47 }
48 if self.key_state == 'PendingDeletion':
49 key_dict['KeyMetadata']['DeletionDate'] = iso_8601_datetime_without_milliseconds(self.deletion_date)
50 return key_dict
51
52 def delete(self, region_name):
53 kms_backends[region_name].delete_key(self.id)
54
55 @classmethod
56 def create_from_cloudformation_json(self, resource_name, cloudformation_json, region_name):
57 kms_backend = kms_backends[region_name]
58 properties = cloudformation_json['Properties']
59
60 key = kms_backend.create_key(
61 policy=properties['KeyPolicy'],
62 key_usage='ENCRYPT_DECRYPT',
63 description=properties['Description'],
64 region=region_name,
65 )
66 key.key_rotation_status = properties['EnableKeyRotation']
67 key.enabled = properties['Enabled']
68 return key
69
70 def get_cfn_attribute(self, attribute_name):
71 from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
72 if attribute_name == 'Arn':
73 return self.arn
74 raise UnformattedGetAttTemplateException()
75
76
77 class KmsBackend(BaseBackend):
78
79 def __init__(self):
80 self.keys = {}
81 self.key_to_aliases = defaultdict(set)
82
83 def create_key(self, policy, key_usage, description, region):
84 key = Key(policy, key_usage, description, region)
85 self.keys[key.id] = key
86 return key
87
88 def update_key_description(self, key_id, description):
89 key = self.keys[self.get_key_id(key_id)]
90 key.description = description
91
92 def tag_resource(self, key_id, tags):
93 key = self.keys[self.get_key_id(key_id)]
94 key.tags = tags
95
96 def list_resource_tags(self, key_id):
97 key = self.keys[self.get_key_id(key_id)]
98 return key.tags
99
100 def delete_key(self, key_id):
101 if key_id in self.keys:
102 if key_id in self.key_to_aliases:
103 self.key_to_aliases.pop(key_id)
104
105 return self.keys.pop(key_id)
106
107 def describe_key(self, key_id):
108 # allow the different methods (alias, ARN :key/, keyId, ARN alias) to
109 # describe key not just KeyId
110 key_id = self.get_key_id(key_id)
111 if r'alias/' in str(key_id).lower():
112 key_id = self.get_key_id_from_alias(key_id.split('alias/')[1])
113 return self.keys[self.get_key_id(key_id)]
114
115 def list_keys(self):
116 return self.keys.values()
117
118 def get_key_id(self, key_id):
119 # Allow use of ARN as well as pure KeyId
120 return str(key_id).split(r':key/')[1] if r':key/' in str(key_id).lower() else key_id
121
122 def alias_exists(self, alias_name):
123 for aliases in self.key_to_aliases.values():
124 if alias_name in aliases:
125 return True
126
127 return False
128
129 def add_alias(self, target_key_id, alias_name):
130 self.key_to_aliases[target_key_id].add(alias_name)
131
132 def delete_alias(self, alias_name):
133 """Delete the alias."""
134 for aliases in self.key_to_aliases.values():
135 if alias_name in aliases:
136 aliases.remove(alias_name)
137
138 def get_all_aliases(self):
139 return self.key_to_aliases
140
141 def get_key_id_from_alias(self, alias_name):
142 for key_id, aliases in dict(self.key_to_aliases).items():
143 if alias_name in ",".join(aliases):
144 return key_id
145 return None
146
147 def enable_key_rotation(self, key_id):
148 self.keys[self.get_key_id(key_id)].key_rotation_status = True
149
150 def disable_key_rotation(self, key_id):
151 self.keys[self.get_key_id(key_id)].key_rotation_status = False
152
153 def get_key_rotation_status(self, key_id):
154 return self.keys[self.get_key_id(key_id)].key_rotation_status
155
156 def put_key_policy(self, key_id, policy):
157 self.keys[self.get_key_id(key_id)].policy = policy
158
159 def get_key_policy(self, key_id):
160 return self.keys[self.get_key_id(key_id)].policy
161
162 def disable_key(self, key_id):
163 self.keys[key_id].enabled = False
164 self.keys[key_id].key_state = 'Disabled'
165
166 def enable_key(self, key_id):
167 self.keys[key_id].enabled = True
168 self.keys[key_id].key_state = 'Enabled'
169
170 def cancel_key_deletion(self, key_id):
171 self.keys[key_id].key_state = 'Disabled'
172 self.keys[key_id].deletion_date = None
173
174 def schedule_key_deletion(self, key_id, pending_window_in_days):
175 if 7 <= pending_window_in_days <= 30:
176 self.keys[key_id].enabled = False
177 self.keys[key_id].key_state = 'PendingDeletion'
178 self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days)
179 return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date)
180
181 def generate_data_key(self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens):
182 key = self.keys[self.get_key_id(key_id)]
183
184 if key_spec:
185 if key_spec == 'AES_128':
186 bytes = 16
187 else:
188 bytes = 32
189 else:
190 bytes = number_of_bytes
191
192 plaintext = os.urandom(bytes)
193
194 return plaintext, key.arn
195
196
197 kms_backends = {}
198 for region in boto.kms.regions():
199 kms_backends[region.name] = KmsBackend()
200
[end of moto/kms/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/moto/kms/models.py b/moto/kms/models.py
--- a/moto/kms/models.py
+++ b/moto/kms/models.py
@@ -3,7 +3,7 @@
import os
import boto.kms
from moto.core import BaseBackend, BaseModel
-from moto.core.utils import iso_8601_datetime_without_milliseconds
+from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time
from .utils import generate_key_id
from collections import defaultdict
from datetime import datetime, timedelta
@@ -37,7 +37,7 @@
"KeyMetadata": {
"AWSAccountId": self.account_id,
"Arn": self.arn,
- "CreationDate": datetime.strftime(datetime.utcnow(), "%s"),
+ "CreationDate": "%d" % unix_time(),
"Description": self.description,
"Enabled": self.enabled,
"KeyId": self.id,
|
{"golden_diff": "diff --git a/moto/kms/models.py b/moto/kms/models.py\n--- a/moto/kms/models.py\n+++ b/moto/kms/models.py\n@@ -3,7 +3,7 @@\n import os\n import boto.kms\n from moto.core import BaseBackend, BaseModel\n-from moto.core.utils import iso_8601_datetime_without_milliseconds\n+from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time\n from .utils import generate_key_id\n from collections import defaultdict\n from datetime import datetime, timedelta\n@@ -37,7 +37,7 @@\n \"KeyMetadata\": {\n \"AWSAccountId\": self.account_id,\n \"Arn\": self.arn,\n- \"CreationDate\": datetime.strftime(datetime.utcnow(), \"%s\"),\n+ \"CreationDate\": \"%d\" % unix_time(),\n \"Description\": self.description,\n \"Enabled\": self.enabled,\n \"KeyId\": self.id,\n", "issue": "Several KMS operations are broken on Windows in 1.3.8\nThe change from https://github.com/spulec/moto/pull/2073 breaks several KMS operations on Windows because the `%s` format specifier is platform specific and not supported in python.\r\n\r\nThis can be easily fixed by changing the timestamp generation to use:\r\n> `(datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()`\r\n\r\nAn example stack trace of a test that throws is included below.\r\n\r\n```\r\ntests\\test_kms.py:33: in _create_kms_key\r\n key = kms_client.create_key(Policy=\"Policy\", KeyUsage='ENCRYPT_DECRYPT')\r\nwenv\\lib\\site-packages\\botocore\\client.py:357: in _api_call\r\n return self._make_api_call(operation_name, kwargs)\r\nwenv\\lib\\site-packages\\botocore\\client.py:648: in _make_api_call\r\n operation_model, request_dict, request_context)\r\nwenv\\lib\\site-packages\\botocore\\client.py:667: in _make_request\r\n return self._endpoint.make_request(operation_model, request_dict)\r\nwenv\\lib\\site-packages\\botocore\\endpoint.py:102: in make_request\r\n return self._send_request(request_dict, operation_model)\r\nwenv\\lib\\site-packages\\botocore\\endpoint.py:137: in _send_request\r\n success_response, exception):\r\nwenv\\lib\\site-packages\\botocore\\endpoint.py:231: in _needs_retry\r\n caught_exception=caught_exception, request_dict=request_dict)\r\nwenv\\lib\\site-packages\\botocore\\hooks.py:356: in emit\r\n return self._emitter.emit(aliased_event_name, **kwargs)\r\nwenv\\lib\\site-packages\\botocore\\hooks.py:228: in emit\r\n return self._emit(event_name, kwargs)\r\nwenv\\lib\\site-packages\\botocore\\hooks.py:211: in _emit\r\n response = handler(**kwargs)\r\nwenv\\lib\\site-packages\\botocore\\retryhandler.py:183: in __call__\r\n if self._checker(attempts, response, caught_exception):\r\nwenv\\lib\\site-packages\\botocore\\retryhandler.py:251: in __call__\r\n caught_exception)\r\nwenv\\lib\\site-packages\\botocore\\retryhandler.py:269: in _should_retry\r\n return self._checker(attempt_number, response, caught_exception)\r\nwenv\\lib\\site-packages\\botocore\\retryhandler.py:317: in __call__\r\n caught_exception)\r\nwenv\\lib\\site-packages\\botocore\\retryhandler.py:223: in __call__\r\n attempt_number, caught_exception)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\nself = <botocore.retryhandler.ExceptionRaiser object at 0x0000000011A51A58>\r\nattempt_number = 1, caught_exception = ValueError('Invalid format string',)\r\n\r\n def _check_caught_exception(self, attempt_number, caught_exception):\r\n # This is implementation specific, but this class is useful by\r\n # coordinating with the MaxAttemptsDecorator.\r\n # The MaxAttemptsDecorator has a list of exceptions it should catch\r\n 
# and retry, but something needs to come along and actually raise the\r\n # caught_exception. That's what this class is being used for. If\r\n # the MaxAttemptsDecorator is not interested in retrying the exception\r\n # then this exception just propogates out past the retry code.\r\n print(dir(caught_exception))\r\n> raise caught_exception\r\nE ValueError: Invalid format string\r\n```\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport boto.kms\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.core.utils import iso_8601_datetime_without_milliseconds\nfrom .utils import generate_key_id\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\n\n\nclass Key(BaseModel):\n\n def __init__(self, policy, key_usage, description, region):\n self.id = generate_key_id()\n self.policy = policy\n self.key_usage = key_usage\n self.key_state = \"Enabled\"\n self.description = description\n self.enabled = True\n self.region = region\n self.account_id = \"0123456789012\"\n self.key_rotation_status = False\n self.deletion_date = None\n self.tags = {}\n\n @property\n def physical_resource_id(self):\n return self.id\n\n @property\n def arn(self):\n return \"arn:aws:kms:{0}:{1}:key/{2}\".format(self.region, self.account_id, self.id)\n\n def to_dict(self):\n key_dict = {\n \"KeyMetadata\": {\n \"AWSAccountId\": self.account_id,\n \"Arn\": self.arn,\n \"CreationDate\": datetime.strftime(datetime.utcnow(), \"%s\"),\n \"Description\": self.description,\n \"Enabled\": self.enabled,\n \"KeyId\": self.id,\n \"KeyUsage\": self.key_usage,\n \"KeyState\": self.key_state,\n }\n }\n if self.key_state == 'PendingDeletion':\n key_dict['KeyMetadata']['DeletionDate'] = iso_8601_datetime_without_milliseconds(self.deletion_date)\n return key_dict\n\n def delete(self, region_name):\n kms_backends[region_name].delete_key(self.id)\n\n @classmethod\n def create_from_cloudformation_json(self, resource_name, cloudformation_json, region_name):\n kms_backend = kms_backends[region_name]\n properties = cloudformation_json['Properties']\n\n key = kms_backend.create_key(\n policy=properties['KeyPolicy'],\n key_usage='ENCRYPT_DECRYPT',\n description=properties['Description'],\n region=region_name,\n )\n key.key_rotation_status = properties['EnableKeyRotation']\n key.enabled = properties['Enabled']\n return key\n\n def get_cfn_attribute(self, attribute_name):\n from moto.cloudformation.exceptions import UnformattedGetAttTemplateException\n if attribute_name == 'Arn':\n return self.arn\n raise UnformattedGetAttTemplateException()\n\n\nclass KmsBackend(BaseBackend):\n\n def __init__(self):\n self.keys = {}\n self.key_to_aliases = defaultdict(set)\n\n def create_key(self, policy, key_usage, description, region):\n key = Key(policy, key_usage, description, region)\n self.keys[key.id] = key\n return key\n\n def update_key_description(self, key_id, description):\n key = self.keys[self.get_key_id(key_id)]\n key.description = description\n\n def tag_resource(self, key_id, tags):\n key = self.keys[self.get_key_id(key_id)]\n key.tags = tags\n\n def list_resource_tags(self, key_id):\n key = self.keys[self.get_key_id(key_id)]\n return key.tags\n\n def delete_key(self, key_id):\n if key_id in self.keys:\n if key_id in self.key_to_aliases:\n self.key_to_aliases.pop(key_id)\n\n return self.keys.pop(key_id)\n\n def describe_key(self, key_id):\n # allow the different methods (alias, ARN :key/, keyId, ARN alias) to\n # describe key not just KeyId\n key_id = self.get_key_id(key_id)\n 
if r'alias/' in str(key_id).lower():\n key_id = self.get_key_id_from_alias(key_id.split('alias/')[1])\n return self.keys[self.get_key_id(key_id)]\n\n def list_keys(self):\n return self.keys.values()\n\n def get_key_id(self, key_id):\n # Allow use of ARN as well as pure KeyId\n return str(key_id).split(r':key/')[1] if r':key/' in str(key_id).lower() else key_id\n\n def alias_exists(self, alias_name):\n for aliases in self.key_to_aliases.values():\n if alias_name in aliases:\n return True\n\n return False\n\n def add_alias(self, target_key_id, alias_name):\n self.key_to_aliases[target_key_id].add(alias_name)\n\n def delete_alias(self, alias_name):\n \"\"\"Delete the alias.\"\"\"\n for aliases in self.key_to_aliases.values():\n if alias_name in aliases:\n aliases.remove(alias_name)\n\n def get_all_aliases(self):\n return self.key_to_aliases\n\n def get_key_id_from_alias(self, alias_name):\n for key_id, aliases in dict(self.key_to_aliases).items():\n if alias_name in \",\".join(aliases):\n return key_id\n return None\n\n def enable_key_rotation(self, key_id):\n self.keys[self.get_key_id(key_id)].key_rotation_status = True\n\n def disable_key_rotation(self, key_id):\n self.keys[self.get_key_id(key_id)].key_rotation_status = False\n\n def get_key_rotation_status(self, key_id):\n return self.keys[self.get_key_id(key_id)].key_rotation_status\n\n def put_key_policy(self, key_id, policy):\n self.keys[self.get_key_id(key_id)].policy = policy\n\n def get_key_policy(self, key_id):\n return self.keys[self.get_key_id(key_id)].policy\n\n def disable_key(self, key_id):\n self.keys[key_id].enabled = False\n self.keys[key_id].key_state = 'Disabled'\n\n def enable_key(self, key_id):\n self.keys[key_id].enabled = True\n self.keys[key_id].key_state = 'Enabled'\n\n def cancel_key_deletion(self, key_id):\n self.keys[key_id].key_state = 'Disabled'\n self.keys[key_id].deletion_date = None\n\n def schedule_key_deletion(self, key_id, pending_window_in_days):\n if 7 <= pending_window_in_days <= 30:\n self.keys[key_id].enabled = False\n self.keys[key_id].key_state = 'PendingDeletion'\n self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days)\n return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date)\n\n def generate_data_key(self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens):\n key = self.keys[self.get_key_id(key_id)]\n\n if key_spec:\n if key_spec == 'AES_128':\n bytes = 16\n else:\n bytes = 32\n else:\n bytes = number_of_bytes\n\n plaintext = os.urandom(bytes)\n\n return plaintext, key.arn\n\n\nkms_backends = {}\nfor region in boto.kms.regions():\n kms_backends[region.name] = KmsBackend()\n", "path": "moto/kms/models.py"}]}
| 3,417 | 202 |
gh_patches_debug_35238
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-5779
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plotly charts ignore HEIGHT attribute after bug fix #5645
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
When defining a chart with Plotly Express, you can provide a HEIGHT attribute defining the height of the chart.
Before bugfix #5645 this worked, but after that fix the HEIGHT is now ignored.
### Reproducible Code Example
[Open this issue in Streamlit Cloud](https://issues.streamlitapp.com/?issue=gh-5749)
```Python
import streamlit as st
import pandas as pd
import plotly.express as px
data = pd.DataFrame((100,120,104,102,203,102),columns=["some_col"])
fig = px.line(data, height=100, width=200)
fig.update_xaxes(visible=False, fixedrange=True)
fig.update_yaxes(visible=False, fixedrange=True)
fig.update_layout(annotations=[], overwrite=True)
fig.update_layout(
showlegend=False,
plot_bgcolor="white",
margin=dict(t=10,l=10,b=10,r=10)
)
st.plotly_chart(fig,config=dict(displayModeBar=False))
```
### Steps To Reproduce
_No response_
### Expected Behavior
_No response_
### Current Behavior
_No response_
### Is this a regression?
- [X] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.15.
- Python version: 3.10
- Operating System: Win 10
- Browser: Edge
- Virtual environment:
### Additional Information
_No response_
### Are you willing to submit a PR?
- [ ] Yes, I am willing to submit a PR!
</issue>
<code>
[start of e2e/scripts/st_plotly_chart.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from datetime import datetime
16
17 import numpy as np
18 import plotly.express as px
19 import plotly.figure_factory as ff
20 import plotly.graph_objects as go
21 from plotly.subplots import make_subplots
22
23 import streamlit as st
24
25 # Explicitly seed the RNG for deterministic results
26 np.random.seed(0)
27
28 df_bubble = px.data.gapminder()
29 fig_bubble = px.scatter(
30 df_bubble.query("year==2007"),
31 x="gdpPercap",
32 y="lifeExp",
33 size="pop",
34 color="continent",
35 hover_name="country",
36 log_x=True,
37 size_max=60,
38 )
39
40 # tests no streamlit theme plot
41 st.plotly_chart(fig_bubble, theme=None)
42
43 # Bubble Chart
44 # Tests Discrete coloring with streamlit theme
45 st.plotly_chart(fig_bubble, theme="streamlit")
46
47 # Candlestick Chart
48 open_data_candlestick = [33.0, 33.3, 33.5, 33.0, 34.1]
49 high_data_candlestick = [33.1, 33.3, 33.6, 33.2, 34.8]
50 low_data_candlestick = [32.7, 32.7, 32.8, 32.6, 32.8]
51 close_data_candlestick = [33.0, 32.9, 33.3, 33.1, 33.1]
52 dates_candlestick = [
53 datetime(year=2013, month=10, day=10),
54 datetime(year=2013, month=11, day=10),
55 datetime(year=2013, month=12, day=10),
56 datetime(year=2014, month=1, day=10),
57 datetime(year=2014, month=2, day=10),
58 ]
59 fig_candlestick = go.Figure(
60 data=[
61 go.Candlestick(
62 x=dates_candlestick,
63 open=open_data_candlestick,
64 high=high_data_candlestick,
65 low=low_data_candlestick,
66 close=close_data_candlestick,
67 )
68 ]
69 )
70 st.plotly_chart(fig_candlestick, theme="streamlit")
71
72 # Tests sunburst charts and color parameter using streamlit colors
73 df = px.data.tips()
74 fig_sunburst = px.sunburst(
75 df, path=["sex", "day", "time"], values="total_bill", color="day"
76 )
77 st.plotly_chart(fig_sunburst, theme="streamlit")
78
79 # Contour Plot and Heatmap
80 fig = make_subplots(
81 rows=2, cols=2, subplot_titles=("connectgaps = False", "connectgaps = True")
82 )
83 z = [
84 [None, None, None, 12, 13, 14, 15, 16],
85 [None, 1, None, 11, None, None, None, 17],
86 [None, 2, 6, 7, None, None, None, 18],
87 [None, 3, None, 8, None, None, None, 19],
88 [5, 4, 10, 9, None, None, None, 20],
89 [None, None, None, 27, None, None, None, 21],
90 [None, None, None, 26, 25, 24, 23, 22],
91 ]
92
93 fig.add_trace(go.Contour(z=z, showscale=False), 1, 1)
94 fig.add_trace(go.Contour(z=z, showscale=False, connectgaps=True), 1, 2)
95 fig.add_trace(go.Heatmap(z=z, showscale=False, zsmooth="best"), 2, 1)
96 fig.add_trace(go.Heatmap(z=z, showscale=False, connectgaps=True, zsmooth="best"), 2, 2)
97
98 fig["layout"]["yaxis1"].update(title="Contour map")
99 fig["layout"]["yaxis3"].update(title="Heatmap")
100
101 st.plotly_chart(fig, theme="streamlit")
102
103 # Waterfall Chart
104 fig_waterfall = go.Figure(
105 go.Waterfall(
106 name="20",
107 orientation="v",
108 measure=["relative", "relative", "total", "relative", "relative", "total"],
109 x=[
110 "Sales",
111 "Consulting",
112 "Net revenue",
113 "Purchases",
114 "Other expenses",
115 "Profit before tax",
116 ],
117 textposition="outside",
118 text=["+60", "+80", "", "-40", "-20", "Total"],
119 y=[60, 80, 0, -40, -20, 0],
120 connector={"line": {"color": "rgb(63, 63, 63)"}},
121 )
122 )
123
124 fig_waterfall.update_layout(title="Profit and loss statement 2018", showlegend=True)
125 st.plotly_chart(fig_waterfall, theme="streamlit")
126
127 # Ternary Chart
128 df = px.data.election()
129 fig_ternary = px.scatter_ternary(df, a="Joly", b="Coderre", c="Bergeron")
130
131 st.plotly_chart(fig_ternary, theme="streamlit")
132
133 # Table Plot
134 fig_table = go.Figure(
135 data=[
136 go.Table(
137 header=dict(values=["A Scores", "B Scores"]),
138 cells=dict(values=[[100, 90, 80, 90], [95, 85, 75, 95]]),
139 )
140 ]
141 )
142 st.plotly_chart(fig_table, theme="streamlit")
143
144 # Continuous Customization Chart with plotly.go graph
145 fig_contour = go.Figure(
146 data=go.Contour(
147 z=[
148 [10, 10.625, 12.5, 15.625, 20],
149 [5.625, 6.25, 8.125, 11.25, 15.625],
150 [2.5, 3.125, 5.0, 8.125, 12.5],
151 [0.625, 1.25, 3.125, 6.25, 10.625],
152 [0, 0.625, 2.5, 5.625, 10],
153 ],
154 colorscale="Electric",
155 )
156 )
157 st.plotly_chart(fig_contour, theme="streamlit")
158
159 # Discrete Customization Chart
160 df = px.data.wind()
161 fig = px.scatter_polar(
162 df,
163 r="frequency",
164 theta="direction",
165 color="strength",
166 symbol="strength",
167 size="frequency",
168 color_discrete_sequence=px.colors.sequential.Plasma_r,
169 )
170 st.plotly_chart(fig, theme="streamlit")
171
172 # Layout Customization Chart
173 import plotly.graph_objects as go
174
175 fig = go.Figure(
176 go.Sunburst(
177 labels=[
178 "Eve",
179 "Cain",
180 "Seth",
181 "Enos",
182 "Noam",
183 "Abel",
184 "Awan",
185 "Enoch",
186 "Azura",
187 ],
188 parents=["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve"],
189 values=[65, 14, 12, 10, 2, 6, 6, 4, 4],
190 branchvalues="total",
191 )
192 )
193 fig.update_layout(margin=dict(t=10, l=100, r=100, b=110))
194 st.plotly_chart(fig, theme="streamlit")
195
196 # Separate template Customization Chart
197 df = px.data.gapminder().query("country == 'Canada'")
198 fig = px.bar(
199 df,
200 x="year",
201 y="pop",
202 hover_data=["lifeExp", "gdpPercap"],
203 color="lifeExp",
204 template="plotly",
205 labels={"pop": "population of Canada"},
206 height=400,
207 )
208
209 st.plotly_chart(fig, theme="streamlit")
210
211 # Histogram chart
212 df = px.data.tips()
213
214 fig = px.density_heatmap(df, x="total_bill", y="tip")
215 st.plotly_chart(fig, theme="streamlit")
216
[end of e2e/scripts/st_plotly_chart.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/e2e/scripts/st_plotly_chart.py b/e2e/scripts/st_plotly_chart.py
--- a/e2e/scripts/st_plotly_chart.py
+++ b/e2e/scripts/st_plotly_chart.py
@@ -15,8 +15,8 @@
from datetime import datetime
import numpy as np
+import pandas as pd
import plotly.express as px
-import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
@@ -42,7 +42,9 @@
# Bubble Chart
# Tests Discrete coloring with streamlit theme
-st.plotly_chart(fig_bubble, theme="streamlit")
+# uses container width when use_container_width flag is True
+fig_bubble.update_layout(height=300, width=300)
+st.plotly_chart(fig_bubble, use_container_width=True, theme="streamlit")
# Candlestick Chart
open_data_candlestick = [33.0, 33.3, 33.5, 33.0, 34.1]
@@ -121,8 +123,11 @@
)
)
-fig_waterfall.update_layout(title="Profit and loss statement 2018", showlegend=True)
-st.plotly_chart(fig_waterfall, theme="streamlit")
+fig_waterfall.update_layout(
+ title="Profit and loss statement 2018", height=300, width=300, showlegend=True
+)
+# uses figure height and width when use_container_width is False
+st.plotly_chart(fig_waterfall, use_container_width=False, theme="streamlit")
# Ternary Chart
df = px.data.election()
@@ -170,8 +175,6 @@
st.plotly_chart(fig, theme="streamlit")
# Layout Customization Chart
-import plotly.graph_objects as go
-
fig = go.Figure(
go.Sunburst(
labels=[
@@ -213,3 +216,19 @@
fig = px.density_heatmap(df, x="total_bill", y="tip")
st.plotly_chart(fig, theme="streamlit")
+
+data = pd.DataFrame((100, 120, 104, 102, 203, 102), columns=["some_col"])
+
+fig = px.line(data, height=100, width=300)
+fig.update_xaxes(visible=False, fixedrange=True)
+fig.update_yaxes(visible=False, fixedrange=True)
+fig.update_layout(annotations=[], overwrite=True)
+fig.update_layout(showlegend=False, margin=dict(t=10, l=10, b=10, r=10))
+
+# uses figure height and width when use_container_width is False
+st.plotly_chart(
+ fig, config=dict(displayModeBar=False), use_container_width=False, theme=None
+)
+
+# uses container width when use_container_width flag is True
+st.plotly_chart(fig, use_container_width=True, theme=None)
|
{"golden_diff": "diff --git a/e2e/scripts/st_plotly_chart.py b/e2e/scripts/st_plotly_chart.py\n--- a/e2e/scripts/st_plotly_chart.py\n+++ b/e2e/scripts/st_plotly_chart.py\n@@ -15,8 +15,8 @@\n from datetime import datetime\n \n import numpy as np\n+import pandas as pd\n import plotly.express as px\n-import plotly.figure_factory as ff\n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n \n@@ -42,7 +42,9 @@\n \n # Bubble Chart\n # Tests Discrete coloring with streamlit theme\n-st.plotly_chart(fig_bubble, theme=\"streamlit\")\n+# uses container width when use_container_width flag is True\n+fig_bubble.update_layout(height=300, width=300)\n+st.plotly_chart(fig_bubble, use_container_width=True, theme=\"streamlit\")\n \n # Candlestick Chart\n open_data_candlestick = [33.0, 33.3, 33.5, 33.0, 34.1]\n@@ -121,8 +123,11 @@\n )\n )\n \n-fig_waterfall.update_layout(title=\"Profit and loss statement 2018\", showlegend=True)\n-st.plotly_chart(fig_waterfall, theme=\"streamlit\")\n+fig_waterfall.update_layout(\n+ title=\"Profit and loss statement 2018\", height=300, width=300, showlegend=True\n+)\n+# uses figure height and width when use_container_width is False\n+st.plotly_chart(fig_waterfall, use_container_width=False, theme=\"streamlit\")\n \n # Ternary Chart\n df = px.data.election()\n@@ -170,8 +175,6 @@\n st.plotly_chart(fig, theme=\"streamlit\")\n \n # Layout Customization Chart\n-import plotly.graph_objects as go\n-\n fig = go.Figure(\n go.Sunburst(\n labels=[\n@@ -213,3 +216,19 @@\n \n fig = px.density_heatmap(df, x=\"total_bill\", y=\"tip\")\n st.plotly_chart(fig, theme=\"streamlit\")\n+\n+data = pd.DataFrame((100, 120, 104, 102, 203, 102), columns=[\"some_col\"])\n+\n+fig = px.line(data, height=100, width=300)\n+fig.update_xaxes(visible=False, fixedrange=True)\n+fig.update_yaxes(visible=False, fixedrange=True)\n+fig.update_layout(annotations=[], overwrite=True)\n+fig.update_layout(showlegend=False, margin=dict(t=10, l=10, b=10, r=10))\n+\n+# uses figure height and width when use_container_width is False\n+st.plotly_chart(\n+ fig, config=dict(displayModeBar=False), use_container_width=False, theme=None\n+)\n+\n+# uses container width when use_container_width flag is True\n+st.plotly_chart(fig, use_container_width=True, theme=None)\n", "issue": "Plotly charts ignore HEIGHT attribute after bug fix #5645\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nWhen defining a chart with Plotly Express, you can provide a HEIGHT attribute defining.\r\nBefore bugfix #5645 this worked, but after this bugfix now the HEIGHT is ignored.\r\n\r\n\r\n### Reproducible Code Example\r\n\r\n[](https://issues.streamlitapp.com/?issue=gh-5749)\r\n\r\n```Python\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport plotly.express as px\r\n\r\ndata = pd.DataFrame((100,120,104,102,203,102),columns=[\"some_col\"])\r\n\r\nfig = px.line(data, height=100, width=200)\r\nfig.update_xaxes(visible=False, fixedrange=True)\r\nfig.update_yaxes(visible=False, fixedrange=True)\r\nfig.update_layout(annotations=[], overwrite=True)\r\nfig.update_layout(\r\n showlegend=False,\r\n plot_bgcolor=\"white\",\r\n margin=dict(t=10,l=10,b=10,r=10)\r\n )\r\nst.plotly_chart(fig,config=dict(displayModeBar=False))\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\n_No 
response_\r\n\r\n### Expected Behavior\r\n\r\n_No response_\r\n\r\n### Current Behavior\r\n\r\n_No response_\r\n\r\n### Is this a regression?\r\n\r\n- [X] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.15.\r\n- Python version: 3.10\r\n- Operating System: Win 10\r\n- Browser: Edge\r\n- Virtual environment:\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nimport numpy as np\nimport plotly.express as px\nimport plotly.figure_factory as ff\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nimport streamlit as st\n\n# Explicitly seed the RNG for deterministic results\nnp.random.seed(0)\n\ndf_bubble = px.data.gapminder()\nfig_bubble = px.scatter(\n df_bubble.query(\"year==2007\"),\n x=\"gdpPercap\",\n y=\"lifeExp\",\n size=\"pop\",\n color=\"continent\",\n hover_name=\"country\",\n log_x=True,\n size_max=60,\n)\n\n# tests no streamlit theme plot\nst.plotly_chart(fig_bubble, theme=None)\n\n# Bubble Chart\n# Tests Discrete coloring with streamlit theme\nst.plotly_chart(fig_bubble, theme=\"streamlit\")\n\n# Candlestick Chart\nopen_data_candlestick = [33.0, 33.3, 33.5, 33.0, 34.1]\nhigh_data_candlestick = [33.1, 33.3, 33.6, 33.2, 34.8]\nlow_data_candlestick = [32.7, 32.7, 32.8, 32.6, 32.8]\nclose_data_candlestick = [33.0, 32.9, 33.3, 33.1, 33.1]\ndates_candlestick = [\n datetime(year=2013, month=10, day=10),\n datetime(year=2013, month=11, day=10),\n datetime(year=2013, month=12, day=10),\n datetime(year=2014, month=1, day=10),\n datetime(year=2014, month=2, day=10),\n]\nfig_candlestick = go.Figure(\n data=[\n go.Candlestick(\n x=dates_candlestick,\n open=open_data_candlestick,\n high=high_data_candlestick,\n low=low_data_candlestick,\n close=close_data_candlestick,\n )\n ]\n)\nst.plotly_chart(fig_candlestick, theme=\"streamlit\")\n\n# Tests sunburst charts and color parameter using streamlit colors\ndf = px.data.tips()\nfig_sunburst = px.sunburst(\n df, path=[\"sex\", \"day\", \"time\"], values=\"total_bill\", color=\"day\"\n)\nst.plotly_chart(fig_sunburst, theme=\"streamlit\")\n\n# Contour Plot and Heatmap\nfig = make_subplots(\n rows=2, cols=2, subplot_titles=(\"connectgaps = False\", \"connectgaps = True\")\n)\nz = [\n [None, None, None, 12, 13, 14, 15, 16],\n [None, 1, None, 11, None, None, None, 17],\n [None, 2, 6, 7, None, None, None, 18],\n [None, 3, None, 8, None, None, None, 19],\n [5, 4, 10, 9, None, None, None, 20],\n [None, None, None, 27, None, None, None, 21],\n [None, None, None, 26, 25, 24, 23, 22],\n]\n\nfig.add_trace(go.Contour(z=z, showscale=False), 1, 1)\nfig.add_trace(go.Contour(z=z, showscale=False, connectgaps=True), 1, 2)\nfig.add_trace(go.Heatmap(z=z, showscale=False, zsmooth=\"best\"), 2, 
1)\nfig.add_trace(go.Heatmap(z=z, showscale=False, connectgaps=True, zsmooth=\"best\"), 2, 2)\n\nfig[\"layout\"][\"yaxis1\"].update(title=\"Contour map\")\nfig[\"layout\"][\"yaxis3\"].update(title=\"Heatmap\")\n\nst.plotly_chart(fig, theme=\"streamlit\")\n\n# Waterfall Chart\nfig_waterfall = go.Figure(\n go.Waterfall(\n name=\"20\",\n orientation=\"v\",\n measure=[\"relative\", \"relative\", \"total\", \"relative\", \"relative\", \"total\"],\n x=[\n \"Sales\",\n \"Consulting\",\n \"Net revenue\",\n \"Purchases\",\n \"Other expenses\",\n \"Profit before tax\",\n ],\n textposition=\"outside\",\n text=[\"+60\", \"+80\", \"\", \"-40\", \"-20\", \"Total\"],\n y=[60, 80, 0, -40, -20, 0],\n connector={\"line\": {\"color\": \"rgb(63, 63, 63)\"}},\n )\n)\n\nfig_waterfall.update_layout(title=\"Profit and loss statement 2018\", showlegend=True)\nst.plotly_chart(fig_waterfall, theme=\"streamlit\")\n\n# Ternary Chart\ndf = px.data.election()\nfig_ternary = px.scatter_ternary(df, a=\"Joly\", b=\"Coderre\", c=\"Bergeron\")\n\nst.plotly_chart(fig_ternary, theme=\"streamlit\")\n\n# Table Plot\nfig_table = go.Figure(\n data=[\n go.Table(\n header=dict(values=[\"A Scores\", \"B Scores\"]),\n cells=dict(values=[[100, 90, 80, 90], [95, 85, 75, 95]]),\n )\n ]\n)\nst.plotly_chart(fig_table, theme=\"streamlit\")\n\n# Continuous Customization Chart with plotly.go graph\nfig_contour = go.Figure(\n data=go.Contour(\n z=[\n [10, 10.625, 12.5, 15.625, 20],\n [5.625, 6.25, 8.125, 11.25, 15.625],\n [2.5, 3.125, 5.0, 8.125, 12.5],\n [0.625, 1.25, 3.125, 6.25, 10.625],\n [0, 0.625, 2.5, 5.625, 10],\n ],\n colorscale=\"Electric\",\n )\n)\nst.plotly_chart(fig_contour, theme=\"streamlit\")\n\n# Discrete Customization Chart\ndf = px.data.wind()\nfig = px.scatter_polar(\n df,\n r=\"frequency\",\n theta=\"direction\",\n color=\"strength\",\n symbol=\"strength\",\n size=\"frequency\",\n color_discrete_sequence=px.colors.sequential.Plasma_r,\n)\nst.plotly_chart(fig, theme=\"streamlit\")\n\n# Layout Customization Chart\nimport plotly.graph_objects as go\n\nfig = go.Figure(\n go.Sunburst(\n labels=[\n \"Eve\",\n \"Cain\",\n \"Seth\",\n \"Enos\",\n \"Noam\",\n \"Abel\",\n \"Awan\",\n \"Enoch\",\n \"Azura\",\n ],\n parents=[\"\", \"Eve\", \"Eve\", \"Seth\", \"Seth\", \"Eve\", \"Eve\", \"Awan\", \"Eve\"],\n values=[65, 14, 12, 10, 2, 6, 6, 4, 4],\n branchvalues=\"total\",\n )\n)\nfig.update_layout(margin=dict(t=10, l=100, r=100, b=110))\nst.plotly_chart(fig, theme=\"streamlit\")\n\n# Separate template Customization Chart\ndf = px.data.gapminder().query(\"country == 'Canada'\")\nfig = px.bar(\n df,\n x=\"year\",\n y=\"pop\",\n hover_data=[\"lifeExp\", \"gdpPercap\"],\n color=\"lifeExp\",\n template=\"plotly\",\n labels={\"pop\": \"population of Canada\"},\n height=400,\n)\n\nst.plotly_chart(fig, theme=\"streamlit\")\n\n# Histogram chart\ndf = px.data.tips()\n\nfig = px.density_heatmap(df, x=\"total_bill\", y=\"tip\")\nst.plotly_chart(fig, theme=\"streamlit\")\n", "path": "e2e/scripts/st_plotly_chart.py"}]}
| 3,592 | 674 |
gh_patches_debug_5565
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-1079
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jython - Internal error detected.
@kostermw commented on [Sat Dec 15 2018](https://github.com/Microsoft/vscode-python/issues/3715)
<!--
Do you have a question instead of a bug report or enhancement request? Please ask it on https://stackoverflow.com/questions/tagged/visual-studio-code+python.
Unable to install a linter or formatter? 'No installers available'?
Windows - https://stackoverflow.com/questions/4750806/how-do-i-install-pip-on-windows
Linux - https://www.cyberciti.biz/faq/debian-ubuntu-centos-rhel-linux-install-pipclient/ , https://www.tecmint.com/install-pip-in-linux/
Python configuration issues? Please check https://code.visualstudio.com/docs/python/python-tutorial#_prerequisites
Otherwise **please** fill in the requested details below. "XXX" markers should not be present in the final bug report.
If you think a GIF of what is happening would be helpful, consider tools like https://www.cockos.com/licecap/, https://github.com/phw/peek or https://www.screentogif.com/ .
-->
## Environment data
- VS Code version: 1.30.0
- Extension version (available under the Extensions sidebar): 2018.12.1
- OS and version: W10 Pro 1803 build 17134.471
- Python version (& distribution if applicable, e.g. Anaconda): Jython 2.7.0
- Type of virtual environment used (N/A | venv | virtualenv | conda | ...): N/A
- Relevant/affected Python packages and their versions: XXX
## Expected behaviour
XXX
## Actual behaviour
XXX
## Steps to reproduce:
1. debugging hello world example
## Logs
Output for `Python` in the `Output` panel (`View`→`Output`, change the drop-down the upper-right of the `Output` panel to `Python`)
```
XXX
```
Output from `Console` under the `Developer Tools` panel (toggle Developer Tools on under `Help`)
```
PS D:\Users\koste\vs\hello> cd 'd:\Users\koste\vs\hello'; ${env:PYTHONIOENCODING}='UTF-8'; ${env:PYTHONUNBUFFERED}='1'; & 'C:\jython2.7.0\bin\jython.exe' 'd:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\ptvsd_launcher.py' '--default' '--client' '--host' 'localhost' '--port' '51023' 'd:\Users\koste\vs\hello\hello.py'
Traceback (most recent call last):
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\ptvsd_launcher.py", line 21, in <module>
import ptvsd
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\ptvsd_launcher.py", line 21, in <module>
import ptvsd
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\lib\python\ptvsd\__init__.py", line 15, in <module>
from ptvsd.attach_server import (
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\lib\python\ptvsd\attach_server.py", line 5, in <module>
from ptvsd._remote import (
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\lib\python\ptvsd\_remote.py", line 11, in <module>
from ptvsd.pydevd_hooks import install
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\lib\python\ptvsd\pydevd_hooks.py", line 15, in <module>
from ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\lib\python\ptvsd\daemon.py", line 12, in <module>
from .exit_handlers import (
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\lib\python\ptvsd\exit_handlers.py", line 28, in <module>
class ExitHandlers(object):
File "d:\Users\koste\.vscode\extensions\ms-python.python-2018.12.1\pythonFiles\lib\python\ptvsd\exit_handlers.py", line 36, in ExitHandlers
SIGNALS = [
AttributeError: 'module' object has no attribute 'SIGHUP'
Internal error detected. Please copy the above traceback and report at
https://github.com/Microsoft/vscode-python/issues/new
```
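For illustration, a hedged sketch of the kind of guard that avoids this crash (assuming the goal is simply to skip signal-based cleanup on interpreters that do not define `SIGHUP`, such as Jython or CPython on Windows):

```
import platform
import signal

# Only use signals that this interpreter actually exposes; Jython and
# CPython on Windows do not define signal.SIGHUP.
if platform.system() == 'Windows' or not hasattr(signal, 'SIGHUP'):
    SIGNALS = []
else:
    SIGNALS = [signal.SIGHUP]
```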
---
@kostermw commented on [Sat Dec 15 2018](https://github.com/Microsoft/vscode-python/issues/3715#issuecomment-447572774)
Reported as requested
</issue>
<code>
[start of src/ptvsd/exit_handlers.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import atexit
6 import os
7 import platform
8 import signal
9
10
11 class AlreadyInstalledError(RuntimeError):
12 """Exit handlers were already installed."""
13
14
15 class UnsupportedSignalError(RuntimeError):
16 """A signal is not supported."""
17
18
19 def kill_current_proc(signum=signal.SIGTERM):
20 """Kill the current process.
21
22 Note that this directly kills the process (with SIGTERM, by default)
23 rather than using sys.exit().
24 """
25 os.kill(os.getpid(), signum)
26
27
28 class ExitHandlers(object):
29 """Manages signal and atexit handlers."""
30
31 if platform.system() == 'Windows':
32 # TODO: Windows *does* support these signals:
33 # SIGABRT, SIGFPE, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGBREAK
34 SIGNALS = []
35 else:
36 SIGNALS = [
37 signal.SIGHUP,
38 ]
39
40 def __init__(self):
41 self._signal_handlers = {sig: []
42 for sig in self.SIGNALS}
43 self._atexit_handlers = []
44 self._installed = False
45
46 @property
47 def supported_signals(self):
48 return set(self.SIGNALS)
49
50 @property
51 def installed(self):
52 return self._installed
53
54 def install(self):
55 """Set the parent handlers.
56
57 This must be called in the main thread.
58 """
59 if self._installed:
60 raise AlreadyInstalledError('exit handlers already installed')
61 self._installed = True
62 self._install_signal_handler()
63 self._install_atexit_handler()
64
65 # TODO: Add uninstall()?
66
67 def add_atexit_handler(self, handle_atexit, nodupe=True):
68 """Add an atexit handler to the list managed here."""
69 if nodupe and handle_atexit in self._atexit_handlers:
70 raise ValueError('atexit handler alraedy added')
71 self._atexit_handlers.append(handle_atexit)
72
73 def add_signal_handler(self, signum, handle_signal, nodupe=True,
74 ignoreunsupported=False):
75 """Add a signal handler to the list managed here."""
76 # TODO: The initialization of self.SIGNALS should make this
77 # special-casing unnecessary.
78 if platform.system() == 'Windows':
79 return
80
81 try:
82 handlers = self._signal_handlers[signum]
83 except KeyError:
84 if ignoreunsupported:
85 return
86 raise UnsupportedSignalError(signum)
87 if nodupe and handle_signal in handlers:
88 raise ValueError('signal handler alraedy added')
89 handlers.append(handle_signal)
90
91 # internal methods
92
93 def _install_signal_handler(self):
94 # TODO: The initialization of self.SIGNALS should make this
95 # special-casing unnecessary.
96 if platform.system() == 'Windows':
97 return
98
99 orig = {}
100 try:
101 for sig in self._signal_handlers:
102 # TODO: Skip or fail if signal.getsignal() returns None?
103 orig[sig] = signal.signal(sig, self._signal_handler)
104 except ValueError:
105 # Wasn't called in main thread!
106 raise
107
108 def _signal_handler(self, signum, frame):
109 for handle_signal in self._signal_handlers.get(signum, ()):
110 handle_signal(signum, frame)
111
112 def _install_atexit_handler(self):
113 self._atexit_handlers = []
114 atexit.register(self._atexit_handler)
115
116 def _atexit_handler(self):
117 for handle_atexit in self._atexit_handlers:
118 handle_atexit()
119
[end of src/ptvsd/exit_handlers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/ptvsd/exit_handlers.py b/src/ptvsd/exit_handlers.py
--- a/src/ptvsd/exit_handlers.py
+++ b/src/ptvsd/exit_handlers.py
@@ -28,7 +28,7 @@
class ExitHandlers(object):
"""Manages signal and atexit handlers."""
- if platform.system() == 'Windows':
+ if platform.system() == 'Windows' or not hasattr(signal, 'SIGHUP'):
# TODO: Windows *does* support these signals:
# SIGABRT, SIGFPE, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGBREAK
SIGNALS = []
|
{"golden_diff": "diff --git a/src/ptvsd/exit_handlers.py b/src/ptvsd/exit_handlers.py\n--- a/src/ptvsd/exit_handlers.py\n+++ b/src/ptvsd/exit_handlers.py\n@@ -28,7 +28,7 @@\n class ExitHandlers(object):\n \"\"\"Manages signal and atexit handlers.\"\"\"\n \n- if platform.system() == 'Windows':\n+ if platform.system() == 'Windows' or not hasattr(signal, 'SIGHUP'):\n # TODO: Windows *does* support these signals:\n # SIGABRT, SIGFPE, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGBREAK\n SIGNALS = []\n", "issue": "Jython - Internal error detected.\n@kostermw commented on [Sat Dec 15 2018](https://github.com/Microsoft/vscode-python/issues/3715)\n\n<!-- \r\nDo you have a question instead of a bug report or enhancement request? Please ask it on https://stackoverflow.com/questions/tagged/visual-studio-code+python. \r\n\r\nUnable to install a linter or formatter? 'No installers available'? \r\nWindows - https://stackoverflow.com/questions/4750806/how-do-i-install-pip-on-windows\r\nLinux - https://www.cyberciti.biz/faq/debian-ubuntu-centos-rhel-linux-install-pipclient/ , https://www.tecmint.com/install-pip-in-linux/\r\n\r\nPython configuration issues? Please check https://code.visualstudio.com/docs/python/python-tutorial#_prerequisites\r\n\r\nOtherwise **please** fill in the requested details below. \"XXX\" markers should not be present in the final bug report.\r\n\r\nIf you think a GIF of what is happening would be helpful, consider tools like https://www.cockos.com/licecap/, https://github.com/phw/peek or https://www.screentogif.com/ .\r\n-->\r\n\r\n## Environment data\r\n\r\n- VS Code version: 1.30.0\r\n- Extension version (available under the Extensions sidebar): 2018.12.1\r\n- OS and version: W10 Pro 1803 build 17134.471\r\n- Python version (& distribution if applicable, e.g. Anaconda): Jython 2.7.0\r\n- Type of virtual environment used (N/A | venv | virtualenv | conda | ...): N/A\r\n- Relevant/affected Python packages and their versions: XXX\r\n\r\n## Expected behaviour\r\n\r\nXXX\r\n\r\n## Actual behaviour\r\n\r\nXXX\r\n\r\n## Steps to reproduce:\r\n1. 
debugging hello world example\r\n\r\n## Logs\r\nOutput for `Python` in the `Output` panel (`View`\u2192`Output`, change the drop-down the upper-right of the `Output` panel to `Python`)\r\n\r\n```\r\nXXX\r\n``` \r\n\r\nOutput from `Console` under the `Developer Tools` panel (toggle Developer Tools on under `Help`)\r\n\r\n```\r\nPS D:\\Users\\koste\\vs\\hello> cd 'd:\\Users\\koste\\vs\\hello'; ${env:PYTHONIOENCODING}='UTF-8'; ${env:PYTHONUNBUFFERED}='1'; & 'C:\\jython2.7.0\\bin\\jython.exe' 'd:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\ptvsd_launcher.py' '--default' '--client' '--host' 'localhost' '--port' '51023' 'd:\\Users\\koste\\vs\\hello\\hello.py'\r\nTraceback (most recent call last):\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\ptvsd_launcher.py\", line 21, in <module>\r\n import ptvsd\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\ptvsd_launcher.py\", line 21, in <module>\r\n import ptvsd\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\lib\\python\\ptvsd\\__init__.py\", line 15, in <module>\r\n from ptvsd.attach_server import (\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\lib\\python\\ptvsd\\attach_server.py\", line 5, in <module>\r\n from ptvsd._remote import (\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\lib\\python\\ptvsd\\_remote.py\", line 11, in <module>\r\n from ptvsd.pydevd_hooks import install\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\lib\\python\\ptvsd\\pydevd_hooks.py\", line 15, in <module>\r\n from ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\lib\\python\\ptvsd\\daemon.py\", line 12, in <module>\r\n from .exit_handlers import (\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\lib\\python\\ptvsd\\exit_handlers.py\", line 28, in <module>\r\n class ExitHandlers(object):\r\n File \"d:\\Users\\koste\\.vscode\\extensions\\ms-python.python-2018.12.1\\pythonFiles\\lib\\python\\ptvsd\\exit_handlers.py\", line 36, in ExitHandlers\r\n SIGNALS = [\r\nAttributeError: 'module' object has no attribute 'SIGHUP'\r\n\r\nInternal error detected. Please copy the above traceback and report at\r\nhttps://github.com/Microsoft/vscode-python/issues/new\r\n```\r\n\n\n---\n\n@kostermw commented on [Sat Dec 15 2018](https://github.com/Microsoft/vscode-python/issues/3715#issuecomment-447572774)\n\nReported as requested \n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport atexit\nimport os\nimport platform\nimport signal\n\n\nclass AlreadyInstalledError(RuntimeError):\n \"\"\"Exit handlers were already installed.\"\"\"\n\n\nclass UnsupportedSignalError(RuntimeError):\n \"\"\"A signal is not supported.\"\"\"\n\n\ndef kill_current_proc(signum=signal.SIGTERM):\n \"\"\"Kill the current process.\n\n Note that this directly kills the process (with SIGTERM, by default)\n rather than using sys.exit().\n \"\"\"\n os.kill(os.getpid(), signum)\n\n\nclass ExitHandlers(object):\n \"\"\"Manages signal and atexit handlers.\"\"\"\n\n if platform.system() == 'Windows':\n # TODO: Windows *does* support these signals:\n # SIGABRT, SIGFPE, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGBREAK\n SIGNALS = []\n else:\n SIGNALS = [\n signal.SIGHUP,\n ]\n\n def __init__(self):\n self._signal_handlers = {sig: []\n for sig in self.SIGNALS}\n self._atexit_handlers = []\n self._installed = False\n\n @property\n def supported_signals(self):\n return set(self.SIGNALS)\n\n @property\n def installed(self):\n return self._installed\n\n def install(self):\n \"\"\"Set the parent handlers.\n\n This must be called in the main thread.\n \"\"\"\n if self._installed:\n raise AlreadyInstalledError('exit handlers already installed')\n self._installed = True\n self._install_signal_handler()\n self._install_atexit_handler()\n\n # TODO: Add uninstall()?\n\n def add_atexit_handler(self, handle_atexit, nodupe=True):\n \"\"\"Add an atexit handler to the list managed here.\"\"\"\n if nodupe and handle_atexit in self._atexit_handlers:\n raise ValueError('atexit handler alraedy added')\n self._atexit_handlers.append(handle_atexit)\n\n def add_signal_handler(self, signum, handle_signal, nodupe=True,\n ignoreunsupported=False):\n \"\"\"Add a signal handler to the list managed here.\"\"\"\n # TODO: The initialization of self.SIGNALS should make this\n # special-casing unnecessary.\n if platform.system() == 'Windows':\n return\n\n try:\n handlers = self._signal_handlers[signum]\n except KeyError:\n if ignoreunsupported:\n return\n raise UnsupportedSignalError(signum)\n if nodupe and handle_signal in handlers:\n raise ValueError('signal handler alraedy added')\n handlers.append(handle_signal)\n\n # internal methods\n\n def _install_signal_handler(self):\n # TODO: The initialization of self.SIGNALS should make this\n # special-casing unnecessary.\n if platform.system() == 'Windows':\n return\n\n orig = {}\n try:\n for sig in self._signal_handlers:\n # TODO: Skip or fail if signal.getsignal() returns None?\n orig[sig] = signal.signal(sig, self._signal_handler)\n except ValueError:\n # Wasn't called in main thread!\n raise\n\n def _signal_handler(self, signum, frame):\n for handle_signal in self._signal_handlers.get(signum, ()):\n handle_signal(signum, frame)\n\n def _install_atexit_handler(self):\n self._atexit_handlers = []\n atexit.register(self._atexit_handler)\n\n def _atexit_handler(self):\n for handle_atexit in self._atexit_handlers:\n handle_atexit()\n", "path": "src/ptvsd/exit_handlers.py"}]}
| 2,830 | 150 |
gh_patches_debug_38626
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-2026
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`Eligibility confirm`: make copy more specific, and improve field helper text

## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [x] Update page heading
- [x] Update copy of field labels and helper text
- [x] Update copy for button
- [x] Update column definition for headline and body
Form fields
- [x] Spacing in between field label and field is 12px
- [x] Spacing between field and helper text is 12px
- Styles for field label are different
- [x] Smaller size (from 18px to 16px)
- [x] Line height 125%
- [x] Letter spacing 5%
- Styles for field helper copy are different
- [x] Smaller size (from 16px to 14px)
- [x] Line height 125%
- [x] Letter spacing 5%
- [x] Spacing between fields is 24px
</issue>
<code>
[start of benefits/eligibility/forms.py]
1 """
2 The eligibility application: Form definition for the eligibility verification flow.
3 """
4
5 import logging
6
7 from django import forms
8 from django.utils.translation import gettext_lazy as _
9
10 from benefits.core import models, recaptcha, widgets
11
12 logger = logging.getLogger(__name__)
13
14
15 class EligibilityVerifierSelectionForm(forms.Form):
16 """Form to capture eligibility verifier selection."""
17
18 action_url = "eligibility:index"
19 id = "form-verifier-selection"
20 method = "POST"
21
22 verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect)
23 # sets label to empty string so the radio_select template can override the label style
24 submit_value = _("Choose this benefit")
25
26 def __init__(self, agency: models.TransitAgency, *args, **kwargs):
27 super().__init__(*args, **kwargs)
28 verifiers = agency.active_verifiers
29
30 self.classes = "col-lg-8"
31 # second element is not used since we render the whole label using selection_label_template,
32 # therefore set to None
33 self.fields["verifier"].choices = [(v.id, None) for v in verifiers]
34 self.fields["verifier"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}
35
36 def clean(self):
37 if not recaptcha.verify(self.data):
38 raise forms.ValidationError("reCAPTCHA failed")
39
40
41 class EligibilityVerificationForm(forms.Form):
42 """Form to collect eligibility verification details."""
43
44 action_url = "eligibility:confirm"
45 id = "form-eligibility-verification"
46 method = "POST"
47
48 submit_value = _("Check eligibility")
49 submitting_value = _("Checking")
50
51 _error_messages = {
52 "invalid": _("Check your input. The format looks wrong."),
53 "missing": _("This field is required."),
54 }
55
56 def __init__(
57 self,
58 title,
59 headline,
60 blurb,
61 name_label,
62 name_placeholder,
63 name_help_text,
64 sub_label,
65 sub_placeholder,
66 sub_help_text,
67 name_max_length=None,
68 sub_input_mode=None,
69 sub_max_length=None,
70 sub_pattern=None,
71 *args,
72 **kwargs,
73 ):
74 """Initialize a new EligibilityVerifier form.
75
76 Args:
77 title (str): The page (i.e. tab) title for the form's page.
78
79 headline (str): The <h1> on the form's page.
80
81 blurb (str): Intro <p> on the form's page.
82
83 name_label (str): Label for the name form field.
84
85 name_placeholder (str): Field placeholder for the name form field.
86
87 name_help_text (str): Extra help text for the name form field.
88
89 sub_label (str): Label for the sub form field.
90
91 sub_placeholder (str): Field placeholder for the sub form field.
92
93 sub_help_text (str): Extra help text for the sub form field.
94
95 name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier
96
97 sub_input_mode (str): Input mode can be "numeric", "tel", "search", etc. to override default "text" keyboard on
98 mobile devices
99
100 sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier
101
102 sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier
103
104 Extra args and kwargs are passed through to the underlying django.forms.Form.
105 """
106 super().__init__(auto_id=True, label_suffix="", *args, **kwargs)
107
108 self.title = title
109 self.headline = headline
110 self.blurb = blurb
111
112 self.classes = "col-lg-6"
113 sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)
114 if sub_pattern:
115 sub_widget.attrs.update({"pattern": sub_pattern})
116 if sub_input_mode:
117 sub_widget.attrs.update({"inputmode": sub_input_mode})
118 if sub_max_length:
119 sub_widget.attrs.update({"maxlength": sub_max_length})
120
121 self.fields["sub"] = forms.CharField(
122 label=sub_label,
123 widget=sub_widget,
124 help_text=sub_help_text,
125 )
126
127 name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)
128 if name_max_length:
129 name_widget.attrs.update({"maxlength": name_max_length})
130
131 self.fields["name"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)
132
133 def clean(self):
134 if not recaptcha.verify(self.data):
135 raise forms.ValidationError("reCAPTCHA failed")
136
137
138 class MSTCourtesyCard(EligibilityVerificationForm):
139 """EligibilityVerification form for the MST Courtesy Card."""
140
141 def __init__(self, *args, **kwargs):
142 super().__init__(
143 title=_("Agency card information"),
144             headline=_("Let’s see if we can confirm your eligibility."),
145 blurb=_("Please input your Courtesy Card number and last name below to confirm your eligibility."),
146 name_label=_("Last name (as it appears on Courtesy Card)"),
147 name_placeholder="Garcia",
148 name_help_text=_("We use this to help confirm your Courtesy Card."),
149 sub_label=_("MST Courtesy Card number"),
150 sub_help_text=_("This is a 5-digit number on the front and back of your card."),
151 sub_placeholder="12345",
152 name_max_length=255,
153 sub_input_mode="numeric",
154 sub_max_length=5,
155 sub_pattern=r"\d{5}",
156 *args,
157 **kwargs,
158 )
159
160
161 class SBMTDMobilityPass(EligibilityVerificationForm):
162 """EligibilityVerification form for the SBMTD Reduced Fare Mobility ID."""
163
164 def __init__(self, *args, **kwargs):
165 super().__init__(
166 title=_("Agency card information"),
167             headline=_("Let’s see if we can confirm your eligibility."),
168 blurb=_("Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility."),
169 name_label=_("Last name (as it appears on Reduced Fare Mobility ID card)"),
170 name_placeholder="Garcia",
171 name_help_text=_("We use this to help confirm your Reduced Fare Mobility ID."),
172 sub_label=_("SBMTD Reduced Fare Mobility ID number"),
173 sub_help_text=_("This is a 4-digit number on the back of your card."),
174 sub_placeholder="1234",
175 name_max_length=255,
176 sub_input_mode="numeric",
177 sub_max_length=4,
178 sub_pattern=r"\d{4}",
179 *args,
180 **kwargs,
181 )
182
[end of benefits/eligibility/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py
--- a/benefits/eligibility/forms.py
+++ b/benefits/eligibility/forms.py
@@ -45,7 +45,7 @@
id = "form-eligibility-verification"
method = "POST"
- submit_value = _("Check eligibility")
+ submit_value = _("Find my record")
submitting_value = _("Checking")
_error_messages = {
@@ -141,12 +141,16 @@
def __init__(self, *args, **kwargs):
super().__init__(
title=_("Agency card information"),
- headline=_("Let’s see if we can confirm your eligibility."),
- blurb=_("Please input your Courtesy Card number and last name below to confirm your eligibility."),
- name_label=_("Last name (as it appears on Courtesy Card)"),
+ headline=_("Let’s find the record of your transit benefit."),
+ blurb=_(
+ "We use the information on your MST Courtesy Card to find the record of your transit benefit in our system."
+ ),
+ name_label=_("Last Name"),
name_placeholder="Garcia",
- name_help_text=_("We use this to help confirm your Courtesy Card."),
- sub_label=_("MST Courtesy Card number"),
+ name_help_text=_(
+ "Please enter your last name the same way it is printed on your card, including capital letters and hyphens."
+ ),
+ sub_label=_("Courtesy Card number"),
sub_help_text=_("This is a 5-digit number on the front and back of your card."),
sub_placeholder="12345",
name_max_length=255,
@@ -164,12 +168,17 @@
def __init__(self, *args, **kwargs):
super().__init__(
title=_("Agency card information"),
- headline=_("Let’s see if we can confirm your eligibility."),
- blurb=_("Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility."),
- name_label=_("Last name (as it appears on Reduced Fare Mobility ID card)"),
+ headline=_("Let’s find the record of your transit benefit."),
+ blurb=_(
+ "We use the information on your SBMTD Reduced Fare Mobility ID card to find the record of your transit "
+ + "benefit in our system."
+ ),
+ name_label=_("Last Name"),
name_placeholder="Garcia",
- name_help_text=_("We use this to help confirm your Reduced Fare Mobility ID."),
- sub_label=_("SBMTD Reduced Fare Mobility ID number"),
+ name_help_text=_(
+ "Please enter your last name the same way it is printed on your card, including capital letters and hyphens."
+ ),
+ sub_label=_("Reduced Fare Mobility ID card number"),
sub_help_text=_("This is a 4-digit number on the back of your card."),
sub_placeholder="1234",
name_max_length=255,
|
{"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -45,7 +45,7 @@\n id = \"form-eligibility-verification\"\n method = \"POST\"\n \n- submit_value = _(\"Check eligibility\")\n+ submit_value = _(\"Find my record\")\n submitting_value = _(\"Checking\")\n \n _error_messages = {\n@@ -141,12 +141,16 @@\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n- headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n- blurb=_(\"Please input your Courtesy Card number and last name below to confirm your eligibility.\"),\n- name_label=_(\"Last name (as it appears on Courtesy Card)\"),\n+ headline=_(\"Let\u2019s find the record of your transit benefit.\"),\n+ blurb=_(\n+ \"We use the information on your MST Courtesy Card to find the record of your transit benefit in our system.\"\n+ ),\n+ name_label=_(\"Last Name\"),\n name_placeholder=\"Garcia\",\n- name_help_text=_(\"We use this to help confirm your Courtesy Card.\"),\n- sub_label=_(\"MST Courtesy Card number\"),\n+ name_help_text=_(\n+ \"Please enter your last name the same way it is printed on your card, including capital letters and hyphens.\"\n+ ),\n+ sub_label=_(\"Courtesy Card number\"),\n sub_help_text=_(\"This is a 5-digit number on the front and back of your card.\"),\n sub_placeholder=\"12345\",\n name_max_length=255,\n@@ -164,12 +168,17 @@\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n- headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n- blurb=_(\"Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility.\"),\n- name_label=_(\"Last name (as it appears on Reduced Fare Mobility ID card)\"),\n+ headline=_(\"Let\u2019s find the record of your transit benefit.\"),\n+ blurb=_(\n+ \"We use the information on your SBMTD Reduced Fare Mobility ID card to find the record of your transit \"\n+ + \"benefit in our system.\"\n+ ),\n+ name_label=_(\"Last Name\"),\n name_placeholder=\"Garcia\",\n- name_help_text=_(\"We use this to help confirm your Reduced Fare Mobility ID.\"),\n- sub_label=_(\"SBMTD Reduced Fare Mobility ID number\"),\n+ name_help_text=_(\n+ \"Please enter your last name the same way it is printed on your card, including capital letters and hyphens.\"\n+ ),\n+ sub_label=_(\"Reduced Fare Mobility ID card number\"),\n sub_help_text=_(\"This is a 4-digit number on the back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n", "issue": "`Eligibility confirm`: make copy more specific, and improve field helper text\n\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [x] Update page heading\r\n- [x] Update copy of field labels and helper text\r\n- [x] Update copy for button\r\n- [x] Update column definition for headline and body\r\n\r\nForm fields\r\n\r\n- [x] Spacing in between field label and field is 12px\r\n- [x] Spacing between field and helper text is 12px\r\n- Styles for field label are different\r\n - [x] Smaller size (from 18px to 16px)\r\n - [x] Line height 125%\r\n - [x] Letter spacing 5%\r\n - Styles for field helper copy are different\r\n - [x] Smaller size (from 16px to 14px)\r\n - [x] Line height 125%\r\n - [x] Letter spacing 5%\r\n - [x] Spacing between fields is 24px\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility 
verification flow.\n\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n id = \"form-verifier-selection\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n submit_value = _(\"Choose this benefit\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n verifiers = agency.active_verifiers\n\n self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n id = \"form-eligibility-verification\"\n method = \"POST\"\n\n submit_value = _(\"Check eligibility\")\n submitting_value = _(\"Checking\")\n\n _error_messages = {\n \"invalid\": _(\"Check your input. The format looks wrong.\"),\n \"missing\": _(\"This field is required.\"),\n }\n\n def __init__(\n self,\n title,\n headline,\n blurb,\n name_label,\n name_placeholder,\n name_help_text,\n sub_label,\n sub_placeholder,\n sub_help_text,\n name_max_length=None,\n sub_input_mode=None,\n sub_max_length=None,\n sub_pattern=None,\n *args,\n **kwargs,\n ):\n \"\"\"Initialize a new EligibilityVerifier form.\n\n Args:\n title (str): The page (i.e. tab) title for the form's page.\n\n headline (str): The <h1> on the form's page.\n\n blurb (str): Intro <p> on the form's page.\n\n name_label (str): Label for the name form field.\n\n name_placeholder (str): Field placeholder for the name form field.\n\n name_help_text (str): Extra help text for the name form field.\n\n sub_label (str): Label for the sub form field.\n\n sub_placeholder (str): Field placeholder for the sub form field.\n\n sub_help_text (str): Extra help text for the sub form field.\n\n name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier\n\n sub_input_mode (str): Input mode can be \"numeric\", \"tel\", \"search\", etc. 
to override default \"text\" keyboard on\n mobile devices\n\n sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier\n\n sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier\n\n Extra args and kwargs are passed through to the underlying django.forms.Form.\n \"\"\"\n super().__init__(auto_id=True, label_suffix=\"\", *args, **kwargs)\n\n self.title = title\n self.headline = headline\n self.blurb = blurb\n\n self.classes = \"col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)\n if sub_pattern:\n sub_widget.attrs.update({\"pattern\": sub_pattern})\n if sub_input_mode:\n sub_widget.attrs.update({\"inputmode\": sub_input_mode})\n if sub_max_length:\n sub_widget.attrs.update({\"maxlength\": sub_max_length})\n\n self.fields[\"sub\"] = forms.CharField(\n label=sub_label,\n widget=sub_widget,\n help_text=sub_help_text,\n )\n\n name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)\n if name_max_length:\n name_widget.attrs.update({\"maxlength\": name_max_length})\n\n self.fields[\"name\"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass MSTCourtesyCard(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the MST Courtesy Card.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Courtesy Card number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Courtesy Card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Courtesy Card.\"),\n sub_label=_(\"MST Courtesy Card number\"),\n sub_help_text=_(\"This is a 5-digit number on the front and back of your card.\"),\n sub_placeholder=\"12345\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=5,\n sub_pattern=r\"\\d{5}\",\n *args,\n **kwargs,\n )\n\n\nclass SBMTDMobilityPass(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the SBMTD Reduced Fare Mobility ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Reduced Fare Mobility ID card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Reduced Fare Mobility ID.\"),\n sub_label=_(\"SBMTD Reduced Fare Mobility ID number\"),\n sub_help_text=_(\"This is a 4-digit number on the back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=4,\n sub_pattern=r\"\\d{4}\",\n *args,\n **kwargs,\n )\n", "path": "benefits/eligibility/forms.py"}]}
| 2,665 | 661 |
gh_patches_debug_10139
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-3807
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[beta][v20] The notifications API only returns unread notifications
Server: Beta
Version: v20/6bb2f75
System: Mac OS X
Browser: 52.0.2743.116 (64-bit)
---
1. Retrieve your notifications from the API
2. Observe that the server only returns the unread notifications.
</issue>
<code>
[start of zds/notification/api/views.py]
1 # coding: utf-8
2 from dry_rest_permissions.generics import DRYPermissions
3 from rest_framework import filters
4 from rest_framework.generics import ListAPIView
5 from rest_framework.permissions import IsAuthenticated
6 from rest_framework_extensions.cache.decorators import cache_response
7 from rest_framework_extensions.etag.decorators import etag
8 from rest_framework_extensions.key_constructor import bits
9 from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
10
11 from zds.api.bits import DJRF3xPaginationKeyBit
12 from zds.notification.api.serializers import NotificationSerializer
13 from zds.notification.models import Notification
14
15
16 class PagingNotificationListKeyConstructor(DefaultKeyConstructor):
17 pagination = DJRF3xPaginationKeyBit()
18 search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])
19 list_sql_query = bits.ListSqlQueryKeyBit()
20 unique_view_id = bits.UniqueViewIdKeyBit()
21 user = bits.UserKeyBit()
22
23
24 class NotificationListAPI(ListAPIView):
25 """
26 List of notification.
27 """
28
29 filter_backends = (filters.SearchFilter, filters.OrderingFilter)
30 search_fields = ('title',)
31 ordering_fields = ('pubdate', 'title',)
32 list_key_func = PagingNotificationListKeyConstructor()
33 serializer_class = NotificationSerializer
34 permission_classes = (IsAuthenticated, DRYPermissions,)
35
36 @etag(list_key_func)
37 @cache_response(key_func=list_key_func)
38 def get(self, request, *args, **kwargs):
39 """
40 Lists all notifications of a user.
41 ---
42
43 parameters:
44 - name: Authorization
45 description: Bearer token to make an authenticated request.
46 required: true
47 paramType: header
48 - name: page
49 description: Restricts output to the given page number.
50 required: false
51 paramType: query
52 - name: page_size
53 description: Sets the number of notifications per page.
54 required: false
55 paramType: query
56 - name: search
57 description: Filters by title.
58 required: false
59 paramType: query
60 - name: ordering
61 description: Sorts the results. You can order by (-)pubdate or (-)title.
62 paramType: query
63 - name: type
64 description: Filters by notification type.
65 paramType: query
66 - name: subscription_type
67 description: Filters by subscription type.
68 paramType: query
69 - name: expand
70 description: Returns an object instead of an identifier representing the given field.
71 required: false
72 paramType: query
73 responseMessages:
74 - code: 401
75 message: Not Authenticated
76 - code: 404
77 message: Not Found
78 """
79 return self.list(request, *args, **kwargs)
80
81 def get_queryset(self):
82 queryset = Notification.objects.get_unread_notifications_of(self.request.user)
83 subscription_type = self.request.query_params.get('subscription_type', None)
84 if subscription_type:
85 queryset = queryset.filter(subscription__content_type__model=subscription_type)
86 _type = self.request.query_params.get('type', None)
87 if _type:
88 queryset = queryset.filter(content_type__model=_type)
89 return queryset
90
[end of zds/notification/api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py
--- a/zds/notification/api/views.py
+++ b/zds/notification/api/views.py
@@ -79,7 +79,7 @@
return self.list(request, *args, **kwargs)
def get_queryset(self):
- queryset = Notification.objects.get_unread_notifications_of(self.request.user)
+ queryset = Notification.objects.get_notifications_of(self.request.user)
subscription_type = self.request.query_params.get('subscription_type', None)
if subscription_type:
queryset = queryset.filter(subscription__content_type__model=subscription_type)
|
{"golden_diff": "diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py\n--- a/zds/notification/api/views.py\n+++ b/zds/notification/api/views.py\n@@ -79,7 +79,7 @@\n return self.list(request, *args, **kwargs)\n \n def get_queryset(self):\n- queryset = Notification.objects.get_unread_notifications_of(self.request.user)\n+ queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get('subscription_type', None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n", "issue": "[beta][v20] L'api des notifications renvoie que les notifs non lues\nServeur : Beta\nVersion : v20/6bb2f75\nSyst\u00e8me : Mac OS X\nNavigateur : 52.0.2743.116 (64-bit)\n\n---\n1. R\u00e9cup\u00e9rez vos notifications depuis l'API\n2. Constatez que le serveur renvoie uniquement les notifs non lues.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n\n\nclass NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = (filters.SearchFilter, filters.OrderingFilter)\n search_fields = ('title',)\n ordering_fields = ('pubdate', 'title',)\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (IsAuthenticated, DRYPermissions,)\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. 
You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_unread_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get('subscription_type', None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get('type', None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n return queryset\n", "path": "zds/notification/api/views.py"}]}
| 1,490 | 131 |
gh_patches_debug_38905
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-705
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
elasticsearch instrumentation creates too many span names, with unique document IDs.
**Describe your environment**
Using `opentelemetry-instrumentation-elasticsearch` `0.24b0` with `elasticsearch` `7.14.1`
**Steps to reproduce**
Perform some `index()` and/or `delete()` calls with `id` parameter set
**What is the expected behavior?**
The number of span names created is reasonable and finite
**What is the actual behavior?**
An unbounded number of span names get created, containing the unique document names. This makes it hard to search through operations, for example in Jaeger:

The `datamart.test.xxxx` parts are the Elasticsearch document IDs.
**Additional context**
I have a patch for this incoming.
</issue>
<code>
[start of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing HTTP elasticsearch made by the
17 `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
25 import elasticsearch
26
27
28 # instrument elasticsearch
29 ElasticsearchInstrumentor().instrument()
30
31 # Using elasticsearch as normal now will automatically generate spans
32 es = elasticsearch.Elasticsearch()
33 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
34 es.get(index='my-index', doc_type='my-type', id=1)
35
36 API
37 ---
38
39 Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
40 can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`
41 environment variable or by passing the prefix as an argument to the instrumentor. For example,
42
43
44 .. code-block:: python
45
46 ElasticsearchInstrumentor("my-custom-prefix").instrument()
47
48
49 The `instrument` method accepts the following keyword args:
50
51 tracer_provider (TracerProvider) - an optional tracer provider
52 request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
53 this function signature is:
54 def request_hook(span: Span, method: str, url: str, kwargs)
55 response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
56 this function signature is:
57 def response_hook(span: Span, response: dict)
58
59 for example:
60
61 .. code: python
62
63 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
64 import elasticsearch
65
66 def request_hook(span, method, url, kwargs):
67 if span and span.is_recording():
68 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
69
70 def response_hook(span, response):
71 if span and span.is_recording():
72 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
73
74 # instrument elasticsearch with request and response hooks
75 ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
76
77 # Using elasticsearch as normal now will automatically generate spans,
78 # including user custom attributes added from the hooks
79 es = elasticsearch.Elasticsearch()
80 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
81 es.get(index='my-index', doc_type='my-type', id=1)
82 """
83
84 from logging import getLogger
85 from os import environ
86 from typing import Collection
87
88 import elasticsearch
89 import elasticsearch.exceptions
90 from wrapt import wrap_function_wrapper as _wrap
91
92 from opentelemetry.instrumentation.elasticsearch.package import _instruments
93 from opentelemetry.instrumentation.elasticsearch.version import __version__
94 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
95 from opentelemetry.instrumentation.utils import unwrap
96 from opentelemetry.semconv.trace import SpanAttributes
97 from opentelemetry.trace import SpanKind, get_tracer
98
99 logger = getLogger(__name__)
100
101
102 # Values to add as tags from the actual
103 # payload returned by Elasticsearch, if any.
104 _ATTRIBUTES_FROM_RESULT = [
105 "found",
106 "timed_out",
107 "took",
108 ]
109
110 _DEFALT_OP_NAME = "request"
111
112
113 class ElasticsearchInstrumentor(BaseInstrumentor):
114 """An instrumentor for elasticsearch
115 See `BaseInstrumentor`
116 """
117
118 def __init__(self, span_name_prefix=None):
119 if not span_name_prefix:
120 span_name_prefix = environ.get(
121 "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX", "Elasticsearch",
122 )
123 self._span_name_prefix = span_name_prefix.strip()
124 super().__init__()
125
126 def instrumentation_dependencies(self) -> Collection[str]:
127 return _instruments
128
129 def _instrument(self, **kwargs):
130 """
131 Instruments elasticsarch module
132 """
133 tracer_provider = kwargs.get("tracer_provider")
134 tracer = get_tracer(__name__, __version__, tracer_provider)
135 request_hook = kwargs.get("request_hook")
136 response_hook = kwargs.get("response_hook")
137 _wrap(
138 elasticsearch,
139 "Transport.perform_request",
140 _wrap_perform_request(
141 tracer, self._span_name_prefix, request_hook, response_hook
142 ),
143 )
144
145 def _uninstrument(self, **kwargs):
146 unwrap(elasticsearch.Transport, "perform_request")
147
148
149 def _wrap_perform_request(
150 tracer, span_name_prefix, request_hook=None, response_hook=None
151 ):
152 # pylint: disable=R0912
153 def wrapper(wrapped, _, args, kwargs):
154 method = url = None
155 try:
156 method, url, *_ = args
157 except IndexError:
158 logger.warning(
159 "expected perform_request to receive two positional arguments. "
160 "Got %d",
161 len(args),
162 )
163
164 op_name = span_name_prefix + (url or method or _DEFALT_OP_NAME)
165 params = kwargs.get("params", {})
166 body = kwargs.get("body", None)
167
168 with tracer.start_as_current_span(
169 op_name, kind=SpanKind.CLIENT,
170 ) as span:
171
172 if callable(request_hook):
173 request_hook(span, method, url, kwargs)
174
175 if span.is_recording():
176 attributes = {
177 SpanAttributes.DB_SYSTEM: "elasticsearch",
178 }
179 if url:
180 attributes["elasticsearch.url"] = url
181 if method:
182 attributes["elasticsearch.method"] = method
183 if body:
184 attributes[SpanAttributes.DB_STATEMENT] = str(body)
185 if params:
186 attributes["elasticsearch.params"] = str(params)
187 for key, value in attributes.items():
188 span.set_attribute(key, value)
189
190 rv = wrapped(*args, **kwargs)
191 if isinstance(rv, dict) and span.is_recording():
192 for member in _ATTRIBUTES_FROM_RESULT:
193 if member in rv:
194 span.set_attribute(
195 f"elasticsearch.{member}", str(rv[member]),
196 )
197
198 if callable(response_hook):
199 response_hook(span, rv)
200 return rv
201
202 return wrapper
203
[end of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
@@ -81,6 +81,7 @@
es.get(index='my-index', doc_type='my-type', id=1)
"""
+import re
from logging import getLogger
from os import environ
from typing import Collection
@@ -107,7 +108,7 @@
"took",
]
-_DEFALT_OP_NAME = "request"
+_DEFAULT_OP_NAME = "request"
class ElasticsearchInstrumentor(BaseInstrumentor):
@@ -146,6 +147,9 @@
unwrap(elasticsearch.Transport, "perform_request")
+_regex_doc_url = re.compile(r"/_doc/([^/]+)")
+
+
def _wrap_perform_request(
tracer, span_name_prefix, request_hook=None, response_hook=None
):
@@ -161,7 +165,24 @@
len(args),
)
- op_name = span_name_prefix + (url or method or _DEFALT_OP_NAME)
+ op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
+ doc_id = None
+ if url:
+ # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
+ # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
+ # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708
+ match = _regex_doc_url.search(url)
+ if match is not None:
+ # Remove the full document ID from the URL
+ doc_span = match.span()
+ op_name = (
+ span_name_prefix
+ + url[: doc_span[0]]
+ + "/_doc/:id"
+ + url[doc_span[1] :]
+ )
+ # Put the document ID in attributes
+ doc_id = match.group(1)
params = kwargs.get("params", {})
body = kwargs.get("body", None)
@@ -184,6 +205,8 @@
attributes[SpanAttributes.DB_STATEMENT] = str(body)
if params:
attributes["elasticsearch.params"] = str(params)
+ if doc_id:
+ attributes["elasticsearch.id"] = doc_id
for key, value in attributes.items():
span.set_attribute(key, value)
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n@@ -81,6 +81,7 @@\n es.get(index='my-index', doc_type='my-type', id=1)\n \"\"\"\n \n+import re\n from logging import getLogger\n from os import environ\n from typing import Collection\n@@ -107,7 +108,7 @@\n \"took\",\n ]\n \n-_DEFALT_OP_NAME = \"request\"\n+_DEFAULT_OP_NAME = \"request\"\n \n \n class ElasticsearchInstrumentor(BaseInstrumentor):\n@@ -146,6 +147,9 @@\n unwrap(elasticsearch.Transport, \"perform_request\")\n \n \n+_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n+\n+\n def _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n ):\n@@ -161,7 +165,24 @@\n len(args),\n )\n \n- op_name = span_name_prefix + (url or method or _DEFALT_OP_NAME)\n+ op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n+ doc_id = None\n+ if url:\n+ # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n+ # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n+ # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n+ match = _regex_doc_url.search(url)\n+ if match is not None:\n+ # Remove the full document ID from the URL\n+ doc_span = match.span()\n+ op_name = (\n+ span_name_prefix\n+ + url[: doc_span[0]]\n+ + \"/_doc/:id\"\n+ + url[doc_span[1] :]\n+ )\n+ # Put the document ID in attributes\n+ doc_id = match.group(1)\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n \n@@ -184,6 +205,8 @@\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n+ if doc_id:\n+ attributes[\"elasticsearch.id\"] = doc_id\n for key, value in attributes.items():\n span.set_attribute(key, value)\n", "issue": "elasticsearch instrumentation creates too many span names, with unique document IDs.\n**Describe your environment**\r\nUsing `opentelemetry-instrumentation-elasticsearch` `0.24b0` with `elasticsearch` `7.14.1`\r\n\r\n**Steps to reproduce**\r\nPerform some `index()` and/or `delete()` calls with `id` parameter set\r\n\r\n**What is the expected behavior?**\r\nThe number of span names created is reasonable and finite\r\n\r\n**What is the actual behavior?**\r\nAn unbounded number of span names get created, containing the unique document names. 
This makes it hard to search through operations, for example in Jaeger:\r\n\r\n\r\n\r\nThe `datamart.test.xxxx` parts are the Elasticsearch document IDs.\r\n\r\n**Additional context**\r\nI have a patch for this incoming.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\n\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n this function signature is:\n def response_hook(span: Span, response: dict)\n\nfor example:\n\n.. 
code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\"\"\"\n\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFALT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\", \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments elasticsarch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer, self._span_name_prefix, request_hook, response_hook\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\ndef _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n):\n # pylint: disable=R0912\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. 
\"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFALT_OP_NAME)\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name, kind=SpanKind.CLIENT,\n ) as span:\n\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\", str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}]}
| 2,745 | 630 |
gh_patches_debug_20469
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-3091
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bad error handling in /ttype/<type> endpoint
We observed a bad error handling when requesting the `/ttype/<type>` endpoint.
In specific, we faced the following error:
```
[ERROR][privacyidea.app:1892] Exception on /ttype/push"}. [GET]
...
AttributeError: 'NoneType' object has no attribute 'api_endpoint'
```
Actually, we could fix the problem but it would be nice to fix this upstream right away.
### Top-level intent
Access the `/ttype/<type>` endpoint.
### Steps to reproduce
1. Query `/ttype/test` endpoint
2. There will be a NoneType error in the logs.
### Expected outcome
Proper error handling
### Actual outcome
NoneType exception.
### Configuration
* **privacyIDEA version**: v3.6.3
* **Installation method**: (from Ubuntu packages, github, PyPI, ...)
* **Python version**: 3
* **Operating system**: linux
* **Webserver**: apache
### Log file
**Set PI_LOGLEVEL = logging.DEBUG in pi.cfg and take a look at the privacyidea.log!**
**If appropriate, attach the log file or paste relevant portions.**
</issue>
<code>
[start of privacyidea/api/ttype.py]
1 # -*- coding: utf-8 -*-
2 #
3 # http://www.privacyidea.org
4 # (c) Cornelius Kölbel, privacyidea.org
5 #
6 # 2015-09-01 Cornelius Kölbel, <[email protected]>
7 # Initial writeup
8 #
9 # This code is free software; you can redistribute it and/or
10 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
11 # License as published by the Free Software Foundation; either
12 # version 3 of the License, or any later version.
13 #
14 # This code is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
18 #
19 # You should have received a copy of the GNU Affero General Public
20 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #
22 """
23 This API endpoint is a generic endpoint that can be used by any token
24 type.
25
26 The tokentype needs to implement a classmethod *api_endpoint* and can then be
27 called by /ttype/<tokentype>.
28 This way, each tokentype can create its own API without the need to change
29 the core API.
30
31 The TiQR Token uses this API to implement its special functionalities. See
32 :ref:`code_tiqr_token`.
33 """
34 from flask import (Blueprint,
35 request)
36 from .lib.utils import getParam
37 from ..lib.log import log_with
38 from flask import g, jsonify, current_app
39 import logging
40 from privacyidea.api.lib.utils import get_all_params
41 from privacyidea.lib.policy import PolicyClass
42 from privacyidea.lib.audit import getAudit
43 from privacyidea.lib.config import (get_token_class, get_from_config,
44 SYSCONF, ensure_no_config_object)
45 from privacyidea.lib.user import get_user_from_param
46 from privacyidea.lib.utils import get_client_ip
47 import json
48
49 log = logging.getLogger(__name__)
50
51 ttype_blueprint = Blueprint('ttype_blueprint', __name__)
52
53
54 @ttype_blueprint.before_request
55 def before_request():
56 """
57 This is executed before the request
58 """
59 ensure_no_config_object()
60 request.all_data = get_all_params(request)
61 privacyidea_server = current_app.config.get("PI_AUDIT_SERVERNAME") or \
62 request.host
63 # Create a policy_object, that reads the database audit settings
64 # and contains the complete policy definition during the request.
65 # This audit_object can be used in the postpolicy and prepolicy and it
66 # can be passed to the innerpolicies.
67 g.policy_object = PolicyClass()
68 g.audit_object = getAudit(current_app.config)
69 # access_route contains the ip adresses of all clients, hops and proxies.
70 g.client_ip = get_client_ip(request,
71 get_from_config(SYSCONF.OVERRIDECLIENT))
72 g.serial = getParam(request.all_data, "serial") or None
73 g.audit_object.log({"success": False,
74 "action_detail": "",
75 "client": g.client_ip,
76 "client_user_agent": request.user_agent.browser,
77 "privacyidea_server": privacyidea_server,
78 "action": "{0!s} {1!s}".format(request.method, request.url_rule),
79 "info": ""})
80
81
82 @ttype_blueprint.route('/<ttype>', methods=['POST', 'GET'])
83 @log_with(log)
84 def token(ttype=None):
85 """
86 This is a special token function. Each token type can define an
87 additional API call, that does not need authentication on the REST API
88 level.
89
90 :return: Token Type dependent
91 """
92 tokenc = get_token_class(ttype)
93 res = tokenc.api_endpoint(request, g)
94 serial = getParam(request.all_data, "serial")
95 user = get_user_from_param(request.all_data)
96 g.audit_object.log({"success": 1,
97 "user": user.login,
98 "realm": user.realm,
99 "serial": serial,
100 "token_type": ttype})
101 if res[0] == "json":
102 return jsonify(res[1])
103 elif res[0] in ["html", "plain"]:
104 return current_app.response_class(res[1], mimetype="text/{0!s}".format(res[0]))
105 elif len(res) == 2:
106 return current_app.response_class(json.dumps(res[1]),
107 mimetype="application/{0!s}".format(res[0]))
108 else:
109 return current_app.response_class(res[1], mimetype="application/octet-binary",
110 headers=res[2])
111
[end of privacyidea/api/ttype.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/privacyidea/api/ttype.py b/privacyidea/api/ttype.py
--- a/privacyidea/api/ttype.py
+++ b/privacyidea/api/ttype.py
@@ -38,6 +38,7 @@
from flask import g, jsonify, current_app
import logging
from privacyidea.api.lib.utils import get_all_params
+from privacyidea.lib.error import ParameterError
from privacyidea.lib.policy import PolicyClass
from privacyidea.lib.audit import getAudit
from privacyidea.lib.config import (get_token_class, get_from_config,
@@ -90,6 +91,9 @@
:return: Token Type dependent
"""
tokenc = get_token_class(ttype)
+ if tokenc is None:
+ log.error(u"Invalid tokentype provided. ttype: {}".format(ttype.lower()))
+ raise ParameterError(u"Invalid tokentype provided. ttype: {}".format(ttype.lower()))
res = tokenc.api_endpoint(request, g)
serial = getParam(request.all_data, "serial")
user = get_user_from_param(request.all_data)
|
{"golden_diff": "diff --git a/privacyidea/api/ttype.py b/privacyidea/api/ttype.py\n--- a/privacyidea/api/ttype.py\n+++ b/privacyidea/api/ttype.py\n@@ -38,6 +38,7 @@\n from flask import g, jsonify, current_app\n import logging\n from privacyidea.api.lib.utils import get_all_params\n+from privacyidea.lib.error import ParameterError\n from privacyidea.lib.policy import PolicyClass\n from privacyidea.lib.audit import getAudit\n from privacyidea.lib.config import (get_token_class, get_from_config,\n@@ -90,6 +91,9 @@\n :return: Token Type dependent\n \"\"\"\n tokenc = get_token_class(ttype)\n+ if tokenc is None:\n+ log.error(u\"Invalid tokentype provided. ttype: {}\".format(ttype.lower()))\n+ raise ParameterError(u\"Invalid tokentype provided. ttype: {}\".format(ttype.lower()))\n res = tokenc.api_endpoint(request, g)\n serial = getParam(request.all_data, \"serial\")\n user = get_user_from_param(request.all_data)\n", "issue": "Bad error handling in /ttype/<type> endpoint\nWe observed a bad error handling when requesting the `/ttype/<type>` endpoint.\r\n\r\nIn specific, we faced the following error:\r\n```\r\n[ERROR][privacyidea.app:1892] Exception on /ttype/push\"}. [GET]\r\n...\r\nAttributeError: 'NoneType' object has no attribute 'api_endpoint'\r\n```\r\nActually, we could fix the problem but it would be nice to fix this upstream right away.\r\n\r\n### Top-level intent\r\n\r\nAccess the `/ttype/<type>` endpoint.\r\n\r\n### Steps to reproduce\r\n\r\n1. Query `/ttype/test` endpoint\r\n2. There will be a NoneType error in the logs.\r\n\r\n### Expected outcome\r\n\r\nProper error handling\r\n\r\n### Actual outcome\r\n\r\nNoneType exception.\r\n\r\n### Configuration\r\n\r\n* **privacyIDEA version**: v3.6.3\r\n* **Installation method**: (from Ubuntu packages, github, PyPI, ...)\r\n* **Python version**: 3\r\n* **Operating system**: linux\r\n* **Webserver**: apache\r\n\r\n\r\n### Log file\r\n\r\n**Set PI_LOGLEVEL = logging.DEBUG in pi.cfg and take a look at the privacyidea.log!**\r\n**If appropriate, attach the log file or paste relevant portions.**\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# http://www.privacyidea.org\n# (c) Cornelius K\u00f6lbel, privacyidea.org\n#\n# 2015-09-01 Cornelius K\u00f6lbel, <[email protected]>\n# Initial writeup\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis API endpoint is a generic endpoint that can be used by any token\ntype.\n\nThe tokentype needs to implement a classmethod *api_endpoint* and can then be\ncalled by /ttype/<tokentype>.\nThis way, each tokentype can create its own API without the need to change\nthe core API.\n\nThe TiQR Token uses this API to implement its special functionalities. 
See\n:ref:`code_tiqr_token`.\n\"\"\"\nfrom flask import (Blueprint,\n request)\nfrom .lib.utils import getParam\nfrom ..lib.log import log_with\nfrom flask import g, jsonify, current_app\nimport logging\nfrom privacyidea.api.lib.utils import get_all_params\nfrom privacyidea.lib.policy import PolicyClass\nfrom privacyidea.lib.audit import getAudit\nfrom privacyidea.lib.config import (get_token_class, get_from_config,\n SYSCONF, ensure_no_config_object)\nfrom privacyidea.lib.user import get_user_from_param\nfrom privacyidea.lib.utils import get_client_ip\nimport json\n\nlog = logging.getLogger(__name__)\n\nttype_blueprint = Blueprint('ttype_blueprint', __name__)\n\n\n@ttype_blueprint.before_request\ndef before_request():\n \"\"\"\n This is executed before the request\n \"\"\"\n ensure_no_config_object()\n request.all_data = get_all_params(request)\n privacyidea_server = current_app.config.get(\"PI_AUDIT_SERVERNAME\") or \\\n request.host\n # Create a policy_object, that reads the database audit settings\n # and contains the complete policy definition during the request.\n # This audit_object can be used in the postpolicy and prepolicy and it\n # can be passed to the innerpolicies.\n g.policy_object = PolicyClass()\n g.audit_object = getAudit(current_app.config)\n # access_route contains the ip adresses of all clients, hops and proxies.\n g.client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n g.serial = getParam(request.all_data, \"serial\") or None\n g.audit_object.log({\"success\": False,\n \"action_detail\": \"\",\n \"client\": g.client_ip,\n \"client_user_agent\": request.user_agent.browser,\n \"privacyidea_server\": privacyidea_server,\n \"action\": \"{0!s} {1!s}\".format(request.method, request.url_rule),\n \"info\": \"\"})\n\n\n@ttype_blueprint.route('/<ttype>', methods=['POST', 'GET'])\n@log_with(log)\ndef token(ttype=None):\n \"\"\"\n This is a special token function. Each token type can define an\n additional API call, that does not need authentication on the REST API\n level.\n\n :return: Token Type dependent\n \"\"\"\n tokenc = get_token_class(ttype)\n res = tokenc.api_endpoint(request, g)\n serial = getParam(request.all_data, \"serial\")\n user = get_user_from_param(request.all_data)\n g.audit_object.log({\"success\": 1,\n \"user\": user.login,\n \"realm\": user.realm,\n \"serial\": serial,\n \"token_type\": ttype})\n if res[0] == \"json\":\n return jsonify(res[1])\n elif res[0] in [\"html\", \"plain\"]:\n return current_app.response_class(res[1], mimetype=\"text/{0!s}\".format(res[0]))\n elif len(res) == 2:\n return current_app.response_class(json.dumps(res[1]),\n mimetype=\"application/{0!s}\".format(res[0]))\n else:\n return current_app.response_class(res[1], mimetype=\"application/octet-binary\",\n headers=res[2])\n", "path": "privacyidea/api/ttype.py"}]}
| 2,006 | 237 |
gh_patches_debug_6243
|
rasdani/github-patches
|
git_diff
|
lightly-ai__lightly-216
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make transform settable in Lightly Dataset
Currently if one has created a Lightly Dataset and wants to apply transforms at a later stage, we have to do it as follows:
```
dataset = data.LightlyDataset(root="./", name='CIFAR10', download=True)
...
use dataset to train SimCLR model for example
...
test_transforms = torchvision.transforms.ToTensor()
dataset.dataset.transform = test_transforms
```
We should extend the Dataset wrapper to directly support transforms.
</issue>
<code>
[start of lightly/data/dataset.py]
1 """ Lightly Dataset """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import os
7 import shutil
8 from PIL import Image
9 from typing import List, Union
10
11 import torch.utils.data as data
12 import torchvision.datasets as datasets
13
14 from lightly.data._helpers import _load_dataset
15 from lightly.data._helpers import DatasetFolder
16 from lightly.data._video import VideoDataset
17
18
19 def _get_filename_by_index(dataset, index):
20 """Default function which maps the index of an image to a filename.
21
22 """
23 if isinstance(dataset, datasets.ImageFolder):
24 # filename is the path of the image relative to the dataset root
25 full_path = dataset.imgs[index][0]
26 return os.path.relpath(full_path, dataset.root)
27 elif isinstance(dataset, DatasetFolder):
28 # filename is the path of the image relative to the dataset root
29 full_path = dataset.samples[index][0]
30 return os.path.relpath(full_path, dataset.root)
31 elif isinstance(dataset, VideoDataset):
32 # filename is constructed by the video dataset
33 return dataset.get_filename(index)
34 else:
35 # dummy to prevent crashes
36 return str(index)
37
38
39 def _ensure_dir(path):
40 """Makes sure that the directory at path exists.
41
42 """
43 dirname = os.path.dirname(path)
44 os.makedirs(dirname, exist_ok=True)
45
46
47 def _copy_image(input_dir, output_dir, filename):
48 """Copies an image from the input directory to the output directory.
49
50 """
51 source = os.path.join(input_dir, filename)
52 target = os.path.join(output_dir, filename)
53 _ensure_dir(target)
54 shutil.copyfile(source, target)
55
56 def _save_image(image, output_dir, filename, fmt):
57 """Saves an image in the output directory.
58
59 """
60 target = os.path.join(output_dir, filename)
61 _ensure_dir(target)
62 try:
63 # try to save the image with the specified format or
64 # derive the format from the filename (if format=None)
65 image.save(target, format=fmt)
66 except ValueError:
67 # could not determine format from filename
68 image.save(target, format='png')
69
70
71 def _dump_image(dataset, output_dir, filename, index, fmt):
72 """Saves a single image to the output directory.
73
74 Will copy the image from the input directory to the output directory
75 if possible. If not (e.g. for VideoDatasets), will load the image and
76 then save it to the output directory with the specified format.
77
78 """
79
80 if isinstance(dataset, datasets.ImageFolder):
81 # can safely copy the image from the input to the output directory
82 _copy_image(dataset.root, output_dir, filename)
83 elif isinstance(dataset, DatasetFolder):
84 # can safely copy the image from the input to the output directory
85 _copy_image(dataset.root, output_dir, filename)
86 else:
87 # need to load the image and save it to the output directory
88 image, _ = dataset[index]
89 _save_image(image, output_dir, filename, fmt)
90
91
92 class LightlyDataset:
93 """Provides a uniform data interface for the embedding models.
94
95 Should be used for all models and functions in the lightly package.
96 Returns a tuple (sample, target, fname) when accessed using __getitem__
97
98 Can either be used to load a dataset offered by torchvision (e.g. cifar10)
99 or to load a custom dataset from an input folder.
100
101 Args:
102 input_dir:
103 Path to directory holding the images to load.
104 transform:
105 Image transforms (as in torchvision).
106 index_to_filename:
107 Function which takes the dataset and index as input and returns
108 the filename of the file at the index. If None, uses default.
109
110 Examples:
111 >>> # load cifar10 from a local folder
112 >>> import lightly.data as data
113 >>> dataset = data.LightlyDataset(input_dir='path/to/cifar10/')
114 >>> sample, target, fname = dataset[0]
115
116 """
117
118 def __init__(self,
119 input_dir: str,
120 transform=None,
121 index_to_filename=None):
122
123 # can pass input_dir=None to create an "empty" dataset
124 self.input_dir = input_dir
125 if self.input_dir is not None:
126 self.dataset = _load_dataset(self.input_dir, transform)
127
128 # initialize function to get filename of image
129 self.index_to_filename = _get_filename_by_index
130 if index_to_filename is not None:
131 self.index_to_filename = index_to_filename
132
133 @classmethod
134 def from_torch_dataset(cls,
135 dataset,
136 transform=None,
137 index_to_filename=None):
138 """Builds a LightlyDataset from a PyTorch (or torchvision) dataset.
139
140 Args:
141 dataset:
142 PyTorch/torchvision dataset.
143 transform:
144 Image transforms (as in torchvision).
145 index_to_filename:
146 Function which takes the dataset and index as input and returns
147 the filename of the file at the index. If None, uses default.
148
149 Returns:
150 A LightlyDataset object.
151
152 Examples:
153 >>> # load cifar10 from torchvision
154 >>> import torchvision
155 >>> import lightly.data as data
156 >>> base = torchvision.datasets.CIFAR10(root='./')
157 >>> dataset = data.LightlyDataset.from_torch_dataset(base)
158
159 """
160 # create an "empty" dataset object
161 dataset_obj = cls(
162 None,
163 transform=transform,
164 index_to_filename=index_to_filename
165 )
166
167 # populate it with the torch dataset
168 dataset_obj.dataset = dataset
169 return dataset_obj
170
171 def __getitem__(self, index: int):
172 """Returns (sample, target, fname) of item at index.
173
174 Args:
175 index:
176 Index of the queried item.
177
178 Returns:
179 The image, target, and filename of the item at index.
180
181 """
182 fname = self.index_to_filename(self.dataset, index)
183 sample, target = self.dataset.__getitem__(index)
184
185 return sample, target, fname
186
187
188 def __len__(self):
189 """Returns the length of the dataset.
190
191 """
192 return len(self.dataset)
193
194 def __add__(self, other):
195 """Adds another item to the dataset.
196
197 """
198 raise NotImplementedError()
199
200 def get_filenames(self) -> List[str]:
201 """Returns all filenames in the dataset.
202
203 """
204 list_of_filenames = []
205 for index in range(len(self)):
206 fname = self.index_to_filename(self.dataset, index)
207 list_of_filenames.append(fname)
208 return list_of_filenames
209
210 def dump(self,
211 output_dir: str,
212 filenames: Union[List[str], None] = None,
213 format: Union[str, None] = None):
214 """Saves images in the dataset to the output directory.
215
216 Will copy the images from the input directory to the output directory
217 if possible. If not (e.g. for VideoDatasets), will load the images and
218 then save them to the output directory with the specified format.
219
220 Args:
221 output_dir:
222 Output directory where the image is stored.
223 filenames:
224 Filenames of the images to store. If None, stores all images.
225 format:
226 Image format.
227
228 """
229
230 if self.dataset.transform is not None:
231 raise RuntimeError('Cannot dump dataset which applies transforms!')
232
233 # create directory if it doesn't exist yet
234 os.makedirs(output_dir, exist_ok=True)
235
236 # dump all the files if no filenames were passed, otherwise dump only
237 # the ones referenced in the list
238 if filenames is None:
239 indices = [i for i in range(self.__len__())]
240 filenames = self.get_filenames()
241 else:
242 indices = []
243 all_filenames = self.get_filenames()
244 for i in range(len(filenames)):
245 if filenames[i] in all_filenames:
246 indices.append(i)
247
248 # dump images
249 for i, filename in zip(indices, filenames):
250 _dump_image(self.dataset, output_dir, filename, i, fmt=format)
251
[end of lightly/data/dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py
--- a/lightly/data/dataset.py
+++ b/lightly/data/dataset.py
@@ -248,3 +248,17 @@
# dump images
for i, filename in zip(indices, filenames):
_dump_image(self.dataset, output_dir, filename, i, fmt=format)
+
+ @property
+ def transform(self):
+ """Getter for the transform of the dataset.
+
+ """
+ return self.dataset.transform
+
+ @transform.setter
+ def transform(self, t):
+ """Setter for the transform of the dataset.
+
+ """
+ self.dataset.transform = t
|
{"golden_diff": "diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py\n--- a/lightly/data/dataset.py\n+++ b/lightly/data/dataset.py\n@@ -248,3 +248,17 @@\n # dump images\n for i, filename in zip(indices, filenames):\n _dump_image(self.dataset, output_dir, filename, i, fmt=format)\n+\n+ @property\n+ def transform(self):\n+ \"\"\"Getter for the transform of the dataset.\n+\n+ \"\"\"\n+ return self.dataset.transform\n+\n+ @transform.setter\n+ def transform(self, t):\n+ \"\"\"Setter for the transform of the dataset.\n+\n+ \"\"\"\n+ self.dataset.transform = t\n", "issue": "Make transform settable in Lightly Dataset \nCurrently if one has created a Lightly Dataset and wants to apply transforms at a later stage, we have to do it as follows:\r\n```\r\ndataset = data.LightlyDataset(root=\"./\", name='CIFAR10', download=True)\r\n...\r\nuse dataset to train SimCLR model for example\r\n...\r\ntest_transforms = torchvision.transforms.ToTensor()\r\ndataset.dataset.transform = test_transforms\r\n```\r\n\r\nWe should extend the Dataset wrapper to directly support transforms.\n", "before_files": [{"content": "\"\"\" Lightly Dataset \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nimport shutil\nfrom PIL import Image\nfrom typing import List, Union\n\nimport torch.utils.data as data\nimport torchvision.datasets as datasets\n\nfrom lightly.data._helpers import _load_dataset\nfrom lightly.data._helpers import DatasetFolder\nfrom lightly.data._video import VideoDataset\n\n\ndef _get_filename_by_index(dataset, index):\n \"\"\"Default function which maps the index of an image to a filename.\n\n \"\"\"\n if isinstance(dataset, datasets.ImageFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.imgs[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, DatasetFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.samples[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, VideoDataset):\n # filename is constructed by the video dataset\n return dataset.get_filename(index)\n else:\n # dummy to prevent crashes\n return str(index)\n\n\ndef _ensure_dir(path):\n \"\"\"Makes sure that the directory at path exists.\n\n \"\"\"\n dirname = os.path.dirname(path)\n os.makedirs(dirname, exist_ok=True)\n\n\ndef _copy_image(input_dir, output_dir, filename):\n \"\"\"Copies an image from the input directory to the output directory.\n\n \"\"\"\n source = os.path.join(input_dir, filename)\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n shutil.copyfile(source, target)\n\ndef _save_image(image, output_dir, filename, fmt):\n \"\"\"Saves an image in the output directory.\n\n \"\"\"\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n try:\n # try to save the image with the specified format or\n # derive the format from the filename (if format=None)\n image.save(target, format=fmt)\n except ValueError:\n # could not determine format from filename\n image.save(target, format='png')\n\n\ndef _dump_image(dataset, output_dir, filename, index, fmt):\n \"\"\"Saves a single image to the output directory.\n\n Will copy the image from the input directory to the output directory\n if possible. If not (e.g. 
for VideoDatasets), will load the image and\n then save it to the output directory with the specified format.\n\n \"\"\"\n\n if isinstance(dataset, datasets.ImageFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n elif isinstance(dataset, DatasetFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n else:\n # need to load the image and save it to the output directory\n image, _ = dataset[index]\n _save_image(image, output_dir, filename, fmt)\n\n\nclass LightlyDataset:\n \"\"\"Provides a uniform data interface for the embedding models.\n\n Should be used for all models and functions in the lightly package.\n Returns a tuple (sample, target, fname) when accessed using __getitem__\n\n Can either be used to load a dataset offered by torchvision (e.g. cifar10)\n or to load a custom dataset from an input folder.\n\n Args:\n input_dir:\n Path to directory holding the images to load.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. If None, uses default.\n\n Examples:\n >>> # load cifar10 from a local folder\n >>> import lightly.data as data\n >>> dataset = data.LightlyDataset(input_dir='path/to/cifar10/')\n >>> sample, target, fname = dataset[0]\n\n \"\"\"\n\n def __init__(self,\n input_dir: str,\n transform=None,\n index_to_filename=None):\n\n # can pass input_dir=None to create an \"empty\" dataset\n self.input_dir = input_dir\n if self.input_dir is not None:\n self.dataset = _load_dataset(self.input_dir, transform)\n\n # initialize function to get filename of image\n self.index_to_filename = _get_filename_by_index\n if index_to_filename is not None:\n self.index_to_filename = index_to_filename\n\n @classmethod\n def from_torch_dataset(cls,\n dataset,\n transform=None,\n index_to_filename=None):\n \"\"\"Builds a LightlyDataset from a PyTorch (or torchvision) dataset.\n\n Args:\n dataset:\n PyTorch/torchvision dataset.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. 
If None, uses default.\n\n Returns:\n A LightlyDataset object.\n\n Examples:\n >>> # load cifar10 from torchvision\n >>> import torchvision\n >>> import lightly.data as data\n >>> base = torchvision.datasets.CIFAR10(root='./')\n >>> dataset = data.LightlyDataset.from_torch_dataset(base)\n\n \"\"\"\n # create an \"empty\" dataset object\n dataset_obj = cls(\n None,\n transform=transform,\n index_to_filename=index_to_filename\n )\n\n # populate it with the torch dataset\n dataset_obj.dataset = dataset\n return dataset_obj\n\n def __getitem__(self, index: int):\n \"\"\"Returns (sample, target, fname) of item at index.\n\n Args:\n index:\n Index of the queried item.\n\n Returns:\n The image, target, and filename of the item at index.\n\n \"\"\"\n fname = self.index_to_filename(self.dataset, index)\n sample, target = self.dataset.__getitem__(index)\n \n return sample, target, fname\n\n\n def __len__(self):\n \"\"\"Returns the length of the dataset.\n\n \"\"\"\n return len(self.dataset)\n\n def __add__(self, other):\n \"\"\"Adds another item to the dataset.\n\n \"\"\"\n raise NotImplementedError()\n\n def get_filenames(self) -> List[str]:\n \"\"\"Returns all filenames in the dataset.\n\n \"\"\"\n list_of_filenames = []\n for index in range(len(self)):\n fname = self.index_to_filename(self.dataset, index)\n list_of_filenames.append(fname)\n return list_of_filenames\n\n def dump(self,\n output_dir: str,\n filenames: Union[List[str], None] = None,\n format: Union[str, None] = None):\n \"\"\"Saves images in the dataset to the output directory.\n\n Will copy the images from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the images and\n then save them to the output directory with the specified format.\n\n Args:\n output_dir:\n Output directory where the image is stored.\n filenames:\n Filenames of the images to store. If None, stores all images.\n format:\n Image format.\n\n \"\"\"\n\n if self.dataset.transform is not None:\n raise RuntimeError('Cannot dump dataset which applies transforms!')\n\n # create directory if it doesn't exist yet\n os.makedirs(output_dir, exist_ok=True)\n\n # dump all the files if no filenames were passed, otherwise dump only\n # the ones referenced in the list\n if filenames is None:\n indices = [i for i in range(self.__len__())]\n filenames = self.get_filenames()\n else:\n indices = []\n all_filenames = self.get_filenames()\n for i in range(len(filenames)):\n if filenames[i] in all_filenames:\n indices.append(i)\n\n # dump images\n for i, filename in zip(indices, filenames):\n _dump_image(self.dataset, output_dir, filename, i, fmt=format)\n", "path": "lightly/data/dataset.py"}]}
| 3,010 | 156 |
gh_patches_debug_22670
|
rasdani/github-patches
|
git_diff
|
NVIDIA__NVFlare-324
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
log_exception error
This error happens during log_exception
```
nvflare/private/event.py", line 62, in fire_event
h.log_exception(
TypeError: log_exception() got an unexpected keyword argument 'local_logging'
FL client execution exception: log_exception() got an unexpected keyword argument 'local_logging'
2022-03-17 18:29:04,424 - ProcessExecutor - INFO - process finished with execution code: 0
```
</issue>
<code>
[start of nvflare/widgets/fed_event.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import threading
16 import time
17
18 from nvflare.apis.client_engine_spec import ClientEngineSpec
19 from nvflare.apis.event_type import EventType
20 from nvflare.apis.fl_constant import EventScope, FedEventHeader, FLContextKey, ReservedKey, ReturnCode
21 from nvflare.apis.fl_context import FLContext
22 from nvflare.apis.server_engine_spec import ServerEngineSpec
23 from nvflare.apis.shareable import Shareable, make_reply
24 from nvflare.widgets.widget import Widget
25
26 FED_EVENT_TOPIC = "fed.event"
27
28
29 class FedEventRunner(Widget):
30 def __init__(self, topic=FED_EVENT_TOPIC):
31 """Init FedEventRunner.
32
33 The FedEventRunner handles posting and receiving of fed events.
34 The system will do its best to fire off all events in the queue before shutdown
35 using the ABOUT_TO_END_RUN event and a grace period during END_RUN.
36
37 Args:
38 topic: the fed event topic to be handled. Defaults to 'fed.event'
39 """
40 Widget.__init__(self)
41 self.topic = topic
42 self.abort_signal = None
43 self.asked_to_stop = False
44 self.asked_to_flush = False
45 self.regular_interval = 0.001
46 self.grace_period = 2
47 self.flush_wait = 2
48 self.engine = None
49 self.last_timestamps = {} # client name => last_timestamp
50 self.in_events = []
51 self.in_lock = threading.Lock()
52 self.poster = threading.Thread(target=self._post, args=())
53
54 def handle_event(self, event_type: str, fl_ctx: FLContext):
55 if event_type == EventType.START_RUN:
56 self.engine = fl_ctx.get_engine()
57 self.engine.register_aux_message_handler(topic=self.topic, message_handle_func=self._receive)
58 self.abort_signal = fl_ctx.get_run_abort_signal()
59 self.asked_to_stop = False
60 self.asked_to_flush = False
61 self.poster.start()
62 elif event_type == EventType.ABOUT_TO_END_RUN:
63 self.asked_to_flush = True
64 # delay self.flush_wait seconds so
65 # _post can empty the queue before
66 # END_RUN is fired
67 time.sleep(self.flush_wait)
68 elif event_type == EventType.END_RUN:
69 self.asked_to_stop = True
70 if self.poster.is_alive():
71 self.poster.join()
72 else:
73 # handle outgoing fed events
74 event_scope = fl_ctx.get_prop(key=FLContextKey.EVENT_SCOPE, default=EventScope.LOCAL)
75 if event_scope != EventScope.FEDERATION:
76 return
77
78 event_data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)
79 if not isinstance(event_data, Shareable):
80 self.log_error(fl_ctx, "bad fed event: expect data to be Shareable but got {}".format(type(event_data)))
81 return
82
83 direction = event_data.get_header(FedEventHeader.DIRECTION, "out")
84 if direction != "out":
85 # ignore incoming events
86 return
87
88 event_data.set_header(FedEventHeader.EVENT_TYPE, event_type)
89 event_data.set_header(FedEventHeader.ORIGIN, fl_ctx.get_identity_name())
90 event_data.set_header(FedEventHeader.TIMESTAMP, time.time())
91
92 targets = event_data.get_header(FedEventHeader.TARGETS, None)
93 self.fire_and_forget_request(request=event_data, fl_ctx=fl_ctx, targets=targets)
94
95 def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):
96 pass
97
98 def _receive(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
99 peer_name = request.get_peer_prop(ReservedKey.IDENTITY_NAME, None)
100 if not peer_name:
101 self.log_error(fl_ctx, "missing identity name of the data sender")
102 return make_reply(ReturnCode.MISSING_PEER_CONTEXT)
103
104 timestamp = request.get_header(FedEventHeader.TIMESTAMP, None)
105 if timestamp is None:
106 self.log_error(fl_ctx, "missing timestamp in incoming fed event")
107 return make_reply(ReturnCode.BAD_REQUEST_DATA)
108
109 event_type = request.get_header(FedEventHeader.EVENT_TYPE, None)
110 if event_type is None:
111 self.log_error(fl_ctx, "missing event_type in incoming fed event")
112 return make_reply(ReturnCode.BAD_REQUEST_DATA)
113
114 with self.in_lock:
115 last_timestamp = self.last_timestamps.get(peer_name, None)
116 if last_timestamp is None or timestamp > last_timestamp:
117 # we only keep new items, in case the peer somehow sent old items
118 request.set_header(FedEventHeader.DIRECTION, "in")
119 self.in_events.append(request)
120 self.last_timestamps[peer_name] = timestamp
121
122 # NOTE: we do not fire event here since event process could take time.
123 # Instead we simply add the package to the queue and return quickly.
124 # The posting of events will be handled in the poster thread
125 return make_reply(ReturnCode.OK)
126
127 def _post(self):
128 """Post an event.
129
130 During ABOUT_TO_END_RUN, sleep_time is 0 and system will flush
131 in_events by firing events without delay.
132
133 During END_RUN, system will wait for self.grace_period, even the queue is empty,
134 so any new item can be processed.
135
136 However, since the system does not guarantee the receiving side of _post is still
137 alive, we catch the exception and show warning messages to users if events can not
138 be handled by receiving side.
139 """
140 sleep_time = self.regular_interval
141 countdown = self.grace_period
142 while True:
143 time.sleep(sleep_time)
144 if self.abort_signal.triggered:
145 break
146 n = len(self.in_events)
147 if n > 0:
148 if self.asked_to_flush:
149 sleep_time = 0
150 else:
151 sleep_time = self.regular_interval
152 with self.in_lock:
153 event_to_post = self.in_events.pop(0)
154 elif self.asked_to_stop:
155 # the queue is empty and we are asked to stop. Give
156 # it self.grace_period seconds to wait, then
157 # exit.
158 if countdown < 0:
159 break
160 else:
161 countdown = countdown - 1
162 time.sleep(1)
163 continue
164 else:
165 sleep_time = min(sleep_time * 2, 1)
166 continue
167
168 with self.engine.new_context() as fl_ctx:
169 if self.asked_to_stop:
170 self.log_warning(fl_ctx, f"{n} items remained in in_events. Will stop when it reaches 0.")
171 fl_ctx.set_prop(key=FLContextKey.EVENT_DATA, value=event_to_post, private=True, sticky=False)
172 fl_ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=EventScope.FEDERATION, private=True, sticky=False)
173
174 event_type = event_to_post.get_header(FedEventHeader.EVENT_TYPE)
175 try:
176 self.engine.fire_event(event_type=event_type, fl_ctx=fl_ctx)
177 except BaseException as e:
178 if self.asked_to_stop:
179 self.log_warning(fl_ctx, f"event {event_to_post} fired unsuccessfully during END_RUN")
180 else:
181 raise e
182
183
184 class ServerFedEventRunner(FedEventRunner):
185 def __init__(self, topic=FED_EVENT_TOPIC):
186 """Init ServerFedEventRunner."""
187 FedEventRunner.__init__(self, topic)
188
189 def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):
190 if not isinstance(self.engine, ServerEngineSpec):
191 raise TypeError("self.engine must be ServerEngineSpec but got {}".format(type(self.engine)))
192 self.engine.fire_and_forget_aux_request(
193 topic=self.topic,
194 targets=targets,
195 request=request,
196 fl_ctx=fl_ctx,
197 )
198
199
200 class ClientFedEventRunner(FedEventRunner):
201 def __init__(self, topic=FED_EVENT_TOPIC):
202 """Init ClientFedEventRunner."""
203 FedEventRunner.__init__(self, topic)
204 self.ready = False
205
206 def handle_event(self, event_type: str, fl_ctx: FLContext):
207 super().handle_event(event_type, fl_ctx)
208
209 if event_type == EventType.START_RUN:
210 self.ready = True
211
212 def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):
213 if not self.ready:
214 self.log_warning(fl_ctx, "Engine in not ready, skip the aux event firing.", local_logging=True)
215 return
216
217 if not isinstance(self.engine, ClientEngineSpec):
218 raise TypeError("self.engine must be ClientEngineSpec but got {}".format(type(self.engine)))
219 self.engine.fire_and_forget_aux_request(topic=self.topic, request=request, fl_ctx=fl_ctx)
220
[end of nvflare/widgets/fed_event.py]
[start of nvflare/private/event.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import uuid
16
17 from nvflare.apis.fl_component import FLComponent
18 from nvflare.apis.fl_constant import EventScope, FLContextKey
19 from nvflare.apis.fl_context import FLContext
20
21 # do not use underscore as key name; otherwise it cannot be removed from ctx
22 _KEY_EVENT_DEPTH = "###event_depth"
23 _MAX_EVENT_DEPTH = 20
24
25
26 def fire_event(event: str, handlers: list, ctx: FLContext):
27 """Fires the specified event and invokes the list of handlers.
28
29 Args:
30 event: the event to be fired
31 handlers: handlers to be invoked
32 ctx: context for cross-component data sharing
33
34 Returns: N/A
35
36 """
37 event_id = str(uuid.uuid4())
38 event_data = ctx.get_prop(FLContextKey.EVENT_DATA, None)
39 event_origin = ctx.get_prop(FLContextKey.EVENT_ORIGIN, None)
40 event_scope = ctx.get_prop(FLContextKey.EVENT_SCOPE, EventScope.LOCAL)
41
42 depth = ctx.get_prop(_KEY_EVENT_DEPTH, 0)
43 if depth > _MAX_EVENT_DEPTH:
44 # too many recursive event calls
45 raise RuntimeError("Recursive event calls too deep (>{})".format(_MAX_EVENT_DEPTH))
46
47 ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth + 1, private=True, sticky=False)
48
49 if handlers:
50 for h in handlers:
51 if not isinstance(h, FLComponent):
52 raise TypeError("handler must be FLComponent but got {}".format(type(h)))
53 try:
54 # since events could be recursive (a handler fires another event) on the same fl_ctx,
55 # we need to reset these key values into the fl_ctx
56 ctx.set_prop(key=FLContextKey.EVENT_ID, value=event_id, private=True, sticky=False)
57 ctx.set_prop(key=FLContextKey.EVENT_DATA, value=event_data, private=True, sticky=False)
58 ctx.set_prop(key=FLContextKey.EVENT_ORIGIN, value=event_origin, private=True, sticky=False)
59 ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=event_scope, private=True, sticky=False)
60 h.handle_event(event, ctx)
61 except:
62 h.log_exception(
63 ctx, 'exception when handling event "{}"'.format(event), fire_event=False, local_logging=True
64 )
65
66 ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth, private=True, sticky=False)
67
[end of nvflare/private/event.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nvflare/private/event.py b/nvflare/private/event.py
--- a/nvflare/private/event.py
+++ b/nvflare/private/event.py
@@ -59,8 +59,6 @@
ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=event_scope, private=True, sticky=False)
h.handle_event(event, ctx)
except:
- h.log_exception(
- ctx, 'exception when handling event "{}"'.format(event), fire_event=False, local_logging=True
- )
+ h.log_exception(ctx, 'exception when handling event "{}"'.format(event), fire_event=False)
ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth, private=True, sticky=False)
diff --git a/nvflare/widgets/fed_event.py b/nvflare/widgets/fed_event.py
--- a/nvflare/widgets/fed_event.py
+++ b/nvflare/widgets/fed_event.py
@@ -211,7 +211,7 @@
def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):
if not self.ready:
- self.log_warning(fl_ctx, "Engine in not ready, skip the aux event firing.", local_logging=True)
+ self.log_warning(fl_ctx, "Engine in not ready, skip the aux event firing.")
return
if not isinstance(self.engine, ClientEngineSpec):
|
{"golden_diff": "diff --git a/nvflare/private/event.py b/nvflare/private/event.py\n--- a/nvflare/private/event.py\n+++ b/nvflare/private/event.py\n@@ -59,8 +59,6 @@\n ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=event_scope, private=True, sticky=False)\n h.handle_event(event, ctx)\n except:\n- h.log_exception(\n- ctx, 'exception when handling event \"{}\"'.format(event), fire_event=False, local_logging=True\n- )\n+ h.log_exception(ctx, 'exception when handling event \"{}\"'.format(event), fire_event=False)\n \n ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth, private=True, sticky=False)\ndiff --git a/nvflare/widgets/fed_event.py b/nvflare/widgets/fed_event.py\n--- a/nvflare/widgets/fed_event.py\n+++ b/nvflare/widgets/fed_event.py\n@@ -211,7 +211,7 @@\n \n def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):\n if not self.ready:\n- self.log_warning(fl_ctx, \"Engine in not ready, skip the aux event firing.\", local_logging=True)\n+ self.log_warning(fl_ctx, \"Engine in not ready, skip the aux event firing.\")\n return\n \n if not isinstance(self.engine, ClientEngineSpec):\n", "issue": "log_exception error\nThis error happens during log_exception\r\n```\r\nnvflare/private/event.py\", line 62, in fire_event\r\n h.log_exception(\r\nTypeError: log_exception() got an unexpected keyword argument 'local_logging'\r\nFL client execution exception: log_exception() got an unexpected keyword argument 'local_logging'\r\n2022-03-17 18:29:04,424 - ProcessExecutor - INFO - process finished with execution code: 0\r\n```\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\nimport time\n\nfrom nvflare.apis.client_engine_spec import ClientEngineSpec\nfrom nvflare.apis.event_type import EventType\nfrom nvflare.apis.fl_constant import EventScope, FedEventHeader, FLContextKey, ReservedKey, ReturnCode\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.server_engine_spec import ServerEngineSpec\nfrom nvflare.apis.shareable import Shareable, make_reply\nfrom nvflare.widgets.widget import Widget\n\nFED_EVENT_TOPIC = \"fed.event\"\n\n\nclass FedEventRunner(Widget):\n def __init__(self, topic=FED_EVENT_TOPIC):\n \"\"\"Init FedEventRunner.\n\n The FedEventRunner handles posting and receiving of fed events.\n The system will do its best to fire off all events in the queue before shutdown\n using the ABOUT_TO_END_RUN event and a grace period during END_RUN.\n\n Args:\n topic: the fed event topic to be handled. 
Defaults to 'fed.event'\n \"\"\"\n Widget.__init__(self)\n self.topic = topic\n self.abort_signal = None\n self.asked_to_stop = False\n self.asked_to_flush = False\n self.regular_interval = 0.001\n self.grace_period = 2\n self.flush_wait = 2\n self.engine = None\n self.last_timestamps = {} # client name => last_timestamp\n self.in_events = []\n self.in_lock = threading.Lock()\n self.poster = threading.Thread(target=self._post, args=())\n\n def handle_event(self, event_type: str, fl_ctx: FLContext):\n if event_type == EventType.START_RUN:\n self.engine = fl_ctx.get_engine()\n self.engine.register_aux_message_handler(topic=self.topic, message_handle_func=self._receive)\n self.abort_signal = fl_ctx.get_run_abort_signal()\n self.asked_to_stop = False\n self.asked_to_flush = False\n self.poster.start()\n elif event_type == EventType.ABOUT_TO_END_RUN:\n self.asked_to_flush = True\n # delay self.flush_wait seconds so\n # _post can empty the queue before\n # END_RUN is fired\n time.sleep(self.flush_wait)\n elif event_type == EventType.END_RUN:\n self.asked_to_stop = True\n if self.poster.is_alive():\n self.poster.join()\n else:\n # handle outgoing fed events\n event_scope = fl_ctx.get_prop(key=FLContextKey.EVENT_SCOPE, default=EventScope.LOCAL)\n if event_scope != EventScope.FEDERATION:\n return\n\n event_data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)\n if not isinstance(event_data, Shareable):\n self.log_error(fl_ctx, \"bad fed event: expect data to be Shareable but got {}\".format(type(event_data)))\n return\n\n direction = event_data.get_header(FedEventHeader.DIRECTION, \"out\")\n if direction != \"out\":\n # ignore incoming events\n return\n\n event_data.set_header(FedEventHeader.EVENT_TYPE, event_type)\n event_data.set_header(FedEventHeader.ORIGIN, fl_ctx.get_identity_name())\n event_data.set_header(FedEventHeader.TIMESTAMP, time.time())\n\n targets = event_data.get_header(FedEventHeader.TARGETS, None)\n self.fire_and_forget_request(request=event_data, fl_ctx=fl_ctx, targets=targets)\n\n def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):\n pass\n\n def _receive(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:\n peer_name = request.get_peer_prop(ReservedKey.IDENTITY_NAME, None)\n if not peer_name:\n self.log_error(fl_ctx, \"missing identity name of the data sender\")\n return make_reply(ReturnCode.MISSING_PEER_CONTEXT)\n\n timestamp = request.get_header(FedEventHeader.TIMESTAMP, None)\n if timestamp is None:\n self.log_error(fl_ctx, \"missing timestamp in incoming fed event\")\n return make_reply(ReturnCode.BAD_REQUEST_DATA)\n\n event_type = request.get_header(FedEventHeader.EVENT_TYPE, None)\n if event_type is None:\n self.log_error(fl_ctx, \"missing event_type in incoming fed event\")\n return make_reply(ReturnCode.BAD_REQUEST_DATA)\n\n with self.in_lock:\n last_timestamp = self.last_timestamps.get(peer_name, None)\n if last_timestamp is None or timestamp > last_timestamp:\n # we only keep new items, in case the peer somehow sent old items\n request.set_header(FedEventHeader.DIRECTION, \"in\")\n self.in_events.append(request)\n self.last_timestamps[peer_name] = timestamp\n\n # NOTE: we do not fire event here since event process could take time.\n # Instead we simply add the package to the queue and return quickly.\n # The posting of events will be handled in the poster thread\n return make_reply(ReturnCode.OK)\n\n def _post(self):\n \"\"\"Post an event.\n\n During ABOUT_TO_END_RUN, sleep_time is 0 and system will flush\n 
in_events by firing events without delay.\n\n During END_RUN, system will wait for self.grace_period, even the queue is empty,\n so any new item can be processed.\n\n However, since the system does not guarantee the receiving side of _post is still\n alive, we catch the exception and show warning messages to users if events can not\n be handled by receiving side.\n \"\"\"\n sleep_time = self.regular_interval\n countdown = self.grace_period\n while True:\n time.sleep(sleep_time)\n if self.abort_signal.triggered:\n break\n n = len(self.in_events)\n if n > 0:\n if self.asked_to_flush:\n sleep_time = 0\n else:\n sleep_time = self.regular_interval\n with self.in_lock:\n event_to_post = self.in_events.pop(0)\n elif self.asked_to_stop:\n # the queue is empty and we are asked to stop. Give\n # it self.grace_period seconds to wait, then\n # exit.\n if countdown < 0:\n break\n else:\n countdown = countdown - 1\n time.sleep(1)\n continue\n else:\n sleep_time = min(sleep_time * 2, 1)\n continue\n\n with self.engine.new_context() as fl_ctx:\n if self.asked_to_stop:\n self.log_warning(fl_ctx, f\"{n} items remained in in_events. Will stop when it reaches 0.\")\n fl_ctx.set_prop(key=FLContextKey.EVENT_DATA, value=event_to_post, private=True, sticky=False)\n fl_ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=EventScope.FEDERATION, private=True, sticky=False)\n\n event_type = event_to_post.get_header(FedEventHeader.EVENT_TYPE)\n try:\n self.engine.fire_event(event_type=event_type, fl_ctx=fl_ctx)\n except BaseException as e:\n if self.asked_to_stop:\n self.log_warning(fl_ctx, f\"event {event_to_post} fired unsuccessfully during END_RUN\")\n else:\n raise e\n\n\nclass ServerFedEventRunner(FedEventRunner):\n def __init__(self, topic=FED_EVENT_TOPIC):\n \"\"\"Init ServerFedEventRunner.\"\"\"\n FedEventRunner.__init__(self, topic)\n\n def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):\n if not isinstance(self.engine, ServerEngineSpec):\n raise TypeError(\"self.engine must be ServerEngineSpec but got {}\".format(type(self.engine)))\n self.engine.fire_and_forget_aux_request(\n topic=self.topic,\n targets=targets,\n request=request,\n fl_ctx=fl_ctx,\n )\n\n\nclass ClientFedEventRunner(FedEventRunner):\n def __init__(self, topic=FED_EVENT_TOPIC):\n \"\"\"Init ClientFedEventRunner.\"\"\"\n FedEventRunner.__init__(self, topic)\n self.ready = False\n\n def handle_event(self, event_type: str, fl_ctx: FLContext):\n super().handle_event(event_type, fl_ctx)\n\n if event_type == EventType.START_RUN:\n self.ready = True\n\n def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None):\n if not self.ready:\n self.log_warning(fl_ctx, \"Engine in not ready, skip the aux event firing.\", local_logging=True)\n return\n\n if not isinstance(self.engine, ClientEngineSpec):\n raise TypeError(\"self.engine must be ClientEngineSpec but got {}\".format(type(self.engine)))\n self.engine.fire_and_forget_aux_request(topic=self.topic, request=request, fl_ctx=fl_ctx)\n", "path": "nvflare/widgets/fed_event.py"}, {"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport uuid\n\nfrom nvflare.apis.fl_component import FLComponent\nfrom nvflare.apis.fl_constant import EventScope, FLContextKey\nfrom nvflare.apis.fl_context import FLContext\n\n# do not use underscore as key name; otherwise it cannot be removed from ctx\n_KEY_EVENT_DEPTH = \"###event_depth\"\n_MAX_EVENT_DEPTH = 20\n\n\ndef fire_event(event: str, handlers: list, ctx: FLContext):\n \"\"\"Fires the specified event and invokes the list of handlers.\n\n Args:\n event: the event to be fired\n handlers: handlers to be invoked\n ctx: context for cross-component data sharing\n\n Returns: N/A\n\n \"\"\"\n event_id = str(uuid.uuid4())\n event_data = ctx.get_prop(FLContextKey.EVENT_DATA, None)\n event_origin = ctx.get_prop(FLContextKey.EVENT_ORIGIN, None)\n event_scope = ctx.get_prop(FLContextKey.EVENT_SCOPE, EventScope.LOCAL)\n\n depth = ctx.get_prop(_KEY_EVENT_DEPTH, 0)\n if depth > _MAX_EVENT_DEPTH:\n # too many recursive event calls\n raise RuntimeError(\"Recursive event calls too deep (>{})\".format(_MAX_EVENT_DEPTH))\n\n ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth + 1, private=True, sticky=False)\n\n if handlers:\n for h in handlers:\n if not isinstance(h, FLComponent):\n raise TypeError(\"handler must be FLComponent but got {}\".format(type(h)))\n try:\n # since events could be recursive (a handler fires another event) on the same fl_ctx,\n # we need to reset these key values into the fl_ctx\n ctx.set_prop(key=FLContextKey.EVENT_ID, value=event_id, private=True, sticky=False)\n ctx.set_prop(key=FLContextKey.EVENT_DATA, value=event_data, private=True, sticky=False)\n ctx.set_prop(key=FLContextKey.EVENT_ORIGIN, value=event_origin, private=True, sticky=False)\n ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=event_scope, private=True, sticky=False)\n h.handle_event(event, ctx)\n except:\n h.log_exception(\n ctx, 'exception when handling event \"{}\"'.format(event), fire_event=False, local_logging=True\n )\n\n ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth, private=True, sticky=False)\n", "path": "nvflare/private/event.py"}]}
| 4,021 | 302 |
gh_patches_debug_790
|
rasdani/github-patches
|
git_diff
|
ibis-project__ibis-8364
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: `Scalar.isin(Column)` returns a Column, not a Scalar
### What happened?
```python
import ibis
needle = ibis.literal(2)
haystack = ibis.memtable({"x": [1, 2, 3]}).x
type(needle.isin(haystack))
# ibis.expr.types.logical.BooleanColumn
```
### What version of ibis are you using?
main
### What backend(s) are you using, if any?
_No response_
### Relevant log output
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of ibis/expr/operations/subqueries.py]
1 from __future__ import annotations
2
3 from public import public
4
5 import ibis.expr.datashape as ds
6 import ibis.expr.datatypes as dt
7 import ibis.expr.rules as rlz
8 from ibis.common.annotations import attribute
9 from ibis.common.exceptions import IntegrityError
10 from ibis.expr.operations.core import Value
11 from ibis.expr.operations.relations import Relation # noqa: TCH001
12
13
14 @public
15 class Subquery(Value):
16 rel: Relation
17
18 @attribute
19 def relations(self):
20 return frozenset()
21
22
23 @public
24 class ExistsSubquery(Subquery):
25 dtype = dt.boolean
26 shape = ds.columnar
27
28
29 @public
30 class ScalarSubquery(Subquery):
31 shape = ds.scalar
32
33 def __init__(self, rel):
34 if len(rel.schema) != 1:
35 raise IntegrityError(
36 "Relation passed to ScalarSubquery() must have exactly one "
37 f"column, got {len(rel.schema)}"
38 )
39 super().__init__(rel=rel)
40
41 @attribute
42 def value(self):
43 (value,) = self.rel.values.values()
44 return value
45
46 @attribute
47 def dtype(self):
48 return self.value.dtype
49
50
51 @public
52 class InSubquery(Subquery):
53 needle: Value
54
55 dtype = dt.boolean
56 shape = ds.columnar
57
58 def __init__(self, rel, needle):
59 if len(rel.schema) != 1:
60 raise IntegrityError(
61 "Relation passed to InSubquery() must have exactly one "
62 f"column, got {len(rel.schema)}"
63 )
64 (value,) = rel.values.values()
65 if not rlz.comparable(value, needle):
66 raise IntegrityError(f"{needle!r} is not comparable to {value!r}")
67 super().__init__(rel=rel, needle=needle)
68
69 @attribute
70 def value(self):
71 (value,) = self.rel.values.values()
72 return value
73
74 @attribute
75 def relations(self):
76 return self.needle.relations
77
[end of ibis/expr/operations/subqueries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ibis/expr/operations/subqueries.py b/ibis/expr/operations/subqueries.py
--- a/ibis/expr/operations/subqueries.py
+++ b/ibis/expr/operations/subqueries.py
@@ -53,7 +53,7 @@
needle: Value
dtype = dt.boolean
- shape = ds.columnar
+ shape = rlz.shape_like("needle")
def __init__(self, rel, needle):
if len(rel.schema) != 1:
|
{"golden_diff": "diff --git a/ibis/expr/operations/subqueries.py b/ibis/expr/operations/subqueries.py\n--- a/ibis/expr/operations/subqueries.py\n+++ b/ibis/expr/operations/subqueries.py\n@@ -53,7 +53,7 @@\n needle: Value\n \n dtype = dt.boolean\n- shape = ds.columnar\n+ shape = rlz.shape_like(\"needle\")\n \n def __init__(self, rel, needle):\n if len(rel.schema) != 1:\n", "issue": "bug: `Scalar.isin(Column)` returns a Column, not a Scalar\n### What happened?\n\n```python\r\nimport ibis\r\n\r\nneedle = ibis.literal(2)\r\nhaystack = ibis.memtable({\"x\": [1, 2, 3]}).x\r\ntype(needle.isin(haystack))\r\n# ibis.expr.types.logical.BooleanColumn\r\n```\n\n### What version of ibis are you using?\n\nmain\n\n### What backend(s) are you using, if any?\n\n_No response_\n\n### Relevant log output\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom public import public\n\nimport ibis.expr.datashape as ds\nimport ibis.expr.datatypes as dt\nimport ibis.expr.rules as rlz\nfrom ibis.common.annotations import attribute\nfrom ibis.common.exceptions import IntegrityError\nfrom ibis.expr.operations.core import Value\nfrom ibis.expr.operations.relations import Relation # noqa: TCH001\n\n\n@public\nclass Subquery(Value):\n rel: Relation\n\n @attribute\n def relations(self):\n return frozenset()\n\n\n@public\nclass ExistsSubquery(Subquery):\n dtype = dt.boolean\n shape = ds.columnar\n\n\n@public\nclass ScalarSubquery(Subquery):\n shape = ds.scalar\n\n def __init__(self, rel):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to ScalarSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n super().__init__(rel=rel)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def dtype(self):\n return self.value.dtype\n\n\n@public\nclass InSubquery(Subquery):\n needle: Value\n\n dtype = dt.boolean\n shape = ds.columnar\n\n def __init__(self, rel, needle):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to InSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n (value,) = rel.values.values()\n if not rlz.comparable(value, needle):\n raise IntegrityError(f\"{needle!r} is not comparable to {value!r}\")\n super().__init__(rel=rel, needle=needle)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def relations(self):\n return self.needle.relations\n", "path": "ibis/expr/operations/subqueries.py"}]}
| 1,261 | 116 |
gh_patches_debug_22414
|
rasdani/github-patches
|
git_diff
|
translate__pootle-6485
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Languages in languages drop down menu are messed
Hi,
the languages in the languages drop down menu are in a pretty mess now. It seems that they are not sorted anymore now, neither by language name nor by locale.
Regards,
Michael
</issue>
<code>
[start of pootle/core/views/base.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.urls import reverse
10 from django.utils.decorators import method_decorator
11 from django.utils.functional import cached_property
12 from django.utils.translation import get_language
13 from django.views.decorators.cache import never_cache
14 from django.views.generic import DetailView
15
16 from pootle.core.delegate import site_languages
17 from pootle.core.url_helpers import get_path_parts
18 from pootle.i18n.gettext import ugettext as _
19 from pootle_app.models.permissions import check_permission
20 from pootle_misc.util import ajax_required
21
22 from .decorators import requires_permission, set_permissions
23 from .mixins import GatherContextMixin, PootleJSONMixin
24
25
26 class PootleDetailView(GatherContextMixin, DetailView):
27 translate_url_path = ""
28 browse_url_path = ""
29 resource_path = ""
30 view_name = ""
31 sw_version = 0
32 ns = "pootle.core"
33
34 @property
35 def browse_url(self):
36 return reverse(
37 self.browse_url_path,
38 kwargs=self.url_kwargs)
39
40 @property
41 def cache_key(self):
42 return (
43 "%s.%s.%s.%s"
44 % (self.page_name,
45 self.view_name,
46 self.object.data_tool.cache_key,
47 self.request_lang))
48
49 @property
50 def request_lang(self):
51 return get_language()
52
53 @cached_property
54 def has_admin_access(self):
55 return check_permission('administrate', self.request)
56
57 @property
58 def language(self):
59 if self.tp:
60 return self.tp.language
61
62 @property
63 def permission_context(self):
64 return self.get_object()
65
66 @property
67 def pootle_path(self):
68 return self.object.pootle_path
69
70 @property
71 def project(self):
72 if self.tp:
73 return self.tp.project
74
75 @property
76 def tp(self):
77 return None
78
79 @property
80 def translate_url(self):
81 return reverse(
82 self.translate_url_path,
83 kwargs=self.url_kwargs)
84
85 @set_permissions
86 @requires_permission("view")
87 def dispatch(self, request, *args, **kwargs):
88 # get funky with the request 8/
89 return super(PootleDetailView, self).dispatch(request, *args, **kwargs)
90
91 @property
92 def languages(self):
93 languages = site_languages.get()
94 return (
95 languages.all_languages
96 if self.has_admin_access
97 else languages.languages)
98
99 def get_context_data(self, *args, **kwargs):
100 return {
101 'object': self.object,
102 'pootle_path': self.pootle_path,
103 'project': self.project,
104 'language': self.language,
105 "all_languages": self.languages,
106 'translation_project': self.tp,
107 'has_admin_access': self.has_admin_access,
108 'resource_path': self.resource_path,
109 'resource_path_parts': get_path_parts(self.resource_path),
110 'translate_url': self.translate_url,
111 'browse_url': self.browse_url,
112 'paths_placeholder': _("Entire Project"),
113 'unit_api_root': "/xhr/units/"}
114
115
116 class PootleJSON(PootleJSONMixin, PootleDetailView):
117
118 @never_cache
119 @method_decorator(ajax_required)
120 @set_permissions
121 @requires_permission("view")
122 def dispatch(self, request, *args, **kwargs):
123 return super(PootleJSON, self).dispatch(request, *args, **kwargs)
124
125
126 class PootleAdminView(DetailView):
127
128 @set_permissions
129 @requires_permission("administrate")
130 def dispatch(self, request, *args, **kwargs):
131 return super(PootleAdminView, self).dispatch(request, *args, **kwargs)
132
133 @property
134 def permission_context(self):
135 return self.get_object().directory
136
137 def post(self, *args, **kwargs):
138 return self.get(*args, **kwargs)
139
[end of pootle/core/views/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/core/views/base.py b/pootle/core/views/base.py
--- a/pootle/core/views/base.py
+++ b/pootle/core/views/base.py
@@ -6,6 +6,8 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+from collections import OrderedDict
+
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
@@ -91,10 +93,18 @@
@property
def languages(self):
languages = site_languages.get()
- return (
+ languages = (
languages.all_languages
if self.has_admin_access
else languages.languages)
+ lang_map = {
+ v: k
+ for k, v
+ in languages.items()}
+ return OrderedDict(
+ (lang_map[v], v)
+ for v
+ in sorted(languages.values()))
def get_context_data(self, *args, **kwargs):
return {
|
{"golden_diff": "diff --git a/pootle/core/views/base.py b/pootle/core/views/base.py\n--- a/pootle/core/views/base.py\n+++ b/pootle/core/views/base.py\n@@ -6,6 +6,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from collections import OrderedDict\n+\n from django.urls import reverse\n from django.utils.decorators import method_decorator\n from django.utils.functional import cached_property\n@@ -91,10 +93,18 @@\n @property\n def languages(self):\n languages = site_languages.get()\n- return (\n+ languages = (\n languages.all_languages\n if self.has_admin_access\n else languages.languages)\n+ lang_map = {\n+ v: k\n+ for k, v\n+ in languages.items()}\n+ return OrderedDict(\n+ (lang_map[v], v)\n+ for v\n+ in sorted(languages.values()))\n \n def get_context_data(self, *args, **kwargs):\n return {\n", "issue": "Languages in languages drop down menu are messed\nHi,\r\n\r\nthe languages in the languages drop down menu are in a pretty mess now. It seems that they are not sorted anymore now, neither by language name nor by locale.\r\n\r\nRegards,\r\nMichael\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import DetailView\n\nfrom pootle.core.delegate import site_languages\nfrom pootle.core.url_helpers import get_path_parts\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_app.models.permissions import check_permission\nfrom pootle_misc.util import ajax_required\n\nfrom .decorators import requires_permission, set_permissions\nfrom .mixins import GatherContextMixin, PootleJSONMixin\n\n\nclass PootleDetailView(GatherContextMixin, DetailView):\n translate_url_path = \"\"\n browse_url_path = \"\"\n resource_path = \"\"\n view_name = \"\"\n sw_version = 0\n ns = \"pootle.core\"\n\n @property\n def browse_url(self):\n return reverse(\n self.browse_url_path,\n kwargs=self.url_kwargs)\n\n @property\n def cache_key(self):\n return (\n \"%s.%s.%s.%s\"\n % (self.page_name,\n self.view_name,\n self.object.data_tool.cache_key,\n self.request_lang))\n\n @property\n def request_lang(self):\n return get_language()\n\n @cached_property\n def has_admin_access(self):\n return check_permission('administrate', self.request)\n\n @property\n def language(self):\n if self.tp:\n return self.tp.language\n\n @property\n def permission_context(self):\n return self.get_object()\n\n @property\n def pootle_path(self):\n return self.object.pootle_path\n\n @property\n def project(self):\n if self.tp:\n return self.tp.project\n\n @property\n def tp(self):\n return None\n\n @property\n def translate_url(self):\n return reverse(\n self.translate_url_path,\n kwargs=self.url_kwargs)\n\n @set_permissions\n @requires_permission(\"view\")\n def dispatch(self, request, *args, **kwargs):\n # get funky with the request 8/\n return super(PootleDetailView, self).dispatch(request, *args, **kwargs)\n\n @property\n def languages(self):\n languages = site_languages.get()\n return (\n languages.all_languages\n if 
self.has_admin_access\n else languages.languages)\n\n def get_context_data(self, *args, **kwargs):\n return {\n 'object': self.object,\n 'pootle_path': self.pootle_path,\n 'project': self.project,\n 'language': self.language,\n \"all_languages\": self.languages,\n 'translation_project': self.tp,\n 'has_admin_access': self.has_admin_access,\n 'resource_path': self.resource_path,\n 'resource_path_parts': get_path_parts(self.resource_path),\n 'translate_url': self.translate_url,\n 'browse_url': self.browse_url,\n 'paths_placeholder': _(\"Entire Project\"),\n 'unit_api_root': \"/xhr/units/\"}\n\n\nclass PootleJSON(PootleJSONMixin, PootleDetailView):\n\n @never_cache\n @method_decorator(ajax_required)\n @set_permissions\n @requires_permission(\"view\")\n def dispatch(self, request, *args, **kwargs):\n return super(PootleJSON, self).dispatch(request, *args, **kwargs)\n\n\nclass PootleAdminView(DetailView):\n\n @set_permissions\n @requires_permission(\"administrate\")\n def dispatch(self, request, *args, **kwargs):\n return super(PootleAdminView, self).dispatch(request, *args, **kwargs)\n\n @property\n def permission_context(self):\n return self.get_object().directory\n\n def post(self, *args, **kwargs):\n return self.get(*args, **kwargs)\n", "path": "pootle/core/views/base.py"}]}
| 1,799 | 233 |
gh_patches_debug_27678
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-854
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add an option to the bug_classifier script to download the model when it doesn't exist
</issue>
<code>
[start of scripts/bug_classifier.py]
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import os
5
6 import numpy as np
7
8 from bugbug import bugzilla
9 from bugbug.models import get_model_class
10
11 MODELS_WITH_TYPE = ("component",)
12
13
14 def classify_bugs(model_name, classifier):
15 if classifier != "default":
16 assert (
17 model_name in MODELS_WITH_TYPE
18 ), f"{classifier} is not a valid classifier type for {model_name}"
19
20 model_file_name = f"{model_name}{classifier}model"
21 model_name = f"{model_name}_{classifier}"
22 else:
23 model_file_name = f"{model_name}model"
24
25 assert os.path.exists(
26 model_file_name
27 ), f"{model_file_name} does not exist. Train the model with trainer.py first."
28
29 model_class = get_model_class(model_name)
30 model = model_class.load(model_file_name)
31
32 for bug in bugzilla.get_bugs():
33 print(
34 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]} - {bug["summary"]} '
35 )
36
37 if model.calculate_importance:
38 probas, importance = model.classify(
39 bug, probabilities=True, importances=True
40 )
41
42 feature_names = model.get_human_readable_feature_names()
43
44 model.print_feature_importances(
45 importance["importances"], feature_names, class_probabilities=probas
46 )
47 else:
48 probas = model.classify(bug, probabilities=True, importances=False)
49
50 if np.argmax(probas) == 1:
51 print(f"Positive! {probas}")
52 else:
53 print(f"Negative! {probas}")
54 input()
55
56
57 def main():
58 description = "Perform evaluation on bugs using the specified model"
59 parser = argparse.ArgumentParser(description=description)
60
61 parser.add_argument("model", help="Which model to use for evaluation")
62 parser.add_argument(
63 "--classifier",
64 help="Type of the classifier. Only used for component classification.",
65 choices=["default", "nn"],
66 default="default",
67 )
68
69 args = parser.parse_args()
70
71 classify_bugs(args.model, args.classifier)
72
[end of scripts/bug_classifier.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py
--- a/scripts/bug_classifier.py
+++ b/scripts/bug_classifier.py
@@ -2,14 +2,20 @@
import argparse
import os
+from logging import INFO, basicConfig, getLogger
import numpy as np
+import requests
from bugbug import bugzilla
from bugbug.models import get_model_class
+from bugbug.utils import download_check_etag, zstd_decompress
MODELS_WITH_TYPE = ("component",)
+basicConfig(level=INFO)
+logger = getLogger(__name__)
+
def classify_bugs(model_name, classifier):
if classifier != "default":
@@ -22,9 +28,21 @@
else:
model_file_name = f"{model_name}model"
- assert os.path.exists(
- model_file_name
- ), f"{model_file_name} does not exist. Train the model with trainer.py first."
+ if not os.path.exists(model_file_name):
+ logger.info(f"{model_file_name} does not exist. Downloading the model....")
+ try:
+ download_check_etag(
+ f"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst",
+ f"{model_file_name}.zst",
+ )
+ except requests.HTTPError:
+ logger.error(
+ f"A pre-trained model is not available, you will need to train it yourself using the trainer script"
+ )
+ raise SystemExit(1)
+
+ zstd_decompress(model_file_name)
+ assert os.path.exists(model_file_name), "Decompressed file doesn't exist"
model_class = get_model_class(model_name)
model = model_class.load(model_file_name)
|
{"golden_diff": "diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py\n--- a/scripts/bug_classifier.py\n+++ b/scripts/bug_classifier.py\n@@ -2,14 +2,20 @@\n \n import argparse\n import os\n+from logging import INFO, basicConfig, getLogger\n \n import numpy as np\n+import requests\n \n from bugbug import bugzilla\n from bugbug.models import get_model_class\n+from bugbug.utils import download_check_etag, zstd_decompress\n \n MODELS_WITH_TYPE = (\"component\",)\n \n+basicConfig(level=INFO)\n+logger = getLogger(__name__)\n+\n \n def classify_bugs(model_name, classifier):\n if classifier != \"default\":\n@@ -22,9 +28,21 @@\n else:\n model_file_name = f\"{model_name}model\"\n \n- assert os.path.exists(\n- model_file_name\n- ), f\"{model_file_name} does not exist. Train the model with trainer.py first.\"\n+ if not os.path.exists(model_file_name):\n+ logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n+ try:\n+ download_check_etag(\n+ f\"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n+ f\"{model_file_name}.zst\",\n+ )\n+ except requests.HTTPError:\n+ logger.error(\n+ f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n+ )\n+ raise SystemExit(1)\n+\n+ zstd_decompress(model_file_name)\n+ assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n \n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n", "issue": "Add an option to the bug_classifier script to download the model when it doesn't exist\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\n\nimport numpy as np\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\n\nMODELS_WITH_TYPE = (\"component\",)\n\n\ndef classify_bugs(model_name, classifier):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n assert os.path.exists(\n model_file_name\n ), f\"{model_file_name} does not exist. Train the model with trainer.py first.\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_human_readable_feature_names()\n\n model.print_feature_importances(\n importance[\"importances\"], feature_names, class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! {probas}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. 
Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier)\n", "path": "scripts/bug_classifier.py"}]}
| 1,145 | 396 |
gh_patches_debug_34975
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-1271
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] hydra.job.id and hydra.job.num are not properly transferred to jobs in multirun
</issue>
<code>
[start of hydra/core/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import copy
3 import logging
4 import os
5 import re
6 import sys
7 import warnings
8 from contextlib import contextmanager
9 from dataclasses import dataclass
10 from os.path import basename, dirname, splitext
11 from pathlib import Path
12 from time import localtime, strftime
13 from typing import Any, Dict, Optional, Sequence, Tuple, Union, cast
14
15 from omegaconf import DictConfig, OmegaConf, open_dict, read_write
16
17 from hydra.core.hydra_config import HydraConfig
18 from hydra.core.singleton import Singleton
19 from hydra.types import TaskFunction
20
21 log = logging.getLogger(__name__)
22
23
24 def simple_stdout_log_config(level: int = logging.INFO) -> None:
25 root = logging.getLogger()
26 root.setLevel(level)
27 handler = logging.StreamHandler(sys.stdout)
28 formatter = logging.Formatter("%(message)s")
29 handler.setFormatter(formatter)
30 root.addHandler(handler)
31
32
33 def configure_log(
34 log_config: DictConfig, verbose_config: Union[bool, str, Sequence[str]]
35 ) -> None:
36 assert isinstance(verbose_config, (bool, str)) or OmegaConf.is_list(verbose_config)
37 if log_config is not None:
38 conf: Dict[str, Any] = OmegaConf.to_container( # type: ignore
39 log_config, resolve=True
40 )
41 if conf["root"] is not None:
42 logging.config.dictConfig(conf)
43 else:
44 # default logging to stdout
45 root = logging.getLogger()
46 root.setLevel(logging.INFO)
47 handler = logging.StreamHandler(sys.stdout)
48 formatter = logging.Formatter(
49 "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"
50 )
51 handler.setFormatter(formatter)
52 root.addHandler(handler)
53 if isinstance(verbose_config, bool):
54 if verbose_config:
55 logging.getLogger().setLevel(logging.DEBUG)
56 else:
57 if isinstance(verbose_config, str):
58 verbose_list = OmegaConf.create([verbose_config])
59 elif OmegaConf.is_list(verbose_config):
60 verbose_list = verbose_config # type: ignore
61 else:
62 assert False
63
64 for logger in verbose_list:
65 logging.getLogger(logger).setLevel(logging.DEBUG)
66
67
68 def _save_config(cfg: DictConfig, filename: str, output_dir: Path) -> None:
69 output_dir.mkdir(parents=True, exist_ok=True)
70 with open(str(output_dir / filename), "w") as file:
71 file.write(OmegaConf.to_yaml(cfg))
72
73
74 def filter_overrides(overrides: Sequence[str]) -> Sequence[str]:
75 """
76 :param overrides: overrides list
77 :return: returning a new overrides list with all the keys starting with hydra. filtered.
78 """
79 return [x for x in overrides if not x.startswith("hydra.")]
80
81
82 def run_job(
83 config: DictConfig,
84 task_function: TaskFunction,
85 job_dir_key: str,
86 job_subdir_key: Optional[str],
87 configure_logging: bool = True,
88 ) -> "JobReturn":
89 old_cwd = os.getcwd()
90 working_dir = str(OmegaConf.select(config, job_dir_key))
91 if job_subdir_key is not None:
92 # evaluate job_subdir_key lazily.
93 # this is running on the client side in sweep and contains things such as job:id which
94 # are only available there.
95 subdir = str(OmegaConf.select(config, job_subdir_key))
96 working_dir = os.path.join(working_dir, subdir)
97 try:
98 ret = JobReturn()
99 ret.working_dir = working_dir
100 task_cfg = copy.deepcopy(config)
101 with read_write(task_cfg):
102 with open_dict(task_cfg):
103 del task_cfg["hydra"]
104 ret.cfg = task_cfg
105 ret.hydra_cfg = OmegaConf.create({"hydra": HydraConfig.get()})
106 overrides = OmegaConf.to_container(config.hydra.overrides.task)
107 assert isinstance(overrides, list)
108 ret.overrides = overrides
109 # handle output directories here
110 Path(str(working_dir)).mkdir(parents=True, exist_ok=True)
111 os.chdir(working_dir)
112
113 if configure_logging:
114 configure_log(config.hydra.job_logging, config.hydra.verbose)
115
116 hydra_cfg = OmegaConf.masked_copy(config, "hydra")
117 assert isinstance(hydra_cfg, DictConfig)
118
119 if config.hydra.output_subdir is not None:
120 hydra_output = Path(config.hydra.output_subdir)
121 _save_config(task_cfg, "config.yaml", hydra_output)
122 _save_config(hydra_cfg, "hydra.yaml", hydra_output)
123 _save_config(config.hydra.overrides.task, "overrides.yaml", hydra_output)
124
125 with env_override(hydra_cfg.hydra.job.env_set):
126 ret.return_value = task_function(task_cfg)
127 ret.task_name = JobRuntime.instance().get("name")
128
129 _flush_loggers()
130
131 return ret
132 finally:
133 os.chdir(old_cwd)
134
135
136 def get_valid_filename(s: str) -> str:
137 s = str(s).strip().replace(" ", "_")
138 return re.sub(r"(?u)[^-\w.]", "", s)
139
140
141 def setup_globals() -> None:
142 def register(name: str, f: Any) -> None:
143 try:
144 OmegaConf.register_resolver(name, f)
145 except AssertionError:
146 # calling it again in no_workers mode will throw. safe to ignore.
147 pass
148
149 # please add documentation when you add a new resolver
150 register("now", lambda pattern: strftime(pattern, localtime()))
151 register(
152 "hydra",
153 lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),
154 )
155
156 vi = sys.version_info
157 version_dict = {
158 "major": f"{vi[0]}",
159 "minor": f"{vi[0]}.{vi[1]}",
160 "micro": f"{vi[0]}.{vi[1]}.{vi[2]}",
161 }
162 register("python_version", lambda level="minor": version_dict.get(level))
163
164
165 @dataclass
166 class JobReturn:
167 overrides: Optional[Sequence[str]] = None
168 return_value: Any = None
169 cfg: Optional[DictConfig] = None
170 hydra_cfg: Optional[DictConfig] = None
171 working_dir: Optional[str] = None
172 task_name: Optional[str] = None
173
174
175 class JobRuntime(metaclass=Singleton):
176 def __init__(self) -> None:
177 self.conf: DictConfig = OmegaConf.create()
178 self.set("name", "UNKNOWN_NAME")
179
180 def get(self, key: str) -> Any:
181 ret = OmegaConf.select(self.conf, key)
182 if ret is None:
183 raise KeyError(f"Key not found in {type(self).__name__}: {key}")
184 return ret
185
186 def set(self, key: str, value: Any) -> None:
187 log.debug(f"Setting {type(self).__name__}:{key}={value}")
188 self.conf[key] = value
189
190
191 def split_config_path(
192 config_path: Optional[str], config_name: Optional[str]
193 ) -> Tuple[Optional[str], Optional[str]]:
194 if config_path is None or config_path == "":
195 return None, config_name
196 split_file = splitext(config_path)
197 if split_file[1] in (".yaml", ".yml"):
198 # assuming dir/config.yaml form
199 config_file: Optional[str] = basename(config_path)
200 config_dir: Optional[str] = dirname(config_path)
201 # DEPRECATED: remove in 1.1
202 msg = (
203 "\nUsing config_path to specify the config name is deprecated, specify the config name via config_name"
204 "\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/config_path_changes"
205 )
206 warnings.warn(category=UserWarning, message=msg)
207 else:
208 # assuming dir form without a config file.
209 config_file = None
210 config_dir = config_path
211
212 if config_dir == "":
213 config_dir = None
214
215 if config_file == "":
216 config_file = None
217
218 if config_file is not None:
219 if config_name is not None:
220 raise ValueError(
221 "Config name should be specified in either normalized_config_path or config_name, but not both"
222 )
223 config_name = config_file
224
225 return config_dir, config_name
226
227
228 @contextmanager
229 def env_override(env: Dict[str, str]) -> Any:
230 """Temporarily set environment variables inside the context manager and
231 fully restore previous environment afterwards
232 """
233 original_env = {key: os.getenv(key) for key in env}
234 os.environ.update(env)
235 try:
236 yield
237 finally:
238 for key, value in original_env.items():
239 if value is None:
240 del os.environ[key]
241 else:
242 os.environ[key] = value
243
244
245 def _flush_loggers() -> None:
246 # Python logging does not have an official API to flush all loggers.
247 # This will have to do.
248 for h_weak_ref in logging._handlerList: # type: ignore
249 try:
250 h_weak_ref().flush()
251 except Exception:
252 # ignore exceptions thrown during flushing
253 pass
254
[end of hydra/core/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hydra/core/utils.py b/hydra/core/utils.py
--- a/hydra/core/utils.py
+++ b/hydra/core/utils.py
@@ -88,6 +88,7 @@
) -> "JobReturn":
old_cwd = os.getcwd()
working_dir = str(OmegaConf.select(config, job_dir_key))
+ orig_hydra_cfg = HydraConfig.instance().cfg
if job_subdir_key is not None:
# evaluate job_subdir_key lazily.
# this is running on the client side in sweep and contains things such as job:id which
@@ -98,11 +99,16 @@
ret = JobReturn()
ret.working_dir = working_dir
task_cfg = copy.deepcopy(config)
+ hydra_cfg = OmegaConf.masked_copy(task_cfg, "hydra")
+ # maintain parent to preserve interpolation links from hydra_cfg to job_cfg
+ hydra_cfg._set_parent(task_cfg)
with read_write(task_cfg):
with open_dict(task_cfg):
del task_cfg["hydra"]
+ HydraConfig.instance().cfg = hydra_cfg # type: ignore
+
ret.cfg = task_cfg
- ret.hydra_cfg = OmegaConf.create({"hydra": HydraConfig.get()})
+ ret.hydra_cfg = hydra_cfg
overrides = OmegaConf.to_container(config.hydra.overrides.task)
assert isinstance(overrides, list)
ret.overrides = overrides
@@ -113,9 +119,6 @@
if configure_logging:
configure_log(config.hydra.job_logging, config.hydra.verbose)
- hydra_cfg = OmegaConf.masked_copy(config, "hydra")
- assert isinstance(hydra_cfg, DictConfig)
-
if config.hydra.output_subdir is not None:
hydra_output = Path(config.hydra.output_subdir)
_save_config(task_cfg, "config.yaml", hydra_output)
@@ -130,6 +133,7 @@
return ret
finally:
+ HydraConfig.instance().cfg = orig_hydra_cfg
os.chdir(old_cwd)
|
{"golden_diff": "diff --git a/hydra/core/utils.py b/hydra/core/utils.py\n--- a/hydra/core/utils.py\n+++ b/hydra/core/utils.py\n@@ -88,6 +88,7 @@\n ) -> \"JobReturn\":\n old_cwd = os.getcwd()\n working_dir = str(OmegaConf.select(config, job_dir_key))\n+ orig_hydra_cfg = HydraConfig.instance().cfg\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n@@ -98,11 +99,16 @@\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n+ hydra_cfg = OmegaConf.masked_copy(task_cfg, \"hydra\")\n+ # maintain parent to preserve interpolation links from hydra_cfg to job_cfg\n+ hydra_cfg._set_parent(task_cfg)\n with read_write(task_cfg):\n with open_dict(task_cfg):\n del task_cfg[\"hydra\"]\n+ HydraConfig.instance().cfg = hydra_cfg # type: ignore\n+\n ret.cfg = task_cfg\n- ret.hydra_cfg = OmegaConf.create({\"hydra\": HydraConfig.get()})\n+ ret.hydra_cfg = hydra_cfg\n overrides = OmegaConf.to_container(config.hydra.overrides.task)\n assert isinstance(overrides, list)\n ret.overrides = overrides\n@@ -113,9 +119,6 @@\n if configure_logging:\n configure_log(config.hydra.job_logging, config.hydra.verbose)\n \n- hydra_cfg = OmegaConf.masked_copy(config, \"hydra\")\n- assert isinstance(hydra_cfg, DictConfig)\n-\n if config.hydra.output_subdir is not None:\n hydra_output = Path(config.hydra.output_subdir)\n _save_config(task_cfg, \"config.yaml\", hydra_output)\n@@ -130,6 +133,7 @@\n \n return ret\n finally:\n+ HydraConfig.instance().cfg = orig_hydra_cfg\n os.chdir(old_cwd)\n", "issue": "[Bug] hydra.job.id and hydra.job.num are not properly transferred to jobs in multirun\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport copy\nimport logging\nimport os\nimport re\nimport sys\nimport warnings\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom os.path import basename, dirname, splitext\nfrom pathlib import Path\nfrom time import localtime, strftime\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Union, cast\n\nfrom omegaconf import DictConfig, OmegaConf, open_dict, read_write\n\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.core.singleton import Singleton\nfrom hydra.types import TaskFunction\n\nlog = logging.getLogger(__name__)\n\n\ndef simple_stdout_log_config(level: int = logging.INFO) -> None:\n root = logging.getLogger()\n root.setLevel(level)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\"%(message)s\")\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n\ndef configure_log(\n log_config: DictConfig, verbose_config: Union[bool, str, Sequence[str]]\n) -> None:\n assert isinstance(verbose_config, (bool, str)) or OmegaConf.is_list(verbose_config)\n if log_config is not None:\n conf: Dict[str, Any] = OmegaConf.to_container( # type: ignore\n log_config, resolve=True\n )\n if conf[\"root\"] is not None:\n logging.config.dictConfig(conf)\n else:\n # default logging to stdout\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(name)s][%(levelname)s] - %(message)s\"\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n if isinstance(verbose_config, bool):\n if verbose_config:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n if isinstance(verbose_config, str):\n verbose_list = OmegaConf.create([verbose_config])\n elif OmegaConf.is_list(verbose_config):\n verbose_list = verbose_config # type: ignore\n else:\n assert False\n\n for logger in verbose_list:\n logging.getLogger(logger).setLevel(logging.DEBUG)\n\n\ndef _save_config(cfg: DictConfig, filename: str, output_dir: Path) -> None:\n output_dir.mkdir(parents=True, exist_ok=True)\n with open(str(output_dir / filename), \"w\") as file:\n file.write(OmegaConf.to_yaml(cfg))\n\n\ndef filter_overrides(overrides: Sequence[str]) -> Sequence[str]:\n \"\"\"\n :param overrides: overrides list\n :return: returning a new overrides list with all the keys starting with hydra. 
filtered.\n \"\"\"\n return [x for x in overrides if not x.startswith(\"hydra.\")]\n\n\ndef run_job(\n config: DictConfig,\n task_function: TaskFunction,\n job_dir_key: str,\n job_subdir_key: Optional[str],\n configure_logging: bool = True,\n) -> \"JobReturn\":\n old_cwd = os.getcwd()\n working_dir = str(OmegaConf.select(config, job_dir_key))\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n # are only available there.\n subdir = str(OmegaConf.select(config, job_subdir_key))\n working_dir = os.path.join(working_dir, subdir)\n try:\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n with read_write(task_cfg):\n with open_dict(task_cfg):\n del task_cfg[\"hydra\"]\n ret.cfg = task_cfg\n ret.hydra_cfg = OmegaConf.create({\"hydra\": HydraConfig.get()})\n overrides = OmegaConf.to_container(config.hydra.overrides.task)\n assert isinstance(overrides, list)\n ret.overrides = overrides\n # handle output directories here\n Path(str(working_dir)).mkdir(parents=True, exist_ok=True)\n os.chdir(working_dir)\n\n if configure_logging:\n configure_log(config.hydra.job_logging, config.hydra.verbose)\n\n hydra_cfg = OmegaConf.masked_copy(config, \"hydra\")\n assert isinstance(hydra_cfg, DictConfig)\n\n if config.hydra.output_subdir is not None:\n hydra_output = Path(config.hydra.output_subdir)\n _save_config(task_cfg, \"config.yaml\", hydra_output)\n _save_config(hydra_cfg, \"hydra.yaml\", hydra_output)\n _save_config(config.hydra.overrides.task, \"overrides.yaml\", hydra_output)\n\n with env_override(hydra_cfg.hydra.job.env_set):\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime.instance().get(\"name\")\n\n _flush_loggers()\n\n return ret\n finally:\n os.chdir(old_cwd)\n\n\ndef get_valid_filename(s: str) -> str:\n s = str(s).strip().replace(\" \", \"_\")\n return re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n\n\ndef setup_globals() -> None:\n def register(name: str, f: Any) -> None:\n try:\n OmegaConf.register_resolver(name, f)\n except AssertionError:\n # calling it again in no_workers mode will throw. 
safe to ignore.\n pass\n\n # please add documentation when you add a new resolver\n register(\"now\", lambda pattern: strftime(pattern, localtime()))\n register(\n \"hydra\",\n lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),\n )\n\n vi = sys.version_info\n version_dict = {\n \"major\": f\"{vi[0]}\",\n \"minor\": f\"{vi[0]}.{vi[1]}\",\n \"micro\": f\"{vi[0]}.{vi[1]}.{vi[2]}\",\n }\n register(\"python_version\", lambda level=\"minor\": version_dict.get(level))\n\n\n@dataclass\nclass JobReturn:\n overrides: Optional[Sequence[str]] = None\n return_value: Any = None\n cfg: Optional[DictConfig] = None\n hydra_cfg: Optional[DictConfig] = None\n working_dir: Optional[str] = None\n task_name: Optional[str] = None\n\n\nclass JobRuntime(metaclass=Singleton):\n def __init__(self) -> None:\n self.conf: DictConfig = OmegaConf.create()\n self.set(\"name\", \"UNKNOWN_NAME\")\n\n def get(self, key: str) -> Any:\n ret = OmegaConf.select(self.conf, key)\n if ret is None:\n raise KeyError(f\"Key not found in {type(self).__name__}: {key}\")\n return ret\n\n def set(self, key: str, value: Any) -> None:\n log.debug(f\"Setting {type(self).__name__}:{key}={value}\")\n self.conf[key] = value\n\n\ndef split_config_path(\n config_path: Optional[str], config_name: Optional[str]\n) -> Tuple[Optional[str], Optional[str]]:\n if config_path is None or config_path == \"\":\n return None, config_name\n split_file = splitext(config_path)\n if split_file[1] in (\".yaml\", \".yml\"):\n # assuming dir/config.yaml form\n config_file: Optional[str] = basename(config_path)\n config_dir: Optional[str] = dirname(config_path)\n # DEPRECATED: remove in 1.1\n msg = (\n \"\\nUsing config_path to specify the config name is deprecated, specify the config name via config_name\"\n \"\\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/config_path_changes\"\n )\n warnings.warn(category=UserWarning, message=msg)\n else:\n # assuming dir form without a config file.\n config_file = None\n config_dir = config_path\n\n if config_dir == \"\":\n config_dir = None\n\n if config_file == \"\":\n config_file = None\n\n if config_file is not None:\n if config_name is not None:\n raise ValueError(\n \"Config name should be specified in either normalized_config_path or config_name, but not both\"\n )\n config_name = config_file\n\n return config_dir, config_name\n\n\n@contextmanager\ndef env_override(env: Dict[str, str]) -> Any:\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n original_env = {key: os.getenv(key) for key in env}\n os.environ.update(env)\n try:\n yield\n finally:\n for key, value in original_env.items():\n if value is None:\n del os.environ[key]\n else:\n os.environ[key] = value\n\n\ndef _flush_loggers() -> None:\n # Python logging does not have an official API to flush all loggers.\n # This will have to do.\n for h_weak_ref in logging._handlerList: # type: ignore\n try:\n h_weak_ref().flush()\n except Exception:\n # ignore exceptions thrown during flushing\n pass\n", "path": "hydra/core/utils.py"}]}
| 3,183 | 466 |
gh_patches_debug_8135
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-1047
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show land module in left menus
As we said it would be interesting to move it to its own menu, we should take opportunity to rename some elements :
Module name :
"Gestion foncière" TO "Gestionnaires"
AND ALSO :
"Zone de compétence" TO "Compétence sentiers"
</issue>
<code>
[start of geotrek/land/urls.py]
1 from mapentity import registry
2
3 from . import models
4
5
6 urlpatterns = registry.register(models.PhysicalEdge, menu=False)
7 urlpatterns += registry.register(models.LandEdge, menu=False)
8 urlpatterns += registry.register(models.CompetenceEdge, menu=False)
9 urlpatterns += registry.register(models.WorkManagementEdge, menu=False)
10 urlpatterns += registry.register(models.SignageManagementEdge, menu=False)
11
[end of geotrek/land/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geotrek/land/urls.py b/geotrek/land/urls.py
--- a/geotrek/land/urls.py
+++ b/geotrek/land/urls.py
@@ -4,7 +4,7 @@
urlpatterns = registry.register(models.PhysicalEdge, menu=False)
-urlpatterns += registry.register(models.LandEdge, menu=False)
+urlpatterns += registry.register(models.LandEdge)
urlpatterns += registry.register(models.CompetenceEdge, menu=False)
urlpatterns += registry.register(models.WorkManagementEdge, menu=False)
urlpatterns += registry.register(models.SignageManagementEdge, menu=False)
|
{"golden_diff": "diff --git a/geotrek/land/urls.py b/geotrek/land/urls.py\n--- a/geotrek/land/urls.py\n+++ b/geotrek/land/urls.py\n@@ -4,7 +4,7 @@\n \n \n urlpatterns = registry.register(models.PhysicalEdge, menu=False)\n-urlpatterns += registry.register(models.LandEdge, menu=False)\n+urlpatterns += registry.register(models.LandEdge)\n urlpatterns += registry.register(models.CompetenceEdge, menu=False)\n urlpatterns += registry.register(models.WorkManagementEdge, menu=False)\n urlpatterns += registry.register(models.SignageManagementEdge, menu=False)\n", "issue": "Show land module in left menus\nAs we said it would be interesting to move it to its own menu, we should take opportunity to rename some elements : \n\nModule name : \n\"Gestion fonci\u00e8re\" TO \"Gestionnaires\"\nAND ALSO : \n\"Zone de comp\u00e9tence\" TO \"Comp\u00e9tence sentiers\"\n\n", "before_files": [{"content": "from mapentity import registry\n\nfrom . import models\n\n\nurlpatterns = registry.register(models.PhysicalEdge, menu=False)\nurlpatterns += registry.register(models.LandEdge, menu=False)\nurlpatterns += registry.register(models.CompetenceEdge, menu=False)\nurlpatterns += registry.register(models.WorkManagementEdge, menu=False)\nurlpatterns += registry.register(models.SignageManagementEdge, menu=False)\n", "path": "geotrek/land/urls.py"}]}
| 696 | 128 |
gh_patches_debug_21656
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-36746
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ini: Lock file when saving to prevent corruption
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and master branch are affected too.
-->
##### ISSUE TYPE
- Bug Report / Feature Idea
##### COMPONENT NAME
ini_file
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
ansible 2.4.2.0
config file = /etc/ansible/ansible.cfg
configured module search path = [u'/home/something/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/dist-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.6 (default, Nov 23 2017, 15:49:48) [GCC 4.8.4]
```
##### CONFIGURATION
DEFAULT_REMOTE_PORT(/etc/ansible/ansible.cfg) = 22
PERSISTENT_CONNECT_TIMEOUT(/etc/ansible/ansible.cfg) = 30
##### OS / ENVIRONMENT
Ubuntu Server 16.04 LTS, 64-bit
##### SUMMARY
When writing INI files from two instances to the same file and host it sometimes corrupts the file, resulting in wrong INI syntax (duplicates or a half section written to file).
##### STEPS TO REPRODUCE
Try to setup a test host and write to a INI file, execute the playbook/script few times at the same time.
##### EXPECTED RESULTS
I understand that it's a bad concept of potentially having the INI write at the same time, but with some deployment setup it could happen at any time you have scheduled tasks and manual tasks at the same time. It would be awesome to have a lock mechanism to the INI function.
##### ACTUAL RESULTS
Duplicates or weird syntax in the ini file.
</issue>
<code>
[start of lib/ansible/modules/files/ini_file.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
5 # Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
6 # Copyright: (c) 2017, Ansible Project
7 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11 #
12
13 ANSIBLE_METADATA = {'metadata_version': '1.1',
14 'status': ['preview'],
15 'supported_by': 'community'}
16
17 DOCUMENTATION = '''
18 ---
19 module: ini_file
20 short_description: Tweak settings in INI files
21 extends_documentation_fragment: files
22 description:
23 - Manage (add, remove, change) individual settings in an INI-style file without having
24 to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
25 sections if they don't exist.
26 - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
27 - Since version 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
28 no other modifications need to be applied.
29 version_added: "0.9"
30 options:
31 path:
32 description:
33 - Path to the INI-style file; this file is created if required.
34 - Before 2.3 this option was only usable as I(dest).
35 aliases: [ dest ]
36 required: true
37 section:
38 description:
39 - Section name in INI file. This is added if C(state=present) automatically when
40 a single value is being set.
41 - If left empty or set to `null`, the I(option) will be placed before the first I(section).
42 Using `null` is also required if the config format does not support sections.
43 required: true
44 option:
45 description:
46 - If set (required for changing a I(value)), this is the name of the option.
47 - May be omitted if adding/removing a whole I(section).
48 value:
49 description:
50 - The string value to be associated with an I(option). May be omitted when removing an I(option).
51 backup:
52 description:
53 - Create a backup file including the timestamp information so you can get
54 the original file back if you somehow clobbered it incorrectly.
55 type: bool
56 default: 'no'
57 others:
58 description:
59 - All arguments accepted by the M(file) module also work here
60 state:
61 description:
62 - If set to C(absent) the option or section will be removed if present instead of created.
63 choices: [ absent, present ]
64 default: present
65 no_extra_spaces:
66 description:
67 - Do not insert spaces before and after '=' symbol
68 type: bool
69 default: 'no'
70 version_added: "2.1"
71 create:
72 description:
73 - If set to 'no', the module will fail if the file does not already exist.
74 By default it will create the file if it is missing.
75 type: bool
76 default: 'yes'
77 version_added: "2.2"
78 notes:
79 - While it is possible to add an I(option) without specifying a I(value), this makes
80 no sense.
81 - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but
82 I(dest) still works as well.
83 author:
84 - Jan-Piet Mens (@jpmens)
85 - Ales Nosek (@noseka1)
86 '''
87
88 EXAMPLES = '''
89 # Before 2.3, option 'dest' was used instead of 'path'
90 - name: Ensure "fav=lemonade is in section "[drinks]" in specified file
91 ini_file:
92 path: /etc/conf
93 section: drinks
94 option: fav
95 value: lemonade
96 mode: 0600
97 backup: yes
98
99 - ini_file:
100 path: /etc/anotherconf
101 section: drinks
102 option: temperature
103 value: cold
104 backup: yes
105 '''
106
107 import os
108 import re
109
110 from ansible.module_utils.basic import AnsibleModule
111
112
113 def match_opt(option, line):
114 option = re.escape(option)
115 return re.match('( |\t)*%s( |\t)*=' % option, line) \
116 or re.match('#( |\t)*%s( |\t)*=' % option, line) \
117 or re.match(';( |\t)*%s( |\t)*=' % option, line)
118
119
120 def match_active_opt(option, line):
121 option = re.escape(option)
122 return re.match('( |\t)*%s( |\t)*=' % option, line)
123
124
125 def do_ini(module, filename, section=None, option=None, value=None,
126 state='present', backup=False, no_extra_spaces=False, create=True):
127
128 diff = dict(
129 before='',
130 after='',
131 before_header='%s (content)' % filename,
132 after_header='%s (content)' % filename,
133 )
134
135 if not os.path.exists(filename):
136 if not create:
137 module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
138 destpath = os.path.dirname(filename)
139 if not os.path.exists(destpath) and not module.check_mode:
140 os.makedirs(destpath)
141 ini_lines = []
142 else:
143 ini_file = open(filename, 'r')
144 try:
145 ini_lines = ini_file.readlines()
146 finally:
147 ini_file.close()
148
149 if module._diff:
150 diff['before'] = ''.join(ini_lines)
151
152 changed = False
153
154 # ini file could be empty
155 if not ini_lines:
156 ini_lines.append('\n')
157
158 # last line of file may not contain a trailing newline
159 if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
160 ini_lines[-1] += '\n'
161 changed = True
162
163 # append a fake section line to simplify the logic
164 ini_lines.append('[')
165
166 within_section = not section
167 section_start = 0
168 msg = 'OK'
169 if no_extra_spaces:
170 assignment_format = '%s=%s\n'
171 else:
172 assignment_format = '%s = %s\n'
173
174 for index, line in enumerate(ini_lines):
175 if line.startswith('[%s]' % section):
176 within_section = True
177 section_start = index
178 elif line.startswith('['):
179 if within_section:
180 if state == 'present':
181 # insert missing option line at the end of the section
182 for i in range(index, 0, -1):
183 # search backwards for previous non-blank or non-comment line
184 if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
185 ini_lines.insert(i, assignment_format % (option, value))
186 msg = 'option added'
187 changed = True
188 break
189 elif state == 'absent' and not option:
190 # remove the entire section
191 del ini_lines[section_start:index]
192 msg = 'section removed'
193 changed = True
194 break
195 else:
196 if within_section and option:
197 if state == 'present':
198 # change the existing option line
199 if match_opt(option, line):
200 newline = assignment_format % (option, value)
201 option_changed = ini_lines[index] != newline
202 changed = changed or option_changed
203 if option_changed:
204 msg = 'option changed'
205 ini_lines[index] = newline
206 if option_changed:
207 # remove all possible option occurrences from the rest of the section
208 index = index + 1
209 while index < len(ini_lines):
210 line = ini_lines[index]
211 if line.startswith('['):
212 break
213 if match_active_opt(option, line):
214 del ini_lines[index]
215 else:
216 index = index + 1
217 break
218 elif state == 'absent':
219 # delete the existing line
220 if match_active_opt(option, line):
221 del ini_lines[index]
222 changed = True
223 msg = 'option changed'
224 break
225
226 # remove the fake section line
227 del ini_lines[-1:]
228
229 if not within_section and option and state == 'present':
230 ini_lines.append('[%s]\n' % section)
231 ini_lines.append(assignment_format % (option, value))
232 changed = True
233 msg = 'section and option added'
234
235 if module._diff:
236 diff['after'] = ''.join(ini_lines)
237
238 backup_file = None
239 if changed and not module.check_mode:
240 if backup:
241 backup_file = module.backup_local(filename)
242 ini_file = open(filename, 'w')
243 try:
244 ini_file.writelines(ini_lines)
245 finally:
246 ini_file.close()
247
248 return (changed, backup_file, diff, msg)
249
250
251 def main():
252
253 module = AnsibleModule(
254 argument_spec=dict(
255 path=dict(type='path', required=True, aliases=['dest']),
256 section=dict(type='str', required=True),
257 option=dict(type='str'),
258 value=dict(type='str'),
259 backup=dict(type='bool', default=False),
260 state=dict(type='str', default='present', choices=['absent', 'present']),
261 no_extra_spaces=dict(type='bool', default=False),
262 create=dict(type='bool', default=True)
263 ),
264 add_file_common_args=True,
265 supports_check_mode=True,
266 )
267
268 path = module.params['path']
269 section = module.params['section']
270 option = module.params['option']
271 value = module.params['value']
272 state = module.params['state']
273 backup = module.params['backup']
274 no_extra_spaces = module.params['no_extra_spaces']
275 create = module.params['create']
276
277 (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create)
278
279 if not module.check_mode and os.path.exists(path):
280 file_args = module.load_file_common_arguments(module.params)
281 changed = module.set_fs_attributes_if_different(file_args, changed)
282
283 results = dict(
284 changed=changed,
285 diff=diff,
286 msg=msg,
287 path=path,
288 )
289 if backup_file is not None:
290 results['backup_file'] = backup_file
291
292 # Mission complete
293 module.exit_json(**results)
294
295 if __name__ == '__main__':
296 main()
297
[end of lib/ansible/modules/files/ini_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/ansible/modules/files/ini_file.py b/lib/ansible/modules/files/ini_file.py
--- a/lib/ansible/modules/files/ini_file.py
+++ b/lib/ansible/modules/files/ini_file.py
@@ -106,6 +106,8 @@
import os
import re
+import tempfile
+import traceback
from ansible.module_utils.basic import AnsibleModule
@@ -239,11 +241,20 @@
if changed and not module.check_mode:
if backup:
backup_file = module.backup_local(filename)
- ini_file = open(filename, 'w')
+
try:
- ini_file.writelines(ini_lines)
- finally:
- ini_file.close()
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'w')
+ f.writelines(ini_lines)
+ f.close()
+ except IOError:
+ module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc())
+
+ try:
+ module.atomic_move(tmpfile, filename)
+ except IOError:
+ module.ansible.fail_json(msg='Unable to move temporary \
+ file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc())
return (changed, backup_file, diff, msg)
|
{"golden_diff": "diff --git a/lib/ansible/modules/files/ini_file.py b/lib/ansible/modules/files/ini_file.py\n--- a/lib/ansible/modules/files/ini_file.py\n+++ b/lib/ansible/modules/files/ini_file.py\n@@ -106,6 +106,8 @@\n \n import os\n import re\n+import tempfile\n+import traceback\n \n from ansible.module_utils.basic import AnsibleModule\n \n@@ -239,11 +241,20 @@\n if changed and not module.check_mode:\n if backup:\n backup_file = module.backup_local(filename)\n- ini_file = open(filename, 'w')\n+\n try:\n- ini_file.writelines(ini_lines)\n- finally:\n- ini_file.close()\n+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)\n+ f = os.fdopen(tmpfd, 'w')\n+ f.writelines(ini_lines)\n+ f.close()\n+ except IOError:\n+ module.fail_json(msg=\"Unable to create temporary file %s\", traceback=traceback.format_exc())\n+\n+ try:\n+ module.atomic_move(tmpfile, filename)\n+ except IOError:\n+ module.ansible.fail_json(msg='Unable to move temporary \\\n+ file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc())\n \n return (changed, backup_file, diff, msg)\n", "issue": "ini: Lock file when saving to prevent corruption\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report / Feature Idea\r\n\r\n##### COMPONENT NAME\r\nini_file\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.4.2.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'/home/something/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/dist-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.6 (default, Nov 23 2017, 15:49:48) [GCC 4.8.4]\r\n```\r\n\r\n##### CONFIGURATION\r\nDEFAULT_REMOTE_PORT(/etc/ansible/ansible.cfg) = 22\r\nPERSISTENT_CONNECT_TIMEOUT(/etc/ansible/ansible.cfg) = 30\r\n\r\n##### OS / ENVIRONMENT\r\nUbuntu Server 16.04 LTS, 64-bit\r\n\r\n##### SUMMARY\r\nWhen writing INI files from two instances to the same file and host it sometimes corrupts the file, resulting in wrong INI syntax (duplicates or a half section written to file).\r\n\r\n##### STEPS TO REPRODUCE\r\nTry to setup a test host and write to a INI file, execute the playbook/script few times at the same time.\r\n\r\n##### EXPECTED RESULTS\r\nI understand that it's a bad concept of potentially having the INI write at the same time, but with some deployment setup it could happen at any time you have scheduled tasks and manual tasks at the same time. 
It would be awesome to have a lock mechanism to the INI function.\r\n\r\n##### ACTUAL RESULTS\r\nDuplicates or weird syntax in the ini file.\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>\n# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>\n# Copyright: (c) 2017, Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: ini_file\nshort_description: Tweak settings in INI files\nextends_documentation_fragment: files\ndescription:\n - Manage (add, remove, change) individual settings in an INI-style file without having\n to manage the file as a whole with, say, M(template) or M(assemble). Adds missing\n sections if they don't exist.\n - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.\n - Since version 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when\n no other modifications need to be applied.\nversion_added: \"0.9\"\noptions:\n path:\n description:\n - Path to the INI-style file; this file is created if required.\n - Before 2.3 this option was only usable as I(dest).\n aliases: [ dest ]\n required: true\n section:\n description:\n - Section name in INI file. This is added if C(state=present) automatically when\n a single value is being set.\n - If left empty or set to `null`, the I(option) will be placed before the first I(section).\n Using `null` is also required if the config format does not support sections.\n required: true\n option:\n description:\n - If set (required for changing a I(value)), this is the name of the option.\n - May be omitted if adding/removing a whole I(section).\n value:\n description:\n - The string value to be associated with an I(option). 
May be omitted when removing an I(option).\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n type: bool\n default: 'no'\n others:\n description:\n - All arguments accepted by the M(file) module also work here\n state:\n description:\n - If set to C(absent) the option or section will be removed if present instead of created.\n choices: [ absent, present ]\n default: present\n no_extra_spaces:\n description:\n - Do not insert spaces before and after '=' symbol\n type: bool\n default: 'no'\n version_added: \"2.1\"\n create:\n description:\n - If set to 'no', the module will fail if the file does not already exist.\n By default it will create the file if it is missing.\n type: bool\n default: 'yes'\n version_added: \"2.2\"\nnotes:\n - While it is possible to add an I(option) without specifying a I(value), this makes\n no sense.\n - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but\n I(dest) still works as well.\nauthor:\n - Jan-Piet Mens (@jpmens)\n - Ales Nosek (@noseka1)\n'''\n\nEXAMPLES = '''\n# Before 2.3, option 'dest' was used instead of 'path'\n- name: Ensure \"fav=lemonade is in section \"[drinks]\" in specified file\n ini_file:\n path: /etc/conf\n section: drinks\n option: fav\n value: lemonade\n mode: 0600\n backup: yes\n\n- ini_file:\n path: /etc/anotherconf\n section: drinks\n option: temperature\n value: cold\n backup: yes\n'''\n\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef match_opt(option, line):\n option = re.escape(option)\n return re.match('( |\\t)*%s( |\\t)*=' % option, line) \\\n or re.match('#( |\\t)*%s( |\\t)*=' % option, line) \\\n or re.match(';( |\\t)*%s( |\\t)*=' % option, line)\n\n\ndef match_active_opt(option, line):\n option = re.escape(option)\n return re.match('( |\\t)*%s( |\\t)*=' % option, line)\n\n\ndef do_ini(module, filename, section=None, option=None, value=None,\n state='present', backup=False, no_extra_spaces=False, create=True):\n\n diff = dict(\n before='',\n after='',\n before_header='%s (content)' % filename,\n after_header='%s (content)' % filename,\n )\n\n if not os.path.exists(filename):\n if not create:\n module.fail_json(rc=257, msg='Destination %s does not exist !' 
% filename)\n destpath = os.path.dirname(filename)\n if not os.path.exists(destpath) and not module.check_mode:\n os.makedirs(destpath)\n ini_lines = []\n else:\n ini_file = open(filename, 'r')\n try:\n ini_lines = ini_file.readlines()\n finally:\n ini_file.close()\n\n if module._diff:\n diff['before'] = ''.join(ini_lines)\n\n changed = False\n\n # ini file could be empty\n if not ini_lines:\n ini_lines.append('\\n')\n\n # last line of file may not contain a trailing newline\n if ini_lines[-1] == \"\" or ini_lines[-1][-1] != '\\n':\n ini_lines[-1] += '\\n'\n changed = True\n\n # append a fake section line to simplify the logic\n ini_lines.append('[')\n\n within_section = not section\n section_start = 0\n msg = 'OK'\n if no_extra_spaces:\n assignment_format = '%s=%s\\n'\n else:\n assignment_format = '%s = %s\\n'\n\n for index, line in enumerate(ini_lines):\n if line.startswith('[%s]' % section):\n within_section = True\n section_start = index\n elif line.startswith('['):\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n for i in range(index, 0, -1):\n # search backwards for previous non-blank or non-comment line\n if not re.match(r'^[ \\t]*([#;].*)?$', ini_lines[i - 1]):\n ini_lines.insert(i, assignment_format % (option, value))\n msg = 'option added'\n changed = True\n break\n elif state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n msg = 'section removed'\n changed = True\n break\n else:\n if within_section and option:\n if state == 'present':\n # change the existing option line\n if match_opt(option, line):\n newline = assignment_format % (option, value)\n option_changed = ini_lines[index] != newline\n changed = changed or option_changed\n if option_changed:\n msg = 'option changed'\n ini_lines[index] = newline\n if option_changed:\n # remove all possible option occurrences from the rest of the section\n index = index + 1\n while index < len(ini_lines):\n line = ini_lines[index]\n if line.startswith('['):\n break\n if match_active_opt(option, line):\n del ini_lines[index]\n else:\n index = index + 1\n break\n elif state == 'absent':\n # delete the existing line\n if match_active_opt(option, line):\n del ini_lines[index]\n changed = True\n msg = 'option changed'\n break\n\n # remove the fake section line\n del ini_lines[-1:]\n\n if not within_section and option and state == 'present':\n ini_lines.append('[%s]\\n' % section)\n ini_lines.append(assignment_format % (option, value))\n changed = True\n msg = 'section and option added'\n\n if module._diff:\n diff['after'] = ''.join(ini_lines)\n\n backup_file = None\n if changed and not module.check_mode:\n if backup:\n backup_file = module.backup_local(filename)\n ini_file = open(filename, 'w')\n try:\n ini_file.writelines(ini_lines)\n finally:\n ini_file.close()\n\n return (changed, backup_file, diff, msg)\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec=dict(\n path=dict(type='path', required=True, aliases=['dest']),\n section=dict(type='str', required=True),\n option=dict(type='str'),\n value=dict(type='str'),\n backup=dict(type='bool', default=False),\n state=dict(type='str', default='present', choices=['absent', 'present']),\n no_extra_spaces=dict(type='bool', default=False),\n create=dict(type='bool', default=True)\n ),\n add_file_common_args=True,\n supports_check_mode=True,\n )\n\n path = module.params['path']\n section = module.params['section']\n option = module.params['option']\n value = module.params['value']\n state 
= module.params['state']\n backup = module.params['backup']\n no_extra_spaces = module.params['no_extra_spaces']\n create = module.params['create']\n\n (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create)\n\n if not module.check_mode and os.path.exists(path):\n file_args = module.load_file_common_arguments(module.params)\n changed = module.set_fs_attributes_if_different(file_args, changed)\n\n results = dict(\n changed=changed,\n diff=diff,\n msg=msg,\n path=path,\n )\n if backup_file is not None:\n results['backup_file'] = backup_file\n\n # Mission complete\n module.exit_json(**results)\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/files/ini_file.py"}]}
| 4,056 | 304 |
gh_patches_debug_29614
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-393
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`METADATA` can't be a dict
## 🐛 Bug
If a dict is used for the `METADATA` key then it will give an error. This should be supported.
</issue>
<code>
[start of flash/core/data/batch.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, TYPE_CHECKING, Union
15
16 import torch
17 from pytorch_lightning.trainer.states import RunningStage
18 from pytorch_lightning.utilities.exceptions import MisconfigurationException
19 from torch import Tensor
20
21 from flash.core.data.callback import ControlFlow
22 from flash.core.data.data_source import DefaultDataKeys
23 from flash.core.data.utils import (
24 _contains_any_tensor,
25 convert_to_modules,
26 CurrentFuncContext,
27 CurrentRunningStageContext,
28 )
29
30 if TYPE_CHECKING: # pragma: no-cover
31 from flash.core.data.process import Preprocess
32
33
34 class _Sequential(torch.nn.Module):
35 """
36 This class is used to chain 3 functions together for the _Preprocessor ``per_sample_transform`` function.
37 1. ``pre_tensor_transform``
38 2. ``to_tensor_transform``
39 3. ``post_tensor_transform``
40 """
41
42 def __init__(
43 self,
44 preprocess: 'Preprocess',
45 pre_tensor_transform: Callable,
46 to_tensor_transform: Callable,
47 post_tensor_transform: Callable,
48 stage: RunningStage,
49 assert_contains_tensor: bool = False,
50 ):
51 super().__init__()
52 self.preprocess = preprocess
53 self.callback = ControlFlow(self.preprocess.callbacks)
54 self.pre_tensor_transform = convert_to_modules(pre_tensor_transform)
55 self.to_tensor_transform = convert_to_modules(to_tensor_transform)
56 self.post_tensor_transform = convert_to_modules(post_tensor_transform)
57 self.stage = stage
58 self.assert_contains_tensor = assert_contains_tensor
59
60 self._current_stage_context = CurrentRunningStageContext(stage, preprocess, reset=False)
61 self._pre_tensor_transform_context = CurrentFuncContext("pre_tensor_transform", preprocess)
62 self._to_tensor_transform_context = CurrentFuncContext("to_tensor_transform", preprocess)
63 self._post_tensor_transform_context = CurrentFuncContext("post_tensor_transform", preprocess)
64
65 def forward(self, sample: Any) -> Any:
66 self.callback.on_load_sample(sample, self.stage)
67
68 with self._current_stage_context:
69 with self._pre_tensor_transform_context:
70 sample = self.pre_tensor_transform(sample)
71 self.callback.on_pre_tensor_transform(sample, self.stage)
72
73 with self._to_tensor_transform_context:
74 sample = self.to_tensor_transform(sample)
75 self.callback.on_to_tensor_transform(sample, self.stage)
76
77 if self.assert_contains_tensor:
78 if not _contains_any_tensor(sample):
79 raise MisconfigurationException(
80 "When ``to_tensor_transform`` is overriden, "
81 "``DataPipeline`` expects the outputs to be ``tensors``"
82 )
83
84 with self._post_tensor_transform_context:
85 sample = self.post_tensor_transform(sample)
86 self.callback.on_post_tensor_transform(sample, self.stage)
87
88 return sample
89
90 def __str__(self) -> str:
91 return (
92 f"{self.__class__.__name__}:\n"
93 f"\t(pre_tensor_transform): {str(self.pre_tensor_transform)}\n"
94 f"\t(to_tensor_transform): {str(self.to_tensor_transform)}\n"
95 f"\t(post_tensor_transform): {str(self.post_tensor_transform)}\n"
96 f"\t(assert_contains_tensor): {str(self.assert_contains_tensor)}\n"
97 f"\t(stage): {str(self.stage)}"
98 )
99
100
101 class _Preprocessor(torch.nn.Module):
102 """
103 This class is used to encapsultate the following functions of a Preprocess Object:
104 Inside a worker:
105 per_sample_transform: Function to transform an individual sample
106 Inside a worker, it is actually make of 3 functions:
107 * pre_tensor_transform
108 * to_tensor_transform
109 * post_tensor_transform
110 collate: Function to merge sample into a batch
111 per_batch_transform: Function to transform an individual batch
112 * per_batch_transform
113
114 Inside main process:
115 per_sample_transform: Function to transform an individual sample
116 * per_sample_transform_on_device
117 collate: Function to merge sample into a batch
118 per_batch_transform: Function to transform an individual batch
119 * per_batch_transform_on_device
120 """
121
122 def __init__(
123 self,
124 preprocess: 'Preprocess',
125 collate_fn: Callable,
126 per_sample_transform: Union[Callable, _Sequential],
127 per_batch_transform: Callable,
128 stage: RunningStage,
129 apply_per_sample_transform: bool = True,
130 on_device: bool = False,
131 ):
132 super().__init__()
133 self.preprocess = preprocess
134 self.callback = ControlFlow(self.preprocess.callbacks)
135 self.collate_fn = convert_to_modules(collate_fn)
136 self.per_sample_transform = convert_to_modules(per_sample_transform)
137 self.per_batch_transform = convert_to_modules(per_batch_transform)
138 self.apply_per_sample_transform = apply_per_sample_transform
139 self.stage = stage
140 self.on_device = on_device
141
142 extension = f"{'_on_device' if self.on_device else ''}"
143 self._current_stage_context = CurrentRunningStageContext(stage, preprocess)
144 self._per_sample_transform_context = CurrentFuncContext(f"per_sample_transform{extension}", preprocess)
145 self._collate_context = CurrentFuncContext("collate", preprocess)
146 self._per_batch_transform_context = CurrentFuncContext(f"per_batch_transform{extension}", preprocess)
147
148 def _extract_metadata(
149 self,
150 samples: List[Dict[str, Any]],
151 ) -> Tuple[List[Dict[str, Any]], Optional[List[Dict[str, Any]]]]:
152 metadata = [s.pop(DefaultDataKeys.METADATA, None) if isinstance(s, Mapping) else None for s in samples]
153 return samples, metadata if any(m is not None for m in metadata) else None
154
155 def forward(self, samples: Sequence[Any]) -> Any:
156 # we create a new dict to prevent from potential memory leaks
157 # assuming that the dictionary samples are stored in between and
158 # potentially modified before the transforms are applied.
159 if isinstance(samples, dict):
160 samples = dict(samples.items())
161
162 with self._current_stage_context:
163
164 if self.apply_per_sample_transform:
165 with self._per_sample_transform_context:
166 _samples = []
167 for sample in samples:
168 sample = self.per_sample_transform(sample)
169 if self.on_device:
170 self.callback.on_per_sample_transform_on_device(sample, self.stage)
171 _samples.append(sample)
172
173 samples = type(_samples)(_samples)
174
175 with self._collate_context:
176 samples, metadata = self._extract_metadata(samples)
177 samples = self.collate_fn(samples)
178 if metadata:
179 samples[DefaultDataKeys.METADATA] = metadata
180 self.callback.on_collate(samples, self.stage)
181
182 with self._per_batch_transform_context:
183 samples = self.per_batch_transform(samples)
184 if self.on_device:
185 self.callback.on_per_batch_transform_on_device(samples, self.stage)
186 else:
187 self.callback.on_per_batch_transform(samples, self.stage)
188 return samples
189
190 def __str__(self) -> str:
191 # todo: define repr function which would take object and string attributes to be shown
192 return (
193 "_Preprocessor:\n"
194 f"\t(per_sample_transform): {str(self.per_sample_transform)}\n"
195 f"\t(collate_fn): {str(self.collate_fn)}\n"
196 f"\t(per_batch_transform): {str(self.per_batch_transform)}\n"
197 f"\t(apply_per_sample_transform): {str(self.apply_per_sample_transform)}\n"
198 f"\t(on_device): {str(self.on_device)}\n"
199 f"\t(stage): {str(self.stage)}"
200 )
201
202
203 class _Postprocessor(torch.nn.Module):
204 """
205 This class is used to encapsultate the following functions of a Postprocess Object:
206 Inside main process:
207 per_batch_transform: Function to transform a batch
208 per_sample_transform: Function to transform an individual sample
209 uncollate_fn: Function to split a batch into samples
210 per_sample_transform: Function to transform an individual sample
211 save_fn: Function to save all data
212 save_per_sample: Function to save an individual sample
213 """
214
215 def __init__(
216 self,
217 uncollate_fn: Callable,
218 per_batch_transform: Callable,
219 per_sample_transform: Callable,
220 serializer: Optional[Callable],
221 save_fn: Optional[Callable] = None,
222 save_per_sample: bool = False
223 ):
224 super().__init__()
225 self.uncollate_fn = convert_to_modules(uncollate_fn)
226 self.per_batch_transform = convert_to_modules(per_batch_transform)
227 self.per_sample_transform = convert_to_modules(per_sample_transform)
228 self.serializer = convert_to_modules(serializer)
229 self.save_fn = convert_to_modules(save_fn)
230 self.save_per_sample = convert_to_modules(save_per_sample)
231
232 def forward(self, batch: Sequence[Any]):
233 uncollated = self.uncollate_fn(self.per_batch_transform(batch))
234
235 final_preds = type(uncollated)([self.serializer(self.per_sample_transform(sample)) for sample in uncollated])
236
237 if self.save_fn:
238 if self.save_per_sample:
239 for pred in final_preds:
240 self.save_fn(pred)
241 else:
242 self.save_fn(final_preds)
243 else:
244 return final_preds
245
246 def __str__(self) -> str:
247 return (
248 "_Postprocessor:\n"
249 f"\t(per_batch_transform): {str(self.per_batch_transform)}\n"
250 f"\t(uncollate_fn): {str(self.uncollate_fn)}\n"
251 f"\t(per_sample_transform): {str(self.per_sample_transform)}\n"
252 f"\t(serializer): {str(self.serializer)}"
253 )
254
255
256 def default_uncollate(batch: Any):
257 """
258 This function is used to uncollate a batch into samples.
259
260 Examples:
261 >>> a, b = default_uncollate(torch.rand((2,1)))
262 """
263
264 batch_type = type(batch)
265
266 if isinstance(batch, Tensor):
267 if len(batch.shape) == 0: # 0 shape tensors
268 return batch
269 return list(torch.unbind(batch, 0))
270
271 elif isinstance(batch, Mapping):
272 return [batch_type(dict(zip(batch, default_uncollate(t)))) for t in zip(*batch.values())]
273
274 elif isinstance(batch, tuple) and hasattr(batch, '_fields'): # namedtuple
275 return [batch_type(*default_uncollate(sample)) for sample in zip(*batch)]
276
277 elif isinstance(batch, Sequence) and not isinstance(batch, str):
278 return [default_uncollate(sample) for sample in batch]
279
280 return batch
281
[end of flash/core/data/batch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/flash/core/data/batch.py b/flash/core/data/batch.py
--- a/flash/core/data/batch.py
+++ b/flash/core/data/batch.py
@@ -145,8 +145,8 @@
self._collate_context = CurrentFuncContext("collate", preprocess)
self._per_batch_transform_context = CurrentFuncContext(f"per_batch_transform{extension}", preprocess)
+ @staticmethod
def _extract_metadata(
- self,
samples: List[Dict[str, Any]],
) -> Tuple[List[Dict[str, Any]], Optional[List[Dict[str, Any]]]]:
metadata = [s.pop(DefaultDataKeys.METADATA, None) if isinstance(s, Mapping) else None for s in samples]
@@ -229,8 +229,18 @@
self.save_fn = convert_to_modules(save_fn)
self.save_per_sample = convert_to_modules(save_per_sample)
+ @staticmethod
+ def _extract_metadata(batch: Any) -> Tuple[Any, Optional[Any]]:
+ if isinstance(batch, Mapping):
+ return batch, batch.get(DefaultDataKeys.METADATA, None)
+ return batch, None
+
def forward(self, batch: Sequence[Any]):
+ batch, metadata = self._extract_metadata(batch)
uncollated = self.uncollate_fn(self.per_batch_transform(batch))
+ if metadata:
+ for sample, sample_metadata in zip(uncollated, metadata):
+ sample[DefaultDataKeys.METADATA] = sample_metadata
final_preds = type(uncollated)([self.serializer(self.per_sample_transform(sample)) for sample in uncollated])
|
{"golden_diff": "diff --git a/flash/core/data/batch.py b/flash/core/data/batch.py\n--- a/flash/core/data/batch.py\n+++ b/flash/core/data/batch.py\n@@ -145,8 +145,8 @@\n self._collate_context = CurrentFuncContext(\"collate\", preprocess)\n self._per_batch_transform_context = CurrentFuncContext(f\"per_batch_transform{extension}\", preprocess)\n \n+ @staticmethod\n def _extract_metadata(\n- self,\n samples: List[Dict[str, Any]],\n ) -> Tuple[List[Dict[str, Any]], Optional[List[Dict[str, Any]]]]:\n metadata = [s.pop(DefaultDataKeys.METADATA, None) if isinstance(s, Mapping) else None for s in samples]\n@@ -229,8 +229,18 @@\n self.save_fn = convert_to_modules(save_fn)\n self.save_per_sample = convert_to_modules(save_per_sample)\n \n+ @staticmethod\n+ def _extract_metadata(batch: Any) -> Tuple[Any, Optional[Any]]:\n+ if isinstance(batch, Mapping):\n+ return batch, batch.get(DefaultDataKeys.METADATA, None)\n+ return batch, None\n+\n def forward(self, batch: Sequence[Any]):\n+ batch, metadata = self._extract_metadata(batch)\n uncollated = self.uncollate_fn(self.per_batch_transform(batch))\n+ if metadata:\n+ for sample, sample_metadata in zip(uncollated, metadata):\n+ sample[DefaultDataKeys.METADATA] = sample_metadata\n \n final_preds = type(uncollated)([self.serializer(self.per_sample_transform(sample)) for sample in uncollated])\n", "issue": "`METADATA` can't be a dict\n## \ud83d\udc1b Bug\r\n\r\nIf a dict is used for the `METADATA` key then it will give an error. This should be supported.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, TYPE_CHECKING, Union\n\nimport torch\nfrom pytorch_lightning.trainer.states import RunningStage\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch import Tensor\n\nfrom flash.core.data.callback import ControlFlow\nfrom flash.core.data.data_source import DefaultDataKeys\nfrom flash.core.data.utils import (\n _contains_any_tensor,\n convert_to_modules,\n CurrentFuncContext,\n CurrentRunningStageContext,\n)\n\nif TYPE_CHECKING: # pragma: no-cover\n from flash.core.data.process import Preprocess\n\n\nclass _Sequential(torch.nn.Module):\n \"\"\"\n This class is used to chain 3 functions together for the _Preprocessor ``per_sample_transform`` function.\n 1. ``pre_tensor_transform``\n 2. ``to_tensor_transform``\n 3. 
``post_tensor_transform``\n \"\"\"\n\n def __init__(\n self,\n preprocess: 'Preprocess',\n pre_tensor_transform: Callable,\n to_tensor_transform: Callable,\n post_tensor_transform: Callable,\n stage: RunningStage,\n assert_contains_tensor: bool = False,\n ):\n super().__init__()\n self.preprocess = preprocess\n self.callback = ControlFlow(self.preprocess.callbacks)\n self.pre_tensor_transform = convert_to_modules(pre_tensor_transform)\n self.to_tensor_transform = convert_to_modules(to_tensor_transform)\n self.post_tensor_transform = convert_to_modules(post_tensor_transform)\n self.stage = stage\n self.assert_contains_tensor = assert_contains_tensor\n\n self._current_stage_context = CurrentRunningStageContext(stage, preprocess, reset=False)\n self._pre_tensor_transform_context = CurrentFuncContext(\"pre_tensor_transform\", preprocess)\n self._to_tensor_transform_context = CurrentFuncContext(\"to_tensor_transform\", preprocess)\n self._post_tensor_transform_context = CurrentFuncContext(\"post_tensor_transform\", preprocess)\n\n def forward(self, sample: Any) -> Any:\n self.callback.on_load_sample(sample, self.stage)\n\n with self._current_stage_context:\n with self._pre_tensor_transform_context:\n sample = self.pre_tensor_transform(sample)\n self.callback.on_pre_tensor_transform(sample, self.stage)\n\n with self._to_tensor_transform_context:\n sample = self.to_tensor_transform(sample)\n self.callback.on_to_tensor_transform(sample, self.stage)\n\n if self.assert_contains_tensor:\n if not _contains_any_tensor(sample):\n raise MisconfigurationException(\n \"When ``to_tensor_transform`` is overriden, \"\n \"``DataPipeline`` expects the outputs to be ``tensors``\"\n )\n\n with self._post_tensor_transform_context:\n sample = self.post_tensor_transform(sample)\n self.callback.on_post_tensor_transform(sample, self.stage)\n\n return sample\n\n def __str__(self) -> str:\n return (\n f\"{self.__class__.__name__}:\\n\"\n f\"\\t(pre_tensor_transform): {str(self.pre_tensor_transform)}\\n\"\n f\"\\t(to_tensor_transform): {str(self.to_tensor_transform)}\\n\"\n f\"\\t(post_tensor_transform): {str(self.post_tensor_transform)}\\n\"\n f\"\\t(assert_contains_tensor): {str(self.assert_contains_tensor)}\\n\"\n f\"\\t(stage): {str(self.stage)}\"\n )\n\n\nclass _Preprocessor(torch.nn.Module):\n \"\"\"\n This class is used to encapsultate the following functions of a Preprocess Object:\n Inside a worker:\n per_sample_transform: Function to transform an individual sample\n Inside a worker, it is actually make of 3 functions:\n * pre_tensor_transform\n * to_tensor_transform\n * post_tensor_transform\n collate: Function to merge sample into a batch\n per_batch_transform: Function to transform an individual batch\n * per_batch_transform\n\n Inside main process:\n per_sample_transform: Function to transform an individual sample\n * per_sample_transform_on_device\n collate: Function to merge sample into a batch\n per_batch_transform: Function to transform an individual batch\n * per_batch_transform_on_device\n \"\"\"\n\n def __init__(\n self,\n preprocess: 'Preprocess',\n collate_fn: Callable,\n per_sample_transform: Union[Callable, _Sequential],\n per_batch_transform: Callable,\n stage: RunningStage,\n apply_per_sample_transform: bool = True,\n on_device: bool = False,\n ):\n super().__init__()\n self.preprocess = preprocess\n self.callback = ControlFlow(self.preprocess.callbacks)\n self.collate_fn = convert_to_modules(collate_fn)\n self.per_sample_transform = convert_to_modules(per_sample_transform)\n 
self.per_batch_transform = convert_to_modules(per_batch_transform)\n self.apply_per_sample_transform = apply_per_sample_transform\n self.stage = stage\n self.on_device = on_device\n\n extension = f\"{'_on_device' if self.on_device else ''}\"\n self._current_stage_context = CurrentRunningStageContext(stage, preprocess)\n self._per_sample_transform_context = CurrentFuncContext(f\"per_sample_transform{extension}\", preprocess)\n self._collate_context = CurrentFuncContext(\"collate\", preprocess)\n self._per_batch_transform_context = CurrentFuncContext(f\"per_batch_transform{extension}\", preprocess)\n\n def _extract_metadata(\n self,\n samples: List[Dict[str, Any]],\n ) -> Tuple[List[Dict[str, Any]], Optional[List[Dict[str, Any]]]]:\n metadata = [s.pop(DefaultDataKeys.METADATA, None) if isinstance(s, Mapping) else None for s in samples]\n return samples, metadata if any(m is not None for m in metadata) else None\n\n def forward(self, samples: Sequence[Any]) -> Any:\n # we create a new dict to prevent from potential memory leaks\n # assuming that the dictionary samples are stored in between and\n # potentially modified before the transforms are applied.\n if isinstance(samples, dict):\n samples = dict(samples.items())\n\n with self._current_stage_context:\n\n if self.apply_per_sample_transform:\n with self._per_sample_transform_context:\n _samples = []\n for sample in samples:\n sample = self.per_sample_transform(sample)\n if self.on_device:\n self.callback.on_per_sample_transform_on_device(sample, self.stage)\n _samples.append(sample)\n\n samples = type(_samples)(_samples)\n\n with self._collate_context:\n samples, metadata = self._extract_metadata(samples)\n samples = self.collate_fn(samples)\n if metadata:\n samples[DefaultDataKeys.METADATA] = metadata\n self.callback.on_collate(samples, self.stage)\n\n with self._per_batch_transform_context:\n samples = self.per_batch_transform(samples)\n if self.on_device:\n self.callback.on_per_batch_transform_on_device(samples, self.stage)\n else:\n self.callback.on_per_batch_transform(samples, self.stage)\n return samples\n\n def __str__(self) -> str:\n # todo: define repr function which would take object and string attributes to be shown\n return (\n \"_Preprocessor:\\n\"\n f\"\\t(per_sample_transform): {str(self.per_sample_transform)}\\n\"\n f\"\\t(collate_fn): {str(self.collate_fn)}\\n\"\n f\"\\t(per_batch_transform): {str(self.per_batch_transform)}\\n\"\n f\"\\t(apply_per_sample_transform): {str(self.apply_per_sample_transform)}\\n\"\n f\"\\t(on_device): {str(self.on_device)}\\n\"\n f\"\\t(stage): {str(self.stage)}\"\n )\n\n\nclass _Postprocessor(torch.nn.Module):\n \"\"\"\n This class is used to encapsultate the following functions of a Postprocess Object:\n Inside main process:\n per_batch_transform: Function to transform a batch\n per_sample_transform: Function to transform an individual sample\n uncollate_fn: Function to split a batch into samples\n per_sample_transform: Function to transform an individual sample\n save_fn: Function to save all data\n save_per_sample: Function to save an individual sample\n \"\"\"\n\n def __init__(\n self,\n uncollate_fn: Callable,\n per_batch_transform: Callable,\n per_sample_transform: Callable,\n serializer: Optional[Callable],\n save_fn: Optional[Callable] = None,\n save_per_sample: bool = False\n ):\n super().__init__()\n self.uncollate_fn = convert_to_modules(uncollate_fn)\n self.per_batch_transform = convert_to_modules(per_batch_transform)\n self.per_sample_transform = 
convert_to_modules(per_sample_transform)\n self.serializer = convert_to_modules(serializer)\n self.save_fn = convert_to_modules(save_fn)\n self.save_per_sample = convert_to_modules(save_per_sample)\n\n def forward(self, batch: Sequence[Any]):\n uncollated = self.uncollate_fn(self.per_batch_transform(batch))\n\n final_preds = type(uncollated)([self.serializer(self.per_sample_transform(sample)) for sample in uncollated])\n\n if self.save_fn:\n if self.save_per_sample:\n for pred in final_preds:\n self.save_fn(pred)\n else:\n self.save_fn(final_preds)\n else:\n return final_preds\n\n def __str__(self) -> str:\n return (\n \"_Postprocessor:\\n\"\n f\"\\t(per_batch_transform): {str(self.per_batch_transform)}\\n\"\n f\"\\t(uncollate_fn): {str(self.uncollate_fn)}\\n\"\n f\"\\t(per_sample_transform): {str(self.per_sample_transform)}\\n\"\n f\"\\t(serializer): {str(self.serializer)}\"\n )\n\n\ndef default_uncollate(batch: Any):\n \"\"\"\n This function is used to uncollate a batch into samples.\n\n Examples:\n >>> a, b = default_uncollate(torch.rand((2,1)))\n \"\"\"\n\n batch_type = type(batch)\n\n if isinstance(batch, Tensor):\n if len(batch.shape) == 0: # 0 shape tensors\n return batch\n return list(torch.unbind(batch, 0))\n\n elif isinstance(batch, Mapping):\n return [batch_type(dict(zip(batch, default_uncollate(t)))) for t in zip(*batch.values())]\n\n elif isinstance(batch, tuple) and hasattr(batch, '_fields'): # namedtuple\n return [batch_type(*default_uncollate(sample)) for sample in zip(*batch)]\n\n elif isinstance(batch, Sequence) and not isinstance(batch, str):\n return [default_uncollate(sample) for sample in batch]\n\n return batch\n", "path": "flash/core/data/batch.py"}]}
| 3,691 | 360 |
gh_patches_debug_23375
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-2863
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FR] Add integration tests to catch breaking changes in the API
### What's the problem this feature will solve?
It would be nice to have integration tests focusing on the usage of setuptools "public API" by some popular packages in the community.
This way we can catch breaking changes in the API before publishing new releases.
### Describe the solution you'd like
According to the discussion in https://github.com/pypa/setuptools/pull/2844, if adding a new "integration test suite", the following characteristics are desirable:
1. It should run separated from the main test suite (integration tests are resource intensive and time consuming, so the best is to avoid always running them and postponing until a new release is ready).
2. It should test how setuptools' API is being used by popular packages in the community to catch rare errors.
### Alternative Solutions
_No response_
### Additional context
_No response_
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
</issue>
<code>
[start of conftest.py]
1 import sys
2
3
4 pytest_plugins = 'setuptools.tests.fixtures'
5
6
7 def pytest_addoption(parser):
8 parser.addoption(
9 "--package_name", action="append", default=[],
10 help="list of package_name to pass to test functions",
11 )
12
13
14 collect_ignore = [
15 'tests/manual_test.py',
16 'setuptools/tests/mod_with_constant.py',
17 'setuptools/_distutils',
18 '_distutils_hack',
19 'setuptools/extern',
20 'pkg_resources/extern',
21 'pkg_resources/tests/data',
22 'setuptools/_vendor',
23 'pkg_resources/_vendor',
24 ]
25
26
27 if sys.version_info < (3, 6):
28 collect_ignore.append('docs/conf.py') # uses f-strings
29 collect_ignore.append('pavement.py')
30
[end of conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -1,5 +1,7 @@
import sys
+import pytest
+
pytest_plugins = 'setuptools.tests.fixtures'
@@ -9,6 +11,14 @@
"--package_name", action="append", default=[],
help="list of package_name to pass to test functions",
)
+ parser.addoption(
+ "--integration", action="store_true", default=False,
+ help="run integration tests (only)"
+ )
+
+
+def pytest_configure(config):
+ config.addinivalue_line("markers", "integration: integration tests")
collect_ignore = [
@@ -27,3 +37,13 @@
if sys.version_info < (3, 6):
collect_ignore.append('docs/conf.py') # uses f-strings
collect_ignore.append('pavement.py')
+
+
[email protected](autouse=True)
+def _skip_integration(request):
+ running_integration_tests = request.config.getoption("--integration")
+ is_integration_test = request.node.get_closest_marker("integration")
+ if running_integration_tests and not is_integration_test:
+ pytest.skip("running integration tests only")
+ if not running_integration_tests and is_integration_test:
+ pytest.skip("skipping integration tests")
|
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -1,5 +1,7 @@\n import sys\n \n+import pytest\n+\n \n pytest_plugins = 'setuptools.tests.fixtures'\n \n@@ -9,6 +11,14 @@\n \"--package_name\", action=\"append\", default=[],\n help=\"list of package_name to pass to test functions\",\n )\n+ parser.addoption(\n+ \"--integration\", action=\"store_true\", default=False,\n+ help=\"run integration tests (only)\"\n+ )\n+\n+\n+def pytest_configure(config):\n+ config.addinivalue_line(\"markers\", \"integration: integration tests\")\n \n \n collect_ignore = [\n@@ -27,3 +37,13 @@\n if sys.version_info < (3, 6):\n collect_ignore.append('docs/conf.py') # uses f-strings\n collect_ignore.append('pavement.py')\n+\n+\[email protected](autouse=True)\n+def _skip_integration(request):\n+ running_integration_tests = request.config.getoption(\"--integration\")\n+ is_integration_test = request.node.get_closest_marker(\"integration\")\n+ if running_integration_tests and not is_integration_test:\n+ pytest.skip(\"running integration tests only\")\n+ if not running_integration_tests and is_integration_test:\n+ pytest.skip(\"skipping integration tests\")\n", "issue": "[FR] Add integration tests to catch breaking changes in the API\n### What's the problem this feature will solve?\n\nIt would be nice to have integration tests focusing on the usage of setuptools \"public API\" by some popular packages in the community.\r\n\r\nThis way we can catch breaking changes in the API before publishing new releases\n\n### Describe the solution you'd like\n\nAccording to the discussion in https://github.com/pypa/setuptools/pull/2844, if adding a new \"integration test suite\", the following characteristics are desirable:\r\n\r\n1. It should run separated from the main test suite (integration tests are resource intensive and time consuming, so the best is to avoid always running them and postponing until a new release is ready).\r\n2. It should test how setuptools' API is being used by popular packages in the community to catch rare errors.\n\n### Alternative Solutions\n\n_No response_\n\n### Additional context\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "import sys\n\n\npytest_plugins = 'setuptools.tests.fixtures'\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--package_name\", action=\"append\", default=[],\n help=\"list of package_name to pass to test functions\",\n )\n\n\ncollect_ignore = [\n 'tests/manual_test.py',\n 'setuptools/tests/mod_with_constant.py',\n 'setuptools/_distutils',\n '_distutils_hack',\n 'setuptools/extern',\n 'pkg_resources/extern',\n 'pkg_resources/tests/data',\n 'setuptools/_vendor',\n 'pkg_resources/_vendor',\n]\n\n\nif sys.version_info < (3, 6):\n collect_ignore.append('docs/conf.py') # uses f-strings\n collect_ignore.append('pavement.py')\n", "path": "conftest.py"}]}
| 959 | 299 |
gh_patches_debug_26462
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-2269
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of litestar/contrib/sqlalchemy/plugins/__init__.py]
1 from __future__ import annotations
2
3 from .init import (
4 AsyncSessionConfig,
5 EngineConfig,
6 GenericSessionConfig,
7 GenericSQLAlchemyConfig,
8 SQLAlchemyAsyncConfig,
9 SQLAlchemyInitPlugin,
10 SQLAlchemySyncConfig,
11 SyncSessionConfig,
12 )
13 from .serialization import SQLAlchemySerializationPlugin
14
15
16 class SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin):
17 """A plugin that provides SQLAlchemy integration."""
18
19 def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None:
20 SQLAlchemyInitPlugin.__init__(self, config=config)
21 SQLAlchemySerializationPlugin.__init__(self)
22
23
24 __all__ = (
25 "AsyncSessionConfig",
26 "EngineConfig",
27 "GenericSQLAlchemyConfig",
28 "GenericSessionConfig",
29 "SQLAlchemyAsyncConfig",
30 "SQLAlchemyInitPlugin",
31 "SQLAlchemyPlugin",
32 "SQLAlchemySerializationPlugin",
33 "SQLAlchemySyncConfig",
34 "SyncSessionConfig",
35 )
36
[end of litestar/contrib/sqlalchemy/plugins/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/litestar/contrib/sqlalchemy/plugins/__init__.py b/litestar/contrib/sqlalchemy/plugins/__init__.py
--- a/litestar/contrib/sqlalchemy/plugins/__init__.py
+++ b/litestar/contrib/sqlalchemy/plugins/__init__.py
@@ -1,5 +1,10 @@
from __future__ import annotations
+from typing import TYPE_CHECKING
+
+from litestar.contrib.sqlalchemy.plugins import _slots_base
+from litestar.plugins import InitPluginProtocol
+
from .init import (
AsyncSessionConfig,
EngineConfig,
@@ -12,13 +17,29 @@
)
from .serialization import SQLAlchemySerializationPlugin
+if TYPE_CHECKING:
+ from litestar.config.app import AppConfig
+
-class SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin):
+class SQLAlchemyPlugin(InitPluginProtocol, _slots_base.SlotsBase):
"""A plugin that provides SQLAlchemy integration."""
def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None:
- SQLAlchemyInitPlugin.__init__(self, config=config)
- SQLAlchemySerializationPlugin.__init__(self)
+ """Initialize ``SQLAlchemyPlugin``.
+
+ Args:
+ config: configure DB connection and hook handlers and dependencies.
+ """
+ self._config = config
+
+ def on_app_init(self, app_config: AppConfig) -> AppConfig:
+ """Configure application for use with SQLAlchemy.
+
+ Args:
+ app_config: The :class:`AppConfig <.config.app.AppConfig>` instance.
+ """
+ app_config.plugins.extend([SQLAlchemyInitPlugin(config=self._config), SQLAlchemySerializationPlugin()])
+ return app_config
__all__ = (
|
{"golden_diff": "diff --git a/litestar/contrib/sqlalchemy/plugins/__init__.py b/litestar/contrib/sqlalchemy/plugins/__init__.py\n--- a/litestar/contrib/sqlalchemy/plugins/__init__.py\n+++ b/litestar/contrib/sqlalchemy/plugins/__init__.py\n@@ -1,5 +1,10 @@\n from __future__ import annotations\n \n+from typing import TYPE_CHECKING\n+\n+from litestar.contrib.sqlalchemy.plugins import _slots_base\n+from litestar.plugins import InitPluginProtocol\n+\n from .init import (\n AsyncSessionConfig,\n EngineConfig,\n@@ -12,13 +17,29 @@\n )\n from .serialization import SQLAlchemySerializationPlugin\n \n+if TYPE_CHECKING:\n+ from litestar.config.app import AppConfig\n+\n \n-class SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin):\n+class SQLAlchemyPlugin(InitPluginProtocol, _slots_base.SlotsBase):\n \"\"\"A plugin that provides SQLAlchemy integration.\"\"\"\n \n def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None:\n- SQLAlchemyInitPlugin.__init__(self, config=config)\n- SQLAlchemySerializationPlugin.__init__(self)\n+ \"\"\"Initialize ``SQLAlchemyPlugin``.\n+\n+ Args:\n+ config: configure DB connection and hook handlers and dependencies.\n+ \"\"\"\n+ self._config = config\n+\n+ def on_app_init(self, app_config: AppConfig) -> AppConfig:\n+ \"\"\"Configure application for use with SQLAlchemy.\n+\n+ Args:\n+ app_config: The :class:`AppConfig <.config.app.AppConfig>` instance.\n+ \"\"\"\n+ app_config.plugins.extend([SQLAlchemyInitPlugin(config=self._config), SQLAlchemySerializationPlugin()])\n+ return app_config\n \n \n __all__ = (\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom .init import (\n AsyncSessionConfig,\n EngineConfig,\n GenericSessionConfig,\n GenericSQLAlchemyConfig,\n SQLAlchemyAsyncConfig,\n SQLAlchemyInitPlugin,\n SQLAlchemySyncConfig,\n SyncSessionConfig,\n)\nfrom .serialization import SQLAlchemySerializationPlugin\n\n\nclass SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin):\n \"\"\"A plugin that provides SQLAlchemy integration.\"\"\"\n\n def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None:\n SQLAlchemyInitPlugin.__init__(self, config=config)\n SQLAlchemySerializationPlugin.__init__(self)\n\n\n__all__ = (\n \"AsyncSessionConfig\",\n \"EngineConfig\",\n \"GenericSQLAlchemyConfig\",\n \"GenericSessionConfig\",\n \"SQLAlchemyAsyncConfig\",\n \"SQLAlchemyInitPlugin\",\n \"SQLAlchemyPlugin\",\n \"SQLAlchemySerializationPlugin\",\n \"SQLAlchemySyncConfig\",\n \"SyncSessionConfig\",\n)\n", "path": "litestar/contrib/sqlalchemy/plugins/__init__.py"}]}
| 976 | 373 |
gh_patches_debug_3229
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-2371
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test Translations & Support Spanish
We need to test translations before release and make sure we support Spanish
</issue>
<code>
[start of CTFd/constants/languages.py]
1 from CTFd.constants import RawEnum
2
3
4 class Languages(str, RawEnum):
5 ENGLISH = "en"
6 GERMAN = "de"
7 POLISH = "pl"
8
9
10 LANGUAGE_NAMES = {
11 "en": "English",
12 "de": "Deutsch",
13 "pl": "Polski",
14 }
15
16 SELECT_LANGUAGE_LIST = [("", "")] + [
17 (str(lang), LANGUAGE_NAMES.get(str(lang))) for lang in Languages
18 ]
19
[end of CTFd/constants/languages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/constants/languages.py b/CTFd/constants/languages.py
--- a/CTFd/constants/languages.py
+++ b/CTFd/constants/languages.py
@@ -5,12 +5,16 @@
ENGLISH = "en"
GERMAN = "de"
POLISH = "pl"
+ SPANISH = "es"
+ CHINESE = "zh"
LANGUAGE_NAMES = {
"en": "English",
"de": "Deutsch",
"pl": "Polski",
+ "es": "Español",
+ "zh": "中文",
}
SELECT_LANGUAGE_LIST = [("", "")] + [
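For illustration only (not part of the upstream patch text): because `SELECT_LANGUAGE_LIST` is derived by iterating `Languages`, the two added members surface in the select list automatically. A rough check, assuming `RawEnum.__str__` yields the raw value, as the existing `LANGUAGE_NAMES` lookup implies:

```python
from CTFd.constants.languages import SELECT_LANGUAGE_LIST

# Expected after the patch, in enum declaration order:
# [("", ""), ("en", "English"), ("de", "Deutsch"), ("pl", "Polski"),
#  ("es", "Español"), ("zh", "中文")]
assert ("es", "Español") in SELECT_LANGUAGE_LIST
assert ("zh", "中文") in SELECT_LANGUAGE_LIST
```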
|
{"golden_diff": "diff --git a/CTFd/constants/languages.py b/CTFd/constants/languages.py\n--- a/CTFd/constants/languages.py\n+++ b/CTFd/constants/languages.py\n@@ -5,12 +5,16 @@\n ENGLISH = \"en\"\n GERMAN = \"de\"\n POLISH = \"pl\"\n+ SPANISH = \"es\"\n+ CHINESE = \"zh\"\n \n \n LANGUAGE_NAMES = {\n \"en\": \"English\",\n \"de\": \"Deutsch\",\n \"pl\": \"Polski\",\n+ \"es\": \"Espa\u00f1ol\",\n+ \"zh\": \"\u4e2d\u6587\",\n }\n \n SELECT_LANGUAGE_LIST = [(\"\", \"\")] + [\n", "issue": "Test Translations & Support Spanish\nWe need to test translations before release and make sure we support Spanish\n", "before_files": [{"content": "from CTFd.constants import RawEnum\n\n\nclass Languages(str, RawEnum):\n ENGLISH = \"en\"\n GERMAN = \"de\"\n POLISH = \"pl\"\n\n\nLANGUAGE_NAMES = {\n \"en\": \"English\",\n \"de\": \"Deutsch\",\n \"pl\": \"Polski\",\n}\n\nSELECT_LANGUAGE_LIST = [(\"\", \"\")] + [\n (str(lang), LANGUAGE_NAMES.get(str(lang))) for lang in Languages\n]\n", "path": "CTFd/constants/languages.py"}]}
| 687 | 150 |
gh_patches_debug_35036
|
rasdani/github-patches
|
git_diff
|
scverse__scanpy-816
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Memory issue running zheng17 on ~1M cells
Running https://github.com/theislab/scanpy_usage/blob/master/170522_visualizing_one_million_cells/cluster.py on the latest released Scanpy (1.4.4.post1) gives a memory error:
```
reading 1M_neurons_filtered_gene_bc_matrices_h5.h5
Variable names are not unique. To make them unique, call `.var_names_make_unique`.
(0:01:39)
running recipe zheng17
filtered out 3983 genes that are detectedin less than 1 counts
Killed
```
This is running with 60GB of memory (n1-standard-16), but also occurs with 104GB (n1-highmem-16). It looks like there has been a regression somewhere since this used to run OK. I think the error may be happening in anndata.
memory requirements
Hi,
I am trying to run the full 1.3M 10X mouse cell dataset (using the 1M_neurons_filtered_gene_bc_matrices_h5.h5 file from the 10X website).
I have 126 GB RAM and an Intel® Xeon(R) W-2123 CPU @ 3.60 GHz × 8, which is above the requirements you mention as needed to run the full cluster.py method without subsampling. (https://github.com/theislab/scanpy_usage/tree/master/170522_visualizing_one_million_cells)
I get a memory error at the normalization and filter_genes_dispersion stage; should I modify the code in any way? (without subsampling)
Thanks, Shobi
</issue>
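An orienting sketch (not part of the original reports): the accompanying patch swaps the deprecated per-cell normalization in the recipes for `normalize_total`, which operates on the sparse matrix. A minimal call pattern for the 1.3M-cell file named in the issue, with the path treated as a placeholder:

```python
import scanpy as sc

# Placeholder path taken from the issue; adjust to the local download location.
adata = sc.read_10x_h5("1M_neurons_filtered_gene_bc_matrices_h5.h5")
adata.var_names_make_unique()

# The recipe the linked cluster.py script relies on.
sc.pp.recipe_zheng17(adata, n_top_genes=1000)

# Internally the fix uses calls of this shape instead of normalize_per_cell:
#   sc.pp.normalize_total(adata, key_added="n_counts_all")
#   sc.pp.normalize_total(adata, target_sum=1e4)
# both of which keep adata.X sparse.
```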
<code>
[start of scanpy/preprocessing/_recipes.py]
1 """Preprocessing recipes from the literature
2 """
3 from anndata import AnnData
4
5 from . import _simple as pp
6 from ._deprecated.highly_variable_genes import filter_genes_dispersion, filter_genes_cv_deprecated
7 from .. import logging as logg
8
9
10 def recipe_weinreb17(
11 adata: AnnData,
12 log: bool = True,
13 mean_threshold=0.01,
14 cv_threshold=2,
15 n_pcs=50,
16 svd_solver='randomized',
17 random_state=0,
18 copy: bool = False,
19 ):
20 """\
21 Normalization and filtering as of [Weinreb17]_.
22
23 Expects non-logarithmized data.
24 If using logarithmized data, pass `log=False`.
25
26 Parameters
27 ----------
28 adata
29 Annotated data matrix.
30 log
31 Logarithmize data?
32 copy
33 Return a copy if true.
34 """
35 from scipy.sparse import issparse
36 if issparse(adata.X):
37 raise ValueError('`recipe_weinreb16 does not support sparse matrices.')
38 if copy: adata = adata.copy()
39 if log: pp.log1p(adata)
40 adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,
41 max_fraction=0.05,
42 mult_with_mean=True)
43 gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)
44 adata._inplace_subset_var(gene_subset) # this modifies the object itself
45 X_pca = pp.pca(pp.zscore_deprecated(adata.X),
46 n_comps=n_pcs, svd_solver=svd_solver, random_state=random_state)
47 # update adata
48 adata.obsm['X_pca'] = X_pca
49 return adata if copy else None
50
51
52 def recipe_seurat(adata, log=True, plot=False, copy=False):
53 """Normalization and filtering as of Seurat [Satija15]_.
54
55 This uses a particular preprocessing.
56
57 Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
58 """
59 if copy: adata = adata.copy()
60 pp.filter_cells(adata, min_genes=200)
61 pp.filter_genes(adata, min_cells=3)
62 pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
63 filter_result = filter_genes_dispersion(
64 adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)
65 if plot:
66 from ..plotting import _preprocessing as ppp # should not import at the top of the file
67 ppp.filter_genes_dispersion(filter_result, log=not log)
68 adata._inplace_subset_var(filter_result.gene_subset) # filter genes
69 if log: pp.log1p(adata)
70 pp.scale(adata, max_value=10)
71 return adata if copy else None
72
73
74 def recipe_zheng17(adata, n_top_genes=1000, log=True, plot=False, copy=False):
75 """Normalization and filtering as of [Zheng17]_.
76
77 Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x
78 Genomics.
79
80 Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
81
82 The recipe runs the following steps
83
84 .. code:: python
85
86 sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
87 sc.pp.normalize_per_cell( # normalize with total UMI count per cell
88 adata, key_n_counts='n_counts_all')
89 filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
90 adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
91 adata = adata[:, filter_result.gene_subset] # subset the genes
92 sc.pp.normalize_per_cell(adata) # renormalize after filtering
93 if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
94 sc.pp.scale(adata) # scale to unit variance and shift to zero mean
95
96
97 Parameters
98 ----------
99 adata : :class:`~anndata.AnnData`
100 Annotated data matrix.
101 n_top_genes : `int`, optional (default: 1000)
102 Number of genes to keep.
103 log : `bool`, optional (default: `True`)
104 Take logarithm.
105 plot : `bool`, optional (default: `True`)
106 Show a plot of the gene dispersion vs. mean relation.
107 copy : `bool`, optional (default: `False`)
108 Return a copy of `adata` instead of updating it.
109
110 Returns
111 -------
112 Returns or updates `adata` depending on `copy`.
113 """
114 start = logg.info('running recipe zheng17')
115 if copy: adata = adata.copy()
116 pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
117 pp.normalize_per_cell(adata, # normalize with total UMI count per cell
118 key_n_counts='n_counts_all')
119 filter_result = filter_genes_dispersion(
120 adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
121 if plot:
122 from ..plotting import _preprocessing as ppp # should not import at the top of the file
123 ppp.filter_genes_dispersion(filter_result, log=True)
124 # actually filter the genes, the following is the inplace version of
125 # adata = adata[:, filter_result.gene_subset]
126 adata._inplace_subset_var(filter_result.gene_subset) # filter genes
127 pp.normalize_per_cell(adata) # renormalize after filtering
128 if log: pp.log1p(adata) # log transform: X = log(X + 1)
129 pp.scale(adata)
130 logg.info(' finished', time=start)
131 return adata if copy else None
132
[end of scanpy/preprocessing/_recipes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scanpy/preprocessing/_recipes.py b/scanpy/preprocessing/_recipes.py
--- a/scanpy/preprocessing/_recipes.py
+++ b/scanpy/preprocessing/_recipes.py
@@ -4,6 +4,7 @@
from . import _simple as pp
from ._deprecated.highly_variable_genes import filter_genes_dispersion, filter_genes_cv_deprecated
+from ._normalization import normalize_total
from .. import logging as logg
@@ -59,7 +60,7 @@
if copy: adata = adata.copy()
pp.filter_cells(adata, min_genes=200)
pp.filter_genes(adata, min_cells=3)
- pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
+ normalize_total(adata, target_sum=1e4)
filter_result = filter_genes_dispersion(
adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)
if plot:
@@ -114,8 +115,8 @@
start = logg.info('running recipe zheng17')
if copy: adata = adata.copy()
pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
- pp.normalize_per_cell(adata, # normalize with total UMI count per cell
- key_n_counts='n_counts_all')
+ normalize_total(adata, # normalize with total UMI count per cell
+ key_added='n_counts_all')
filter_result = filter_genes_dispersion(
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
if plot:
@@ -124,7 +125,7 @@
# actually filter the genes, the following is the inplace version of
# adata = adata[:, filter_result.gene_subset]
adata._inplace_subset_var(filter_result.gene_subset) # filter genes
- pp.normalize_per_cell(adata) # renormalize after filtering
+ normalize_total(adata) # renormalize after filtering
if log: pp.log1p(adata) # log transform: X = log(X + 1)
pp.scale(adata)
logg.info(' finished', time=start)
|
{"golden_diff": "diff --git a/scanpy/preprocessing/_recipes.py b/scanpy/preprocessing/_recipes.py\n--- a/scanpy/preprocessing/_recipes.py\n+++ b/scanpy/preprocessing/_recipes.py\n@@ -4,6 +4,7 @@\n \n from . import _simple as pp\n from ._deprecated.highly_variable_genes import filter_genes_dispersion, filter_genes_cv_deprecated\n+from ._normalization import normalize_total\n from .. import logging as logg\n \n \n@@ -59,7 +60,7 @@\n if copy: adata = adata.copy()\n pp.filter_cells(adata, min_genes=200)\n pp.filter_genes(adata, min_cells=3)\n- pp.normalize_per_cell(adata, counts_per_cell_after=1e4)\n+ normalize_total(adata, target_sum=1e4)\n filter_result = filter_genes_dispersion(\n adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)\n if plot:\n@@ -114,8 +115,8 @@\n start = logg.info('running recipe zheng17')\n if copy: adata = adata.copy()\n pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count\n- pp.normalize_per_cell(adata, # normalize with total UMI count per cell\n- key_n_counts='n_counts_all')\n+ normalize_total(adata, # normalize with total UMI count per cell\n+ key_added='n_counts_all')\n filter_result = filter_genes_dispersion(\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n if plot:\n@@ -124,7 +125,7 @@\n # actually filter the genes, the following is the inplace version of\n # adata = adata[:, filter_result.gene_subset]\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n- pp.normalize_per_cell(adata) # renormalize after filtering\n+ normalize_total(adata) # renormalize after filtering\n if log: pp.log1p(adata) # log transform: X = log(X + 1)\n pp.scale(adata)\n logg.info(' finished', time=start)\n", "issue": "Memory issue running zheng17 on ~1M cells\nRunning https://github.com/theislab/scanpy_usage/blob/master/170522_visualizing_one_million_cells/cluster.py on the latest released Scanpy (1.4.4.post1) gives a memory error:\r\n\r\n```\r\nreading 1M_neurons_filtered_gene_bc_matrices_h5.h5\r\nVariable names are not unique. To make them unique, call `.var_names_make_unique`.\r\n (0:01:39)\r\nrunning recipe zheng17\r\nfiltered out 3983 genes that are detectedin less than 1 counts\r\nKilled\r\n```\r\n\r\nThis is running with 60GB of memory (n1-standard-16), but also occurs with 104GB (n1-highmem-16). It looks like there has been a regression somewhere since this used to run OK. I think the error may be happening in anndata.\nmemory requirements\nHi,\r\nI am trying to run the full 1.3M 10X mouse cell dataset (using the 1M_neurons_filtered_gene_bc_matrices_h5.h5 file from 10X website).\r\nI have 126GB RAM and Intel\u00ae Xeon(R) W-2123 CPU @ 3.60GHz \u00d7 8 which is above the requirements you mention needed to run the full cluster.py method without subsampling. (https://github.com/theislab/scanpy_usage/tree/master/170522_visualizing_one_million_cells)\r\nI get a memory error at the normalization and filter_genes_dispersion stage, should i modify the code in anyway? (without subsampling)\r\nThanks,Shobi\n", "before_files": [{"content": "\"\"\"Preprocessing recipes from the literature\n\"\"\"\nfrom anndata import AnnData\n\nfrom . import _simple as pp\nfrom ._deprecated.highly_variable_genes import filter_genes_dispersion, filter_genes_cv_deprecated\nfrom .. 
import logging as logg\n\n\ndef recipe_weinreb17(\n adata: AnnData,\n log: bool = True,\n mean_threshold=0.01,\n cv_threshold=2,\n n_pcs=50,\n svd_solver='randomized',\n random_state=0,\n copy: bool = False,\n):\n \"\"\"\\\n Normalization and filtering as of [Weinreb17]_.\n\n Expects non-logarithmized data.\n If using logarithmized data, pass `log=False`.\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n log\n Logarithmize data?\n copy\n Return a copy if true.\n \"\"\"\n from scipy.sparse import issparse\n if issparse(adata.X):\n raise ValueError('`recipe_weinreb16 does not support sparse matrices.')\n if copy: adata = adata.copy()\n if log: pp.log1p(adata)\n adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,\n max_fraction=0.05,\n mult_with_mean=True)\n gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)\n adata._inplace_subset_var(gene_subset) # this modifies the object itself\n X_pca = pp.pca(pp.zscore_deprecated(adata.X),\n n_comps=n_pcs, svd_solver=svd_solver, random_state=random_state)\n # update adata\n adata.obsm['X_pca'] = X_pca\n return adata if copy else None\n\n\ndef recipe_seurat(adata, log=True, plot=False, copy=False):\n \"\"\"Normalization and filtering as of Seurat [Satija15]_.\n\n This uses a particular preprocessing.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n \"\"\"\n if copy: adata = adata.copy()\n pp.filter_cells(adata, min_genes=200)\n pp.filter_genes(adata, min_cells=3)\n pp.normalize_per_cell(adata, counts_per_cell_after=1e4)\n filter_result = filter_genes_dispersion(\n adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)\n if plot:\n from ..plotting import _preprocessing as ppp # should not import at the top of the file\n ppp.filter_genes_dispersion(filter_result, log=not log)\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n if log: pp.log1p(adata)\n pp.scale(adata, max_value=10)\n return adata if copy else None\n\n\ndef recipe_zheng17(adata, n_top_genes=1000, log=True, plot=False, copy=False):\n \"\"\"Normalization and filtering as of [Zheng17]_.\n\n Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x\n Genomics.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n\n The recipe runs the following steps\n\n .. code:: python\n\n sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count\n sc.pp.normalize_per_cell( # normalize with total UMI count per cell\n adata, key_n_counts='n_counts_all')\n filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n adata = adata[:, filter_result.gene_subset] # subset the genes\n sc.pp.normalize_per_cell(adata) # renormalize after filtering\n if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)\n sc.pp.scale(adata) # scale to unit variance and shift to zero mean\n\n\n Parameters\n ----------\n adata : :class:`~anndata.AnnData`\n Annotated data matrix.\n n_top_genes : `int`, optional (default: 1000)\n Number of genes to keep.\n log : `bool`, optional (default: `True`)\n Take logarithm.\n plot : `bool`, optional (default: `True`)\n Show a plot of the gene dispersion vs. 
mean relation.\n copy : `bool`, optional (default: `False`)\n Return a copy of `adata` instead of updating it.\n\n Returns\n -------\n Returns or updates `adata` depending on `copy`.\n \"\"\"\n start = logg.info('running recipe zheng17')\n if copy: adata = adata.copy()\n pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count\n pp.normalize_per_cell(adata, # normalize with total UMI count per cell\n key_n_counts='n_counts_all')\n filter_result = filter_genes_dispersion(\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n if plot:\n from ..plotting import _preprocessing as ppp # should not import at the top of the file\n ppp.filter_genes_dispersion(filter_result, log=True)\n # actually filter the genes, the following is the inplace version of\n # adata = adata[:, filter_result.gene_subset]\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n pp.normalize_per_cell(adata) # renormalize after filtering\n if log: pp.log1p(adata) # log transform: X = log(X + 1)\n pp.scale(adata)\n logg.info(' finished', time=start)\n return adata if copy else None\n", "path": "scanpy/preprocessing/_recipes.py"}]}
| 2,540 | 516 |
gh_patches_debug_15602
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-7108
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cc.com
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2021.12.01**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
United States
### Description
https://www.cc.com/topic/a-clusterfunke-christmas
TV episodes work fine, but the movie comes back as "Unsupported URL".
### Verbose log
```shell
C:\Users\Kevin\Downloads\yt>ytdl.exe -Uv https://www.cc.com/movies/tkp406/a-clue
sterfuenke-christmas
[debug] Command-line config: ['-Uv', 'https://www.cc.com/movies/tkp406/a-clueste
rfuenke-christmas']
[debug] Encodings: locale cp1252, fs utf-8, out utf-8 (No ANSI), err utf-8 (No A
NSI), pref cp1252
[debug] yt-dlp version 2021.12.01 [91f071a] (win_exe)
[debug] Python version 3.8.10 (CPython 64bit) - Windows-7-6.1.7601-SP1
[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-fu
ll_build-www.gyan.dev
[debug] Optional libraries: Cryptodome, mutagen, sqlite, websockets
[debug] Proxy map: {}
Latest version: 2021.12.01, Current version: 2021.12.01
yt-dlp is up to date (2021.12.01)
[debug] [generic] Extracting URL: https://www.cc.com/movies/tkp406/a-cluesterfue
nke-christmas
[generic] a-cluesterfuenke-christmas: Requesting header
WARNING: [generic] Falling back on generic information extractor.
[generic] a-cluesterfuenke-christmas: Downloading webpage
[generic] a-cluesterfuenke-christmas: Extracting information
[debug] Looking for video embeds
ERROR: Unsupported URL: https://www.cc.com/movies/tkp406/a-cluesterfuenke-christ
mas
Traceback (most recent call last):
File "yt_dlp\YoutubeDL.py", line 1329, in wrapper
File "yt_dlp\YoutubeDL.py", line 1398, in __extract_info
File "yt_dlp\extractor\common.py", line 597, in extract
File "yt_dlp\extractor\generic.py", line 3813, in _real_extract
yt_dlp.utils.UnsupportedError: Unsupported URL: https://www.cc.com/movies/tkp406
/a-cluesterfuenke-christmas
```
</issue>
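A standalone illustration (not from the report) of why the URL falls through to the generic extractor: the `movies` path segment is absent from the extractor's `_VALID_URL` alternation. The two pattern strings below are copied from the extractor before and after the fix; the demo itself is illustrative:

```python
import re

url = "https://www.cc.com/movies/tkp406/a-cluesterfuenke-christmas"

old = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?|collection-playlist)/(?P<id>[0-9a-z]{6})'
new = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?|collection-playlist|movies)/(?P<id>[0-9a-z]{6})'

print(re.match(old, url))               # None -> yt-dlp reports "Unsupported URL"
print(re.match(new, url).group("id"))   # "tkp406"
```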
<code>
[start of yt_dlp/extractor/comedycentral.py]
1 from .mtv import MTVServicesInfoExtractor
2
3
4 class ComedyCentralIE(MTVServicesInfoExtractor):
5 _VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?|collection-playlist)/(?P<id>[0-9a-z]{6})'
6 _FEED_URL = 'http://comedycentral.com/feeds/mrss/'
7
8 _TESTS = [{
9 'url': 'http://www.cc.com/video-clips/5ke9v2/the-daily-show-with-trevor-noah-doc-rivers-and-steve-ballmer---the-nba-player-strike',
10 'md5': 'b8acb347177c680ff18a292aa2166f80',
11 'info_dict': {
12 'id': '89ccc86e-1b02-4f83-b0c9-1d9592ecd025',
13 'ext': 'mp4',
14 'title': 'The Daily Show with Trevor Noah|August 28, 2020|25|25149|Doc Rivers and Steve Ballmer - The NBA Player Strike',
15 'description': 'md5:5334307c433892b85f4f5e5ac9ef7498',
16 'timestamp': 1598670000,
17 'upload_date': '20200829',
18 },
19 }, {
20 'url': 'http://www.cc.com/episodes/pnzzci/drawn-together--american-idol--parody-clip-show-season-3-ep-314',
21 'only_matching': True,
22 }, {
23 'url': 'https://www.cc.com/video/k3sdvm/the-daily-show-with-jon-stewart-exclusive-the-fourth-estate',
24 'only_matching': True,
25 }, {
26 'url': 'https://www.cc.com/collection-playlist/cosnej/stand-up-specials/t6vtjb',
27 'only_matching': True,
28 }]
29
30
31 class ComedyCentralTVIE(MTVServicesInfoExtractor):
32 _VALID_URL = r'https?://(?:www\.)?comedycentral\.tv/folgen/(?P<id>[0-9a-z]{6})'
33 _TESTS = [{
34 'url': 'https://www.comedycentral.tv/folgen/pxdpec/josh-investigates-klimawandel-staffel-1-ep-1',
35 'info_dict': {
36 'id': '15907dc3-ec3c-11e8-a442-0e40cf2fc285',
37 'ext': 'mp4',
38 'title': 'Josh Investigates',
39 'description': 'Steht uns das Ende der Welt bevor?',
40 },
41 }]
42 _FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
43 _GEO_COUNTRIES = ['DE']
44
45 def _get_feed_query(self, uri):
46 return {
47 'accountOverride': 'intl.mtvi.com',
48 'arcEp': 'web.cc.tv',
49 'ep': 'b9032c3a',
50 'imageEp': 'web.cc.tv',
51 'mgid': uri,
52 }
53
[end of yt_dlp/extractor/comedycentral.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yt_dlp/extractor/comedycentral.py b/yt_dlp/extractor/comedycentral.py
--- a/yt_dlp/extractor/comedycentral.py
+++ b/yt_dlp/extractor/comedycentral.py
@@ -2,7 +2,7 @@
class ComedyCentralIE(MTVServicesInfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?|collection-playlist)/(?P<id>[0-9a-z]{6})'
+ _VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?|collection-playlist|movies)/(?P<id>[0-9a-z]{6})'
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TESTS = [{
@@ -25,6 +25,9 @@
}, {
'url': 'https://www.cc.com/collection-playlist/cosnej/stand-up-specials/t6vtjb',
'only_matching': True,
+ }, {
+ 'url': 'https://www.cc.com/movies/tkp406/a-cluesterfuenke-christmas',
+ 'only_matching': True,
}]
|
{"golden_diff": "diff --git a/yt_dlp/extractor/comedycentral.py b/yt_dlp/extractor/comedycentral.py\n--- a/yt_dlp/extractor/comedycentral.py\n+++ b/yt_dlp/extractor/comedycentral.py\n@@ -2,7 +2,7 @@\n \n \n class ComedyCentralIE(MTVServicesInfoExtractor):\n- _VALID_URL = r'https?://(?:www\\.)?cc\\.com/(?:episodes|video(?:-clips)?|collection-playlist)/(?P<id>[0-9a-z]{6})'\n+ _VALID_URL = r'https?://(?:www\\.)?cc\\.com/(?:episodes|video(?:-clips)?|collection-playlist|movies)/(?P<id>[0-9a-z]{6})'\n _FEED_URL = 'http://comedycentral.com/feeds/mrss/'\n \n _TESTS = [{\n@@ -25,6 +25,9 @@\n }, {\n 'url': 'https://www.cc.com/collection-playlist/cosnej/stand-up-specials/t6vtjb',\n 'only_matching': True,\n+ }, {\n+ 'url': 'https://www.cc.com/movies/tkp406/a-cluesterfuenke-christmas',\n+ 'only_matching': True,\n }]\n", "issue": "cc.com\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2021.12.01**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nUnited States\n\n### Description\n\nhttps://www.cc.com/topic/a-clusterfunke-christmas\r\n\r\nTV episodes work fine, but the movie comes back Unsupported URL\n\n### Verbose log\n\n```shell\nC:\\Users\\Kevin\\Downloads\\yt>ytdl.exe -Uv https://www.cc.com/movies/tkp406/a-clue\r\nsterfuenke-christmas\r\n[debug] Command-line config: ['-Uv', 'https://www.cc.com/movies/tkp406/a-clueste\r\nrfuenke-christmas']\r\n[debug] Encodings: locale cp1252, fs utf-8, out utf-8 (No ANSI), err utf-8 (No A\r\nNSI), pref cp1252\r\n[debug] yt-dlp version 2021.12.01 [91f071a] (win_exe)\r\n[debug] Python version 3.8.10 (CPython 64bit) - Windows-7-6.1.7601-SP1\r\n[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-fu\r\nll_build-www.gyan.dev\r\n[debug] Optional libraries: Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2021.12.01, Current version: 2021.12.01\r\nyt-dlp is up to date (2021.12.01)\r\n[debug] [generic] Extracting URL: https://www.cc.com/movies/tkp406/a-cluesterfue\r\nnke-christmas\r\n[generic] a-cluesterfuenke-christmas: Requesting header\r\nWARNING: [generic] Falling back on generic information extractor.\r\n[generic] a-cluesterfuenke-christmas: Downloading webpage\r\n[generic] a-cluesterfuenke-christmas: Extracting information\r\n[debug] Looking for video embeds\r\nERROR: Unsupported URL: https://www.cc.com/movies/tkp406/a-cluesterfuenke-christ\r\nmas\r\nTraceback (most recent call last):\r\n File \"yt_dlp\\YoutubeDL.py\", line 1329, in wrapper\r\n File \"yt_dlp\\YoutubeDL.py\", line 1398, in __extract_info\r\n File \"yt_dlp\\extractor\\common.py\", line 597, in extract\r\n File 
\"yt_dlp\\extractor\\generic.py\", line 3813, in _real_extract\r\nyt_dlp.utils.UnsupportedError: Unsupported URL: https://www.cc.com/movies/tkp406\r\n/a-cluesterfuenke-christmas\n```\n\n", "before_files": [{"content": "from .mtv import MTVServicesInfoExtractor\n\n\nclass ComedyCentralIE(MTVServicesInfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?cc\\.com/(?:episodes|video(?:-clips)?|collection-playlist)/(?P<id>[0-9a-z]{6})'\n _FEED_URL = 'http://comedycentral.com/feeds/mrss/'\n\n _TESTS = [{\n 'url': 'http://www.cc.com/video-clips/5ke9v2/the-daily-show-with-trevor-noah-doc-rivers-and-steve-ballmer---the-nba-player-strike',\n 'md5': 'b8acb347177c680ff18a292aa2166f80',\n 'info_dict': {\n 'id': '89ccc86e-1b02-4f83-b0c9-1d9592ecd025',\n 'ext': 'mp4',\n 'title': 'The Daily Show with Trevor Noah|August 28, 2020|25|25149|Doc Rivers and Steve Ballmer - The NBA Player Strike',\n 'description': 'md5:5334307c433892b85f4f5e5ac9ef7498',\n 'timestamp': 1598670000,\n 'upload_date': '20200829',\n },\n }, {\n 'url': 'http://www.cc.com/episodes/pnzzci/drawn-together--american-idol--parody-clip-show-season-3-ep-314',\n 'only_matching': True,\n }, {\n 'url': 'https://www.cc.com/video/k3sdvm/the-daily-show-with-jon-stewart-exclusive-the-fourth-estate',\n 'only_matching': True,\n }, {\n 'url': 'https://www.cc.com/collection-playlist/cosnej/stand-up-specials/t6vtjb',\n 'only_matching': True,\n }]\n\n\nclass ComedyCentralTVIE(MTVServicesInfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?comedycentral\\.tv/folgen/(?P<id>[0-9a-z]{6})'\n _TESTS = [{\n 'url': 'https://www.comedycentral.tv/folgen/pxdpec/josh-investigates-klimawandel-staffel-1-ep-1',\n 'info_dict': {\n 'id': '15907dc3-ec3c-11e8-a442-0e40cf2fc285',\n 'ext': 'mp4',\n 'title': 'Josh Investigates',\n 'description': 'Steht uns das Ende der Welt bevor?',\n },\n }]\n _FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'\n _GEO_COUNTRIES = ['DE']\n\n def _get_feed_query(self, uri):\n return {\n 'accountOverride': 'intl.mtvi.com',\n 'arcEp': 'web.cc.tv',\n 'ep': 'b9032c3a',\n 'imageEp': 'web.cc.tv',\n 'mgid': uri,\n }\n", "path": "yt_dlp/extractor/comedycentral.py"}]}
| 2,302 | 292 |
gh_patches_debug_38746
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3627
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider teavana is broken
During the global build at 2021-05-26-14-42-23, spider **teavana** failed with **0 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/teavana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson))
</issue>
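A quick hypothetical check (not part of the report) before deciding how to resolve a 0-feature run: probe whether the brand's store-locator host still serves anything at all. If it does not, deleting the spider (which is what the accompanying patch does) is the appropriate resolution.

```python
import requests

# Illustrative only; endpoint and expectations are assumptions, not from the report.
try:
    resp = requests.get("https://locations.teavana.com/", timeout=30, allow_redirects=True)
    print(resp.status_code, resp.url)
except requests.RequestException as exc:
    print("locator unreachable:", exc)
```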
<code>
[start of locations/spiders/teavana.py]
1 import scrapy
2 import re
3 from locations.items import GeojsonPointItem
4
5
6 class ExpressSpider(scrapy.Spider):
7
8 name = "teavana"
9 item_attributes = {"brand": "Teavana"}
10 allowed_domains = ["locations.teavana.com"]
11 download_delay = 0.5
12 start_urls = ("https://locations.teavana.com/",)
13
14 def parse_stores(self, response):
15 ref = re.findall(r"[^(\/)]+$", response.url)
16 if len(ref) > 0:
17 ref = ref[0].split(".")[0]
18 properties = {
19 "addr_full": " ".join(
20 response.xpath(
21 '//span[@itemprop="streetAddress"]/span/text()'
22 ).extract()
23 ),
24 "phone": response.xpath(
25 'normalize-space(//span[@itemprop="telephone"]/text())'
26 ).extract_first(),
27 "city": response.xpath(
28 'normalize-space(//span[@itemprop="addressLocality"]/text())'
29 ).extract_first(),
30 "state": response.xpath(
31 'normalize-space(//abbr[@itemprop="addressRegion"]/text())'
32 ).extract_first(),
33 "postcode": response.xpath(
34 'normalize-space(//span[@itemprop="postalCode"]/text())'
35 ).extract_first(),
36 "ref": ref,
37 "website": response.url,
38 "lat": float(
39 response.xpath(
40 'normalize-space(//meta[@itemprop="latitude"]/@content)'
41 ).extract_first()
42 ),
43 "lon": float(
44 response.xpath(
45 'normalize-space(//meta[@itemprop="longitude"]/@content)'
46 ).extract_first()
47 ),
48 }
49 hours = response.xpath('//div[@itemprop="openingHours"]/@content').extract()
50 if hours != []:
51 hours = "; ".join(hours)
52 properties["opening_hours"] = hours
53 yield GeojsonPointItem(**properties)
54
55 def parse_city_stores(self, response):
56 stores = response.xpath(
57 '//h3[@class="Teaser-title Link Link--teaser Heading--h5"]/a/@href'
58 ).extract()
59 for store in stores:
60 yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)
61
62 def parse_state(self, response):
63 urls = response.xpath(
64 '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href'
65 ).extract()
66 for path in urls:
67 pattern = re.compile(r"..\/[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$")
68 if pattern.match(path.strip()):
69 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
70 else:
71 yield scrapy.Request(
72 response.urljoin(path), callback=self.parse_city_stores
73 )
74
75 def parse(self, response):
76 urls = response.xpath(
77 '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href'
78 ).extract()
79 for path in urls:
80 pattern = re.compile(r"^[a-z]{2}\/[a-z]{2}.html$")
81 pattern1 = re.compile(r"^[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$")
82 if pattern.match(path.strip()):
83 yield scrapy.Request(response.urljoin(path), callback=self.parse_state)
84 elif pattern1.match(path.strip()):
85 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
86 else:
87 yield scrapy.Request(
88 response.urljoin(path), callback=self.parse_city_stores
89 )
90
[end of locations/spiders/teavana.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/teavana.py b/locations/spiders/teavana.py
deleted file mode 100644
--- a/locations/spiders/teavana.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import scrapy
-import re
-from locations.items import GeojsonPointItem
-
-
-class ExpressSpider(scrapy.Spider):
-
- name = "teavana"
- item_attributes = {"brand": "Teavana"}
- allowed_domains = ["locations.teavana.com"]
- download_delay = 0.5
- start_urls = ("https://locations.teavana.com/",)
-
- def parse_stores(self, response):
- ref = re.findall(r"[^(\/)]+$", response.url)
- if len(ref) > 0:
- ref = ref[0].split(".")[0]
- properties = {
- "addr_full": " ".join(
- response.xpath(
- '//span[@itemprop="streetAddress"]/span/text()'
- ).extract()
- ),
- "phone": response.xpath(
- 'normalize-space(//span[@itemprop="telephone"]/text())'
- ).extract_first(),
- "city": response.xpath(
- 'normalize-space(//span[@itemprop="addressLocality"]/text())'
- ).extract_first(),
- "state": response.xpath(
- 'normalize-space(//abbr[@itemprop="addressRegion"]/text())'
- ).extract_first(),
- "postcode": response.xpath(
- 'normalize-space(//span[@itemprop="postalCode"]/text())'
- ).extract_first(),
- "ref": ref,
- "website": response.url,
- "lat": float(
- response.xpath(
- 'normalize-space(//meta[@itemprop="latitude"]/@content)'
- ).extract_first()
- ),
- "lon": float(
- response.xpath(
- 'normalize-space(//meta[@itemprop="longitude"]/@content)'
- ).extract_first()
- ),
- }
- hours = response.xpath('//div[@itemprop="openingHours"]/@content').extract()
- if hours != []:
- hours = "; ".join(hours)
- properties["opening_hours"] = hours
- yield GeojsonPointItem(**properties)
-
- def parse_city_stores(self, response):
- stores = response.xpath(
- '//h3[@class="Teaser-title Link Link--teaser Heading--h5"]/a/@href'
- ).extract()
- for store in stores:
- yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)
-
- def parse_state(self, response):
- urls = response.xpath(
- '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href'
- ).extract()
- for path in urls:
- pattern = re.compile(r"..\/[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$")
- if pattern.match(path.strip()):
- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
- else:
- yield scrapy.Request(
- response.urljoin(path), callback=self.parse_city_stores
- )
-
- def parse(self, response):
- urls = response.xpath(
- '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href'
- ).extract()
- for path in urls:
- pattern = re.compile(r"^[a-z]{2}\/[a-z]{2}.html$")
- pattern1 = re.compile(r"^[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$")
- if pattern.match(path.strip()):
- yield scrapy.Request(response.urljoin(path), callback=self.parse_state)
- elif pattern1.match(path.strip()):
- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
- else:
- yield scrapy.Request(
- response.urljoin(path), callback=self.parse_city_stores
- )
|
{"golden_diff": "diff --git a/locations/spiders/teavana.py b/locations/spiders/teavana.py\ndeleted file mode 100644\n--- a/locations/spiders/teavana.py\n+++ /dev/null\n@@ -1,89 +0,0 @@\n-import scrapy\n-import re\n-from locations.items import GeojsonPointItem\n-\n-\n-class ExpressSpider(scrapy.Spider):\n-\n- name = \"teavana\"\n- item_attributes = {\"brand\": \"Teavana\"}\n- allowed_domains = [\"locations.teavana.com\"]\n- download_delay = 0.5\n- start_urls = (\"https://locations.teavana.com/\",)\n-\n- def parse_stores(self, response):\n- ref = re.findall(r\"[^(\\/)]+$\", response.url)\n- if len(ref) > 0:\n- ref = ref[0].split(\".\")[0]\n- properties = {\n- \"addr_full\": \" \".join(\n- response.xpath(\n- '//span[@itemprop=\"streetAddress\"]/span/text()'\n- ).extract()\n- ),\n- \"phone\": response.xpath(\n- 'normalize-space(//span[@itemprop=\"telephone\"]/text())'\n- ).extract_first(),\n- \"city\": response.xpath(\n- 'normalize-space(//span[@itemprop=\"addressLocality\"]/text())'\n- ).extract_first(),\n- \"state\": response.xpath(\n- 'normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())'\n- ).extract_first(),\n- \"postcode\": response.xpath(\n- 'normalize-space(//span[@itemprop=\"postalCode\"]/text())'\n- ).extract_first(),\n- \"ref\": ref,\n- \"website\": response.url,\n- \"lat\": float(\n- response.xpath(\n- 'normalize-space(//meta[@itemprop=\"latitude\"]/@content)'\n- ).extract_first()\n- ),\n- \"lon\": float(\n- response.xpath(\n- 'normalize-space(//meta[@itemprop=\"longitude\"]/@content)'\n- ).extract_first()\n- ),\n- }\n- hours = response.xpath('//div[@itemprop=\"openingHours\"]/@content').extract()\n- if hours != []:\n- hours = \"; \".join(hours)\n- properties[\"opening_hours\"] = hours\n- yield GeojsonPointItem(**properties)\n-\n- def parse_city_stores(self, response):\n- stores = response.xpath(\n- '//h3[@class=\"Teaser-title Link Link--teaser Heading--h5\"]/a/@href'\n- ).extract()\n- for store in stores:\n- yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n-\n- def parse_state(self, response):\n- urls = response.xpath(\n- '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n- ).extract()\n- for path in urls:\n- pattern = re.compile(r\"..\\/[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n- if pattern.match(path.strip()):\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n- else:\n- yield scrapy.Request(\n- response.urljoin(path), callback=self.parse_city_stores\n- )\n-\n- def parse(self, response):\n- urls = response.xpath(\n- '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n- ).extract()\n- for path in urls:\n- pattern = re.compile(r\"^[a-z]{2}\\/[a-z]{2}.html$\")\n- pattern1 = re.compile(r\"^[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n- if pattern.match(path.strip()):\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n- elif pattern1.match(path.strip()):\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n- else:\n- yield scrapy.Request(\n- response.urljoin(path), callback=self.parse_city_stores\n- )\n", "issue": "Spider teavana is broken\nDuring the global build at 2021-05-26-14-42-23, spider **teavana** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/teavana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson) ([on a 
map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson))\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\n\n\nclass ExpressSpider(scrapy.Spider):\n\n name = \"teavana\"\n item_attributes = {\"brand\": \"Teavana\"}\n allowed_domains = [\"locations.teavana.com\"]\n download_delay = 0.5\n start_urls = (\"https://locations.teavana.com/\",)\n\n def parse_stores(self, response):\n ref = re.findall(r\"[^(\\/)]+$\", response.url)\n if len(ref) > 0:\n ref = ref[0].split(\".\")[0]\n properties = {\n \"addr_full\": \" \".join(\n response.xpath(\n '//span[@itemprop=\"streetAddress\"]/span/text()'\n ).extract()\n ),\n \"phone\": response.xpath(\n 'normalize-space(//span[@itemprop=\"telephone\"]/text())'\n ).extract_first(),\n \"city\": response.xpath(\n 'normalize-space(//span[@itemprop=\"addressLocality\"]/text())'\n ).extract_first(),\n \"state\": response.xpath(\n 'normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())'\n ).extract_first(),\n \"postcode\": response.xpath(\n 'normalize-space(//span[@itemprop=\"postalCode\"]/text())'\n ).extract_first(),\n \"ref\": ref,\n \"website\": response.url,\n \"lat\": float(\n response.xpath(\n 'normalize-space(//meta[@itemprop=\"latitude\"]/@content)'\n ).extract_first()\n ),\n \"lon\": float(\n response.xpath(\n 'normalize-space(//meta[@itemprop=\"longitude\"]/@content)'\n ).extract_first()\n ),\n }\n hours = response.xpath('//div[@itemprop=\"openingHours\"]/@content').extract()\n if hours != []:\n hours = \"; \".join(hours)\n properties[\"opening_hours\"] = hours\n yield GeojsonPointItem(**properties)\n\n def parse_city_stores(self, response):\n stores = response.xpath(\n '//h3[@class=\"Teaser-title Link Link--teaser Heading--h5\"]/a/@href'\n ).extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n\n def parse_state(self, response):\n urls = response.xpath(\n '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n pattern = re.compile(r\"..\\/[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n if pattern.match(path.strip()):\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n else:\n yield scrapy.Request(\n response.urljoin(path), callback=self.parse_city_stores\n )\n\n def parse(self, response):\n urls = response.xpath(\n '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n pattern = re.compile(r\"^[a-z]{2}\\/[a-z]{2}.html$\")\n pattern1 = re.compile(r\"^[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n if pattern.match(path.strip()):\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n elif pattern1.match(path.strip()):\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n else:\n yield scrapy.Request(\n response.urljoin(path), callback=self.parse_city_stores\n )\n", "path": "locations/spiders/teavana.py"}]}
| 1,648 | 893 |
gh_patches_debug_2128
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-891
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cookiecutter doesn't work on 0.8.7 release
**Describe the bug**
`mesa startproject` fails after `pipenv install mesa`
```
A valid repository for "/home/neil/.local/share/virtualenvs/baseline-economy-6fg_iky1/lib/python3.8/site-packages/mesa/cookiecutter-mesa" could not be found in the following locations:
...
```
**Expected behavior**
Generate the project layout
**To Reproduce**
- pipenv install mesa
- mesa startproject
**Additional context**
The cookiecutter directory from the repo is missing from the installation.
Additionally there is no help message for `startproject` when you run `mesa --help`
</issue>
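A sketch of the packaging angle described above (explicitly not the change in the accompanying patch): `cookiecutter-mesa` is not an importable package, so a `package_data` entry keyed on it has no effect and `find_packages()` never picks it up. One conventional way to ship such a template is to keep it inside the `mesa` package and list it there, or to add a `recursive-include cookiecutter-mesa *` line to MANIFEST.in for the sdist. Names below are hypothetical:

```python
# setup.py fragment, sketch only
package_data = {
    "mesa": [
        "visualization/templates/*.html",
        "visualization/templates/css/*",
        "visualization/templates/fonts/*",
        "visualization/templates/js/*",
        # hypothetical: template tree relocated under mesa/cookiecutter-mesa/
        "cookiecutter-mesa/*",
        "cookiecutter-mesa/*/*",
    ],
}
```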
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import re
4
5 from setuptools import setup, find_packages
6 from codecs import open
7
8 requires = ["click", "cookiecutter", "networkx", "numpy", "pandas", "tornado", "tqdm"]
9
10 extras_require = {
11 "dev": ["coverage", "flake8", "pytest >= 3.6", "pytest-cov", "sphinx"],
12 "docs": ["sphinx"],
13 }
14
15 version = ""
16 with open("mesa/__init__.py", "r") as fd:
17 version = re.search(
18 r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
19 ).group(1)
20
21 with open("README.rst", "rb", encoding="utf-8") as f:
22 readme = f.read()
23
24 setup(
25 name="Mesa",
26 version=version,
27 description="Agent-based modeling (ABM) in Python 3+",
28 long_description=readme,
29 author="Project Mesa Team",
30 author_email="[email protected]",
31 url="https://github.com/projectmesa/mesa",
32 packages=find_packages(),
33 package_data={
34 "mesa": [
35 "visualization/templates/*.html",
36 "visualization/templates/css/*",
37 "visualization/templates/fonts/*",
38 "visualization/templates/js/*",
39 ],
40 "cookiecutter-mesa": ["cookiecutter-mesa/*"],
41 },
42 include_package_data=True,
43 install_requires=requires,
44 extras_require=extras_require,
45 keywords="agent based modeling model ABM simulation multi-agent",
46 license="Apache 2.0",
47 zip_safe=False,
48 classifiers=[
49 "Topic :: Scientific/Engineering",
50 "Topic :: Scientific/Engineering :: Artificial Life",
51 "Topic :: Scientific/Engineering :: Artificial Intelligence",
52 "Intended Audience :: Science/Research",
53 "Programming Language :: Python :: 3 :: Only",
54 "License :: OSI Approved :: Apache Software License",
55 "Operating System :: OS Independent",
56 "Development Status :: 3 - Alpha",
57 "Natural Language :: English",
58 ],
59 entry_points="""
60 [console_scripts]
61 mesa=mesa.main:cli
62 """,
63 )
64
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
requires = ["click", "cookiecutter", "networkx", "numpy", "pandas", "tornado", "tqdm"]
extras_require = {
- "dev": ["coverage", "flake8", "pytest >= 3.6", "pytest-cov", "sphinx"],
+ "dev": ["coverage", "flake8", "pytest >= 4.6", "pytest-cov", "sphinx"],
"docs": ["sphinx"],
}
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,7 +8,7 @@\n requires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n \n extras_require = {\n- \"dev\": [\"coverage\", \"flake8\", \"pytest >= 3.6\", \"pytest-cov\", \"sphinx\"],\n+ \"dev\": [\"coverage\", \"flake8\", \"pytest >= 4.6\", \"pytest-cov\", \"sphinx\"],\n \"docs\": [\"sphinx\"],\n }\n", "issue": "Cookiecutter doesn't work on 0.8.7 release\n**Describe the bug**\r\n`mesa startproject` fails after `pipenv install mesa`\r\n```\r\nA valid repository for \"/home/neil/.local/share/virtualenvs/baseline-economy-6fg_iky1/lib/python3.8/site-packages/mesa/cookiecutter-mesa\" could not be found in the following locations:\r\n...\r\n```\r\n\r\n**Expected behavior**\r\nGenerate the project layout\r\n\r\n**To Reproduce**\r\n- pipenv install mesa\r\n- mesa startproject\r\n\r\n**Additional context**\r\nThe cookiecutter directory from the repo is missing from the installation.\r\nAdditionally there is no help message for `startproject` when you run `mesa --help`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n\nextras_require = {\n \"dev\": [\"coverage\", \"flake8\", \"pytest >= 3.6\", \"pytest-cov\", \"sphinx\"],\n \"docs\": [\"sphinx\"],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\", \"r\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"mesa\": [\n \"visualization/templates/*.html\",\n \"visualization/templates/css/*\",\n \"visualization/templates/fonts/*\",\n \"visualization/templates/js/*\",\n ],\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n)\n", "path": "setup.py"}]}
| 1,289 | 132 |
gh_patches_debug_26935
|
rasdani/github-patches
|
git_diff
|
horovod__horovod-3036
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
【ELASTIC HOROVOD】There may be a deadlock when all workers are recorded.
**Environment:**
1. Framework: (TensorFlow, Keras, PyTorch, MXNet): Pytorch
2. Framework version: 1.6.0
3. Horovod version: 0.21.3
4. MPI version: 4.0.3
5. CUDA version: 10.2
6. NCCL version: 2.7.6
7. Python version: 3.6
**Checklist:**
1. Did you search issues to find if somebody asked this question before? Yes.
2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)?
3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)?
4. Did you check if your question is answered in the [troubleshooting guide](https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)? Yes
**Bug report:**
After all workers have reported their status, the driver executes the `_on_workers_recorded` function, which gets and updates the host slot info and activates workers if necessary. The workers wait until the `_on_workers_recorded` function has completed. While `_on_workers_recorded` is executing, if one READY worker becomes FAILED, a deadlock can be caused (e.g. `_on_workers_recorded` calls the host discovery script, which may take several seconds, and a ready worker fails during that time).
The deadlock details are as follows:
1. If one ready worker (call it `worker-A`) fails during `_on_workers_recorded`, its registration results in `_barrier.reset()` being called while it holds `self._lock`; the relevant code is [here](https://github.com/horovod/horovod/blob/1a0a6f2c5ec536c44fd5292875064b65545de6e0/horovod/runner/elastic/registration.py#L98). So it hangs in `_barrier.reset()` until the `_on_workers_recorded` function finishes.
2. But the `_on_workers_recorded` function also tries to acquire `self._lock`, which is already held by `worker-A`, at [self.reset](https://github.com/horovod/horovod/blob/1a0a6f2c5ec536c44fd5292875064b65545de6e0/horovod/runner/elastic/registration.py#L52). Deadlock occurs! (A minimal standalone sketch of this lock-plus-barrier ordering follows below.)
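For illustration only, here is a minimal standalone sketch (not Horovod code) that mimics this ordering: the barrier action runs while the `Barrier`'s internal lock is held, so `reset()` from another thread blocks until the action returns, while the action itself waits for a lock that the resetting thread holds. The `timeout` on the acquire is only there so the sketch terminates instead of hanging; without it the two threads block each other forever, which is the reported deadlock.
```
import threading
import time

registry_lock = threading.Lock()           # stands in for WorkerStateRegistry._lock

def on_workers_recorded():
    # Barrier action: runs while the Barrier's internal lock is held.
    time.sleep(1)                           # pretend the host discovery script is slow
    got = registry_lock.acquire(timeout=3)  # horovod's reset(size) does `with self._lock:`
    print("action acquired registry lock:", got)   # False -> would have deadlocked
    if got:
        registry_lock.release()

barrier = threading.Barrier(parties=1, action=on_workers_recorded)

def ready_worker():
    barrier.wait()                          # last party -> runs the action inline

def failing_worker():
    time.sleep(0.2)                         # a READY worker fails while the action runs
    with registry_lock:                     # _record_state holds self._lock ...
        barrier.reset()                     # ... then blocks on the Barrier's internal lock

t1 = threading.Thread(target=ready_worker)
t2 = threading.Thread(target=failing_worker)
t1.start(); t2.start()
t1.join(); t2.join()
```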
**Steps to reproduce.**
1. In order to easily reproduce the problem, we sleep for 15s in `wait_for_available_slots` in the `horovod/runner/elastic/driver.py` file (this simulates the case where the host discovery script takes a few seconds):
```
...
def wait_for_available_slots(self, min_np, min_hosts=1):
print(f"wait_for_available_slots sleep 15s")
time.sleep(15)
extra_message = ' An elastic job also requires that at least two hosts ' \
'are available to resolve compatible network interfaces. If you know which interfaces ' \
'are compatible in your network, set `--network-interface` to skip this check.' \
if min_hosts > 1 else ''
...
```
2. Run elastic horovod:
```
horovodrun -np 1 --host-discovery-script ./discovery_hosts.sh --network-interface eth1 --min-np 1 --log-level DEBUG --verbose python3 pytorch_synthetic_benchmark_elastic.py --num-iters=1000
```
3. After some iterations have passed, we add a new worker in the host discovery script to raise `HostsUpdatedInterrupt`. The driver will record all workers as ready and call `_activate_workers` to launch a new worker and go to `wait_for_available_slots`, finally sleeping 15s in `wait_for_available_slots`.
4. We immediately kill one worker, and the driver updates this worker as failed. The driver blocks in `_barrier.reset()`, holding `self._lock`.
5. After 15s, `_activate_workers` calls `_worker_registry.reset()` to acquire `_lock`, which is already held. Deadlock!
**Solution.**
I think this issue is entirely caused by worker updates arriving during `_on_workers_recorded`. Maybe we should prohibit any worker updates while `_on_workers_recorded` is running? If any worker is updated during this time, we delay handling it until the next rendezvous.
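One way to realise this, and essentially what the accompanying patch does, is to gate `_record_state` on a `threading.Event` that is cleared while the barrier action runs. A simplified, self-contained sketch of the idea (class and method names shortened for illustration):
```
import threading
import time

class RegistrySketch:
    """Minimal sketch of the proposed gate; not the full WorkerStateRegistry."""

    def __init__(self):
        self._action_event = threading.Event()
        self._action_event.set()          # state updates allowed by default
        self.states = {}

    def record_state(self, key, state):
        # An update arriving while the barrier action is running waits here,
        # so it is only handled once the action has finished (next rendezvous).
        self._action_event.wait()
        self.states[key] = state

    def action(self):
        # Analogue of _action() wrapping _on_workers_recorded().
        self._action_event.clear()        # block new updates
        time.sleep(1)                     # host discovery, worker activation, ...
        self._action_event.set()          # allow updates again
```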
</issue>
<code>
[start of horovod/runner/elastic/registration.py]
1 # Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 import logging
17 import threading
18
19 from collections import defaultdict
20
21 from horovod.runner.elastic import constants
22
23 READY = 'READY'
24 SUCCESS = 'SUCCESS'
25 FAILURE = 'FAILURE'
26
27
28 class WorkerStateRegistry(object):
29 def __init__(self, driver, host_manager, reset_limit=None, verbose=False):
30 self._driver = driver
31 self._host_manager = host_manager
32 self._reset_limit = reset_limit
33 self._reset_count = 0
34 self._lock = threading.Lock()
35 self._states = {}
36 self._workers = defaultdict(set)
37 self._barrier = None
38 self._rendezvous_id = 0
39 self._verbose = verbose
40 self._size = 0
41
42 def get_recorded_slots(self):
43 return self._states.keys()
44
45 def get(self, state):
46 return self._workers[state]
47
48 def count(self, state):
49 return len(self._workers[state])
50
51 def reset(self, size):
52 with self._lock:
53 logging.info('reset workers: {}'.format(size))
54 self._states.clear()
55 self._workers.clear()
56 self._barrier = threading.Barrier(parties=size, action=self._action)
57 self._rendezvous_id += 1
58 self._size = size
59
60 def size(self):
61 return self._size
62
63 def last_rendezvous(self):
64 return self._rendezvous_id
65
66 def record_ready(self, host, slot):
67 return self._record_state(host, slot, READY)
68
69 def record_success(self, host, slot):
70 return self._record_state(host, slot, SUCCESS)
71
72 def record_failure(self, host, slot):
73 return self._record_state(host, slot, FAILURE)
74
75 def _record_state(self, host, slot, state):
76 if self._driver.finished():
77 logging.info('driver finished, ignoring registration: {}[{}] = {}'.format(host, slot, state))
78 return self._rendezvous_id
79
80 if self._host_manager.is_blacklisted(host):
81 logging.warning('host registers state %s but is already blacklisted, ignoring: %s', state, host)
82 return self._rendezvous_id
83
84 key = (host, slot)
85 with self._lock:
86 if key in self._states:
87 if state == FAILURE:
88 # Worker originally recorded itself as READY, but the worker failed while waiting at the barrier. As
89 # such, we need to update the state to FAILURE, and we don't want two threads coming from the same
90 # worker at the barrier.
91 #
92 # In order to ensure that the new failing thread can record results in cases of total job failure,
93 # we also need to block this thread by waiting on the barrier. This requires us to reset the barrier,
94 # as otherwise this worker will be double-counted (once for the READY thread and once for FAILURE),
95 # which would cause the barrier to complete too early.
96 logging.info('key exists, reset barrier: {}[{}] = {} -> {}'
97 .format(host, slot, self._states[key], state))
98 self._barrier.reset()
99 else:
100 logging.error('key exists and new state %s not FAILURE, '
101 'ignoring (current state is %s)', state, self._states[key])
102
103 if key not in self._states or state == FAILURE:
104 logging.info('record state: {}[{}] = {}'.format(host, slot, state))
105 self._states[key] = state
106 self._workers[state].add(key)
107
108 rendezvous_id = self._rendezvous_id
109
110 rendezvous_id = self._wait(key, state, rendezvous_id)
111 return rendezvous_id
112
113 def _wait(self, key, state, rendezvous_id):
114 while True:
115 try:
116 self._barrier.wait()
117 return rendezvous_id
118 except threading.BrokenBarrierError:
119 if self._barrier.broken:
120 # Timeout or other non-recoverable error, so exit
121 raise
122
123 # Barrier has been reset
124 with self._lock:
125 # Check to make sure the reset was not caused by a change of state for this key
126 rendezvous_id = self._rendezvous_id
127 saved_state = self._states.get(key, state)
128 if saved_state != state:
129 # This worker changed its state, so do not attempt to wait again to avoid double-counting
130 raise RuntimeError('State {} overridden by {}'.format(state, saved_state))
131
132 def _action(self):
133 self._on_workers_recorded()
134
135 def _on_workers_recorded(self):
136 logging.info('all {} workers recorded'.format(self.size()))
137
138 # Check for success state, if any process succeeded, shutdown all other processes
139 if self.count(SUCCESS) > 0:
140 logging.info('success count == {} -> stop running'.format(self.count(SUCCESS)))
141 self._driver.stop()
142 return
143
144 # Check that all processes failed, indicating that processing should stop
145 if self.count(FAILURE) == self._size:
146 logging.error('failure count == {} -> stop running'.format(self._size))
147 self._driver.stop()
148 return
149
150 # Check for failures, and add them to the blacklisted hosts list
151 failures = self.get(FAILURE)
152 for host, slot in failures:
153 self._host_manager.blacklist(host)
154
155 # If every active host is blacklisted, then treat this as job failure
156 if all([self._host_manager.is_blacklisted(host) for host, slot in self.get_recorded_slots()]):
157 logging.error('blacklisted slots count == {} -> stop running'.format(self._size))
158 self._driver.stop()
159 return
160
161 # Check that we have already reset the maximum number of allowed times
162 if self._reset_limit is not None and self._reset_count >= self._reset_limit:
163 logging.error('reset count {} has exceeded limit {} -> stop running'
164 .format(self._reset_count, self._reset_limit))
165 self._driver.stop(error_message=constants.RESET_LIMIT_EXCEEDED_MESSAGE.format(self._reset_limit))
166 return
167
168 try:
169 self._reset_count += 1
170 self._driver.resume()
171 except Exception:
172 logging.exception('failed to activate new hosts -> stop running')
173 self._driver.stop()
174
[end of horovod/runner/elastic/registration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/horovod/runner/elastic/registration.py b/horovod/runner/elastic/registration.py
--- a/horovod/runner/elastic/registration.py
+++ b/horovod/runner/elastic/registration.py
@@ -38,6 +38,8 @@
self._rendezvous_id = 0
self._verbose = verbose
self._size = 0
+ self._action_event = threading.Event()
+ self._action_event.set()
def get_recorded_slots(self):
return self._states.keys()
@@ -81,6 +83,9 @@
logging.warning('host registers state %s but is already blacklisted, ignoring: %s', state, host)
return self._rendezvous_id
+ # we should wait for _action finished if a _record_state called when _action is running
+ self._action_event.wait()
+
key = (host, slot)
with self._lock:
if key in self._states:
@@ -130,7 +135,9 @@
raise RuntimeError('State {} overridden by {}'.format(state, saved_state))
def _action(self):
+ self._action_event.clear()
self._on_workers_recorded()
+ self._action_event.set()
def _on_workers_recorded(self):
logging.info('all {} workers recorded'.format(self.size()))
|
{"golden_diff": "diff --git a/horovod/runner/elastic/registration.py b/horovod/runner/elastic/registration.py\n--- a/horovod/runner/elastic/registration.py\n+++ b/horovod/runner/elastic/registration.py\n@@ -38,6 +38,8 @@\n self._rendezvous_id = 0\n self._verbose = verbose\n self._size = 0\n+ self._action_event = threading.Event()\n+ self._action_event.set()\n \n def get_recorded_slots(self):\n return self._states.keys()\n@@ -81,6 +83,9 @@\n logging.warning('host registers state %s but is already blacklisted, ignoring: %s', state, host)\n return self._rendezvous_id\n \n+ # we should wait for _action finished if a _record_state called when _action is running\n+ self._action_event.wait()\n+\n key = (host, slot)\n with self._lock:\n if key in self._states:\n@@ -130,7 +135,9 @@\n raise RuntimeError('State {} overridden by {}'.format(state, saved_state))\n \n def _action(self):\n+ self._action_event.clear()\n self._on_workers_recorded()\n+ self._action_event.set()\n \n def _on_workers_recorded(self):\n logging.info('all {} workers recorded'.format(self.size()))\n", "issue": "\u3010ELASTIC HOROVOD\u3011It perhaps some deadlock when all workers are recorded.\n**Environment:**\r\n1. Framework: (TensorFlow, Keras, PyTorch, MXNet): Pytorch\r\n2. Framework version: 1.6.0\r\n3. Horovod version: 0.21.3\r\n4. MPI version: 4.0.3\r\n5. CUDA version: 10.2\r\n6. NCCL version: 2.7.6\r\n7. Python version: 3.6\r\n\r\n**Checklist:**\r\n1. Did you search issues to find if somebody asked this question before? Yes.\r\n2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)?\r\n3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)?\r\n4. Did you check if you question is answered in the [troubleshooting guide] (https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)? Yes\r\n\r\n**Bug report:**\r\nAfter all workers have report their status, the driver will execute the `_on_workers_recorded` function which get and update host slot info and activate workers if necessary. The workers wait until the `_on_workers_recorded` function is completed. When executing `_on_workers_recorded` function, if one READY worker becomes FAILED\uff0cthe deadlock will caused.(e.g. `_on_workers_recorded` call host discovery script that maybe consume seconds and one ready worker failed during this time). \r\n\r\nThe deadlock details as follows:\r\n1. If one ready worker(we call it `worker-A`) become failed during `_on_workers_recorded`, it will result in `_barrier.reset()` and holds the `self._lock`, relevant code at [here](https://github.com/horovod/horovod/blob/1a0a6f2c5ec536c44fd5292875064b65545de6e0/horovod/runner/elastic/registration.py#L98). So it hang in `_barrier.reset()` until `_on_workers_recorded` function finish.\r\n\r\n2. But `_on_workers_recorded` function also try to acquire `self._lock` which already held by `worker-A` at [self.reset](https://github.com/horovod/horovod/blob/1a0a6f2c5ec536c44fd5292875064b65545de6e0/horovod/runner/elastic/registration.py#L52). Deadlock occurs!\r\n\r\n**Steps to reproduce.**\r\n1. 
In order to easily reproduce the problem, we sleep 15s at `wait_for_available_slots` in `horovod/runner/elastic/driver.py` file (It simulates the case that host discovery script consume a few seconds):\r\n```\r\n...\r\n def wait_for_available_slots(self, min_np, min_hosts=1):\r\n print(f\"wait_for_available_slots sleep 15s\")\r\n time.sleep(15)\r\n\r\n extra_message = ' An elastic job also requires that at least two hosts ' \\\r\n 'are available to resolve compatible network interfaces. If you know which interfaces ' \\\r\n 'are compatible in your network, set `--network-interface` to skip this check.' \\\r\n if min_hosts > 1 else ''\r\n...\r\n```\r\n\r\n2. Run elastic horovod:\r\n```\r\nhorovodrun -np 1 --host-discovery-script ./discovery_hosts.sh --network-interface eth1 --min-np 1 --log-level DEBUG --verbose python3 pytorch_synthetic_benchmark_elastic.py --num-iters=1000\r\n```\r\n\r\n3. After some iteration passed, we add a new worker in host-discovery-script to raise `HostsUpdatedInterrupt`. The driver will record all workers as ready and call `_activate_workers` to new a worker and go to `wait_for_available_slots`, finally sleeping 15s in `wait_for_available_slots`.\r\n\r\n4. We immediately kill one worker and the driver updates this worker as failed. Driver blocks in `_barrier.reset()`, holding the `self._lock`.\r\n\r\n5. After 15s, `_activate_workers` call `_worker_registry.reset()` to acquire `_lock` which already is held. Deadlock! \r\n\r\n**Solution.**\r\nI think this issue is completely caused by worker updates during `_on_workers_recorded`. Maybe should we prohibit any worker updates in `_on_workers_recorded` function? If any worker updated during this time, we delay this to next rendezvous. \n", "before_files": [{"content": "# Copyright 2020 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport logging\nimport threading\n\nfrom collections import defaultdict\n\nfrom horovod.runner.elastic import constants\n\nREADY = 'READY'\nSUCCESS = 'SUCCESS'\nFAILURE = 'FAILURE'\n\n\nclass WorkerStateRegistry(object):\n def __init__(self, driver, host_manager, reset_limit=None, verbose=False):\n self._driver = driver\n self._host_manager = host_manager\n self._reset_limit = reset_limit\n self._reset_count = 0\n self._lock = threading.Lock()\n self._states = {}\n self._workers = defaultdict(set)\n self._barrier = None\n self._rendezvous_id = 0\n self._verbose = verbose\n self._size = 0\n\n def get_recorded_slots(self):\n return self._states.keys()\n\n def get(self, state):\n return self._workers[state]\n\n def count(self, state):\n return len(self._workers[state])\n\n def reset(self, size):\n with self._lock:\n logging.info('reset workers: {}'.format(size))\n self._states.clear()\n self._workers.clear()\n self._barrier = threading.Barrier(parties=size, action=self._action)\n self._rendezvous_id += 1\n self._size = size\n\n def size(self):\n return self._size\n\n def last_rendezvous(self):\n return self._rendezvous_id\n\n def record_ready(self, host, slot):\n return self._record_state(host, slot, READY)\n\n def record_success(self, host, slot):\n return self._record_state(host, slot, SUCCESS)\n\n def record_failure(self, host, slot):\n return self._record_state(host, slot, FAILURE)\n\n def _record_state(self, host, slot, state):\n if self._driver.finished():\n logging.info('driver finished, ignoring registration: {}[{}] = {}'.format(host, slot, state))\n return self._rendezvous_id\n\n if self._host_manager.is_blacklisted(host):\n logging.warning('host registers state %s but is already blacklisted, ignoring: %s', state, host)\n return self._rendezvous_id\n\n key = (host, slot)\n with self._lock:\n if key in self._states:\n if state == FAILURE:\n # Worker originally recorded itself as READY, but the worker failed while waiting at the barrier. As\n # such, we need to update the state to FAILURE, and we don't want two threads coming from the same\n # worker at the barrier.\n #\n # In order to ensure that the new failing thread can record results in cases of total job failure,\n # we also need to block this thread by waiting on the barrier. 
This requires us to reset the barrier,\n # as otherwise this worker will be double-counted (once for the READY thread and once for FAILURE),\n # which would cause the barrier to complete too early.\n logging.info('key exists, reset barrier: {}[{}] = {} -> {}'\n .format(host, slot, self._states[key], state))\n self._barrier.reset()\n else:\n logging.error('key exists and new state %s not FAILURE, '\n 'ignoring (current state is %s)', state, self._states[key])\n\n if key not in self._states or state == FAILURE:\n logging.info('record state: {}[{}] = {}'.format(host, slot, state))\n self._states[key] = state\n self._workers[state].add(key)\n\n rendezvous_id = self._rendezvous_id\n\n rendezvous_id = self._wait(key, state, rendezvous_id)\n return rendezvous_id\n\n def _wait(self, key, state, rendezvous_id):\n while True:\n try:\n self._barrier.wait()\n return rendezvous_id\n except threading.BrokenBarrierError:\n if self._barrier.broken:\n # Timeout or other non-recoverable error, so exit\n raise\n\n # Barrier has been reset\n with self._lock:\n # Check to make sure the reset was not caused by a change of state for this key\n rendezvous_id = self._rendezvous_id\n saved_state = self._states.get(key, state)\n if saved_state != state:\n # This worker changed its state, so do not attempt to wait again to avoid double-counting\n raise RuntimeError('State {} overridden by {}'.format(state, saved_state))\n\n def _action(self):\n self._on_workers_recorded()\n\n def _on_workers_recorded(self):\n logging.info('all {} workers recorded'.format(self.size()))\n\n # Check for success state, if any process succeeded, shutdown all other processes\n if self.count(SUCCESS) > 0:\n logging.info('success count == {} -> stop running'.format(self.count(SUCCESS)))\n self._driver.stop()\n return\n\n # Check that all processes failed, indicating that processing should stop\n if self.count(FAILURE) == self._size:\n logging.error('failure count == {} -> stop running'.format(self._size))\n self._driver.stop()\n return\n\n # Check for failures, and add them to the blacklisted hosts list\n failures = self.get(FAILURE)\n for host, slot in failures:\n self._host_manager.blacklist(host)\n\n # If every active host is blacklisted, then treat this as job failure\n if all([self._host_manager.is_blacklisted(host) for host, slot in self.get_recorded_slots()]):\n logging.error('blacklisted slots count == {} -> stop running'.format(self._size))\n self._driver.stop()\n return\n\n # Check that we have already reset the maximum number of allowed times\n if self._reset_limit is not None and self._reset_count >= self._reset_limit:\n logging.error('reset count {} has exceeded limit {} -> stop running'\n .format(self._reset_count, self._reset_limit))\n self._driver.stop(error_message=constants.RESET_LIMIT_EXCEEDED_MESSAGE.format(self._reset_limit))\n return\n\n try:\n self._reset_count += 1\n self._driver.resume()\n except Exception:\n logging.exception('failed to activate new hosts -> stop running')\n self._driver.stop()\n", "path": "horovod/runner/elastic/registration.py"}]}
| 3,481 | 309 |
gh_patches_debug_23363
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-1745
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scheduler options not honored in Torque
**Describe the bug**
If a `scheduler_option` that conflicts with a default request in the standard template is passed via the `TorqueProvider`, it is currently ignored. It turns out that Torque honors the first line rather than the last in case of a conflicting option.
For example:
```
#PBS -l nodes=1:ppn=1
#PBS -l nodes=1:ppn=16:xk # <--- from scheduler_options, will be ignored.
```
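For context, a provider configured roughly like the sketch below (constructor arguments as in `TorqueProvider.__init__` further down; the ppn value is illustrative) would generate a submit script like the example above: the template's own `#PBS -l nodes=...:ppn=...` line is emitted first, so the conflicting request from `scheduler_options` is the one Torque ignores.
```
from parsl.launchers import AprunLauncher
from parsl.providers import TorqueProvider

provider = TorqueProvider(
    nodes_per_block=1,
    walltime="00:20:00",
    launcher=AprunLauncher(),
    # Conflicts with the template's default "#PBS -l nodes=...:ppn=..." line,
    # which currently appears earlier in the generated script and therefore wins.
    scheduler_options="#PBS -l nodes=1:ppn=16:xk",
)
```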
</issue>
<code>
[start of parsl/providers/torque/torque.py]
1 import logging
2 import os
3 import time
4
5 from parsl.channels import LocalChannel
6 from parsl.launchers import AprunLauncher
7 from parsl.providers.provider_base import JobState, JobStatus
8 from parsl.providers.torque.template import template_string
9 from parsl.providers.cluster_provider import ClusterProvider
10 from parsl.utils import RepresentationMixin
11
12 logger = logging.getLogger(__name__)
13
14 # From the man pages for qstat for PBS/Torque systems
15 translate_table = {
16 'B': JobState.RUNNING, # This state is returned for running array jobs
17 'R': JobState.RUNNING,
18 'C': JobState.COMPLETED, # Completed after having run
19 'E': JobState.COMPLETED, # Exiting after having run
20 'H': JobState.HELD, # Held
21 'Q': JobState.PENDING, # Queued, and eligible to run
22 'W': JobState.PENDING, # Job is waiting for it's execution time (-a option) to be reached
23 'S': JobState.HELD
24 } # Suspended
25
26
27 class TorqueProvider(ClusterProvider, RepresentationMixin):
28 """Torque Execution Provider
29
30 This provider uses sbatch to submit, squeue for status, and scancel to cancel
31 jobs. The sbatch script to be used is created from a template file in this
32 same module.
33
34 Parameters
35 ----------
36 channel : Channel
37 Channel for accessing this provider. Possible channels include
38 :class:`~parsl.channels.LocalChannel` (the default),
39 :class:`~parsl.channels.SSHChannel`, or
40 :class:`~parsl.channels.SSHInteractiveLoginChannel`.
41 account : str
42 Account the job will be charged against.
43 queue : str
44 Torque queue to request blocks from.
45 nodes_per_block : int
46 Nodes to provision per block.
47 init_blocks : int
48 Number of blocks to provision at the start of the run. Default is 1.
49 min_blocks : int
50 Minimum number of blocks to maintain. Default is 0.
51 max_blocks : int
52 Maximum number of blocks to maintain.
53 parallelism : float
54 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
55 scaling where as many resources as possible are used; parallelism close to 0 represents
56 the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
57 walltime : str
58 Walltime requested per block in HH:MM:SS.
59 scheduler_options : str
60 String to prepend to the #PBS blocks in the submit script to the scheduler.
61 worker_init : str
62 Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
63 launcher : Launcher
64 Launcher for this provider. Possible launchers include
65 :class:`~parsl.launchers.AprunLauncher` (the default), or
66 :class:`~parsl.launchers.SingleNodeLauncher`,
67
68 """
69 def __init__(self,
70 channel=LocalChannel(),
71 account=None,
72 queue=None,
73 scheduler_options='',
74 worker_init='',
75 nodes_per_block=1,
76 init_blocks=1,
77 min_blocks=0,
78 max_blocks=1,
79 parallelism=1,
80 launcher=AprunLauncher(),
81 walltime="00:20:00",
82 cmd_timeout=120):
83 label = 'torque'
84 super().__init__(label,
85 channel,
86 nodes_per_block,
87 init_blocks,
88 min_blocks,
89 max_blocks,
90 parallelism,
91 walltime,
92 launcher,
93 cmd_timeout=cmd_timeout)
94
95 self.account = account
96 self.queue = queue
97 self.scheduler_options = scheduler_options
98 self.worker_init = worker_init
99 self.provisioned_blocks = 0
100 self.template_string = template_string
101
102 # Dictionary that keeps track of jobs, keyed on job_id
103 self.resources = {}
104
105 def _status(self):
106 ''' Internal: Do not call. Returns the status list for a list of job_ids
107
108 Args:
109 self
110
111 Returns:
112 [status...] : Status list of all jobs
113 '''
114
115 job_ids = list(self.resources.keys())
116 job_id_list = ' '.join(self.resources.keys())
117
118 jobs_missing = list(self.resources.keys())
119
120 retcode, stdout, stderr = self.execute_wait("qstat {0}".format(job_id_list))
121 for line in stdout.split('\n'):
122 parts = line.split()
123 if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):
124 continue
125 job_id = parts[0] # likely truncated
126 for long_job_id in job_ids:
127 if long_job_id.startswith(job_id):
128 logger.debug('coerced job_id %s -> %s', job_id, long_job_id)
129 job_id = long_job_id
130 break
131 state = translate_table.get(parts[4], JobState.UNKNOWN)
132 self.resources[job_id]['status'] = JobStatus(state)
133 jobs_missing.remove(job_id)
134
135 # squeue does not report on jobs that are not running. So we are filling in the
136 # blanks for missing jobs, we might lose some information about why the jobs failed.
137 for missing_job in jobs_missing:
138 self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)
139
140 def submit(self, command, tasks_per_node, job_name="parsl.torque"):
141 ''' Submits the command onto an Local Resource Manager job.
142 Submit returns an ID that corresponds to the task that was just submitted.
143
144 If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer
145
146 If tasks_per_node == 1:
147 A single node is provisioned
148
149 If tasks_per_node > 1 :
150 tasks_per_node number of nodes are provisioned.
151
152 Args:
153 - command :(String) Commandline invocation to be made on the remote side.
154 - tasks_per_node (int) : command invocations to be launched per node
155
156 Kwargs:
157 - job_name (String): Name for job, must be unique
158
159 Returns:
160 - None: At capacity, cannot provision more
161 - job_id: (string) Identifier for the job
162
163 '''
164
165 if self.provisioned_blocks >= self.max_blocks:
166 logger.warning("[%s] at capacity, cannot add more blocks now", self.label)
167 return None
168
169 # Set job name
170 job_name = "parsl.{0}.{1}".format(job_name, time.time())
171
172 # Set script path
173 script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
174 script_path = os.path.abspath(script_path)
175
176 logger.debug("Requesting nodes_per_block:%s tasks_per_node:%s", self.nodes_per_block,
177 tasks_per_node)
178
179 job_config = {}
180 # TODO : script_path might need to change to accommodate script dir set via channels
181 job_config["submit_script_dir"] = self.channel.script_dir
182 job_config["nodes"] = self.nodes_per_block
183 job_config["task_blocks"] = self.nodes_per_block * tasks_per_node
184 job_config["nodes_per_block"] = self.nodes_per_block
185 job_config["tasks_per_node"] = tasks_per_node
186 job_config["walltime"] = self.walltime
187 job_config["scheduler_options"] = self.scheduler_options
188 job_config["worker_init"] = self.worker_init
189 job_config["user_script"] = command
190
191 # Wrap the command
192 job_config["user_script"] = self.launcher(command,
193 tasks_per_node,
194 self.nodes_per_block)
195
196 logger.debug("Writing submit script")
197 self._write_submit_script(self.template_string, script_path, job_name, job_config)
198
199 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
200
201 submit_options = ''
202 if self.queue is not None:
203 submit_options = '{0} -q {1}'.format(submit_options, self.queue)
204 if self.account is not None:
205 submit_options = '{0} -A {1}'.format(submit_options, self.account)
206
207 launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
208 retcode, stdout, stderr = self.execute_wait(launch_cmd)
209
210 job_id = None
211 if retcode == 0:
212 for line in stdout.split('\n'):
213 if line.strip():
214 job_id = line.strip()
215 self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
216 else:
217 message = "Command '{}' failed with return code {}".format(launch_cmd, retcode)
218 if (stdout is not None) and (stderr is not None):
219 message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip())
220 logger.error(message)
221
222 return job_id
223
224 def cancel(self, job_ids):
225 ''' Cancels the jobs specified by a list of job ids
226
227 Args:
228 job_ids : [<job_id> ...]
229
230 Returns :
231 [True/False...] : If the cancel operation fails the entire list will be False.
232 '''
233
234 job_id_list = ' '.join(job_ids)
235 retcode, stdout, stderr = self.execute_wait("qdel {0}".format(job_id_list))
236 rets = None
237 if retcode == 0:
238 for jid in job_ids:
239 self.resources[jid]['status'] = JobStatus(JobState.COMPLETED) # Setting state to exiting
240 rets = [True for i in job_ids]
241 else:
242 rets = [False for i in job_ids]
243
244 return rets
245
246 @property
247 def status_polling_interval(self):
248 return 60
249
250
251 if __name__ == "__main__":
252
253 print("None")
254
[end of parsl/providers/torque/torque.py]
[start of parsl/providers/torque/template.py]
1 template_string = '''#!/bin/bash
2
3 #PBS -S /bin/bash
4 #PBS -N ${jobname}
5 #PBS -m n
6 #PBS -l walltime=$walltime
7 #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
8 #PBS -o ${submit_script_dir}/${jobname}.submit.stdout
9 #PBS -e ${submit_script_dir}/${jobname}.submit.stderr
10 ${scheduler_options}
11
12 ${worker_init}
13
14 export JOBNAME="${jobname}"
15
16 ${user_script}
17
18 '''
19
[end of parsl/providers/torque/template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/providers/torque/template.py b/parsl/providers/torque/template.py
--- a/parsl/providers/torque/template.py
+++ b/parsl/providers/torque/template.py
@@ -1,5 +1,6 @@
template_string = '''#!/bin/bash
+${scheduler_options}
#PBS -S /bin/bash
#PBS -N ${jobname}
#PBS -m n
@@ -7,7 +8,6 @@
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
-${scheduler_options}
${worker_init}
diff --git a/parsl/providers/torque/torque.py b/parsl/providers/torque/torque.py
--- a/parsl/providers/torque/torque.py
+++ b/parsl/providers/torque/torque.py
@@ -58,6 +58,7 @@
Walltime requested per block in HH:MM:SS.
scheduler_options : str
String to prepend to the #PBS blocks in the submit script to the scheduler.
+ WARNING: scheduler_options should only be given #PBS strings, and should not have trailing newlines.
worker_init : str
Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
launcher : Launcher
|
{"golden_diff": "diff --git a/parsl/providers/torque/template.py b/parsl/providers/torque/template.py\n--- a/parsl/providers/torque/template.py\n+++ b/parsl/providers/torque/template.py\n@@ -1,5 +1,6 @@\n template_string = '''#!/bin/bash\n \n+${scheduler_options}\n #PBS -S /bin/bash\n #PBS -N ${jobname}\n #PBS -m n\n@@ -7,7 +8,6 @@\n #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n-${scheduler_options}\n \n ${worker_init}\n \ndiff --git a/parsl/providers/torque/torque.py b/parsl/providers/torque/torque.py\n--- a/parsl/providers/torque/torque.py\n+++ b/parsl/providers/torque/torque.py\n@@ -58,6 +58,7 @@\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n+ WARNING: scheduler_options should only be given #PBS strings, and should not have trailing newlines.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n", "issue": "Scheduler options not honored in Torque\n**Describe the bug**\r\nIf a `scheduler_option` that conflicts with a default request in the standard template is made via the `TorqueProvider`, it's currently ignored. It turns out that Torque honors the first line rather than the last in case of a conflicting option. \r\n\r\nFor eg:\r\n```\r\n#PBS -l nodes=1:ppn=1\r\n#PBS -l nodes=1:ppn=16:xk # <--- from scheduler_options, will be ignored.\r\n```\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.launchers import AprunLauncher\nfrom parsl.providers.provider_base import JobState, JobStatus\nfrom parsl.providers.torque.template import template_string\nfrom parsl.providers.cluster_provider import ClusterProvider\nfrom parsl.utils import RepresentationMixin\n\nlogger = logging.getLogger(__name__)\n\n# From the man pages for qstat for PBS/Torque systems\ntranslate_table = {\n 'B': JobState.RUNNING, # This state is returned for running array jobs\n 'R': JobState.RUNNING,\n 'C': JobState.COMPLETED, # Completed after having run\n 'E': JobState.COMPLETED, # Exiting after having run\n 'H': JobState.HELD, # Held\n 'Q': JobState.PENDING, # Queued, and eligible to run\n 'W': JobState.PENDING, # Job is waiting for it's execution time (-a option) to be reached\n 'S': JobState.HELD\n} # Suspended\n\n\nclass TorqueProvider(ClusterProvider, RepresentationMixin):\n \"\"\"Torque Execution Provider\n\n This provider uses sbatch to submit, squeue for status, and scancel to cancel\n jobs. The sbatch script to be used is created from a template file in this\n same module.\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n account : str\n Account the job will be charged against.\n queue : str\n Torque queue to request blocks from.\n nodes_per_block : int\n Nodes to provision per block.\n init_blocks : int\n Number of blocks to provision at the start of the run. Default is 1.\n min_blocks : int\n Minimum number of blocks to maintain. Default is 0.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. 
A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.AprunLauncher` (the default), or\n :class:`~parsl.launchers.SingleNodeLauncher`,\n\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n account=None,\n queue=None,\n scheduler_options='',\n worker_init='',\n nodes_per_block=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=1,\n parallelism=1,\n launcher=AprunLauncher(),\n walltime=\"00:20:00\",\n cmd_timeout=120):\n label = 'torque'\n super().__init__(label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher,\n cmd_timeout=cmd_timeout)\n\n self.account = account\n self.queue = queue\n self.scheduler_options = scheduler_options\n self.worker_init = worker_init\n self.provisioned_blocks = 0\n self.template_string = template_string\n\n # Dictionary that keeps track of jobs, keyed on job_id\n self.resources = {}\n\n def _status(self):\n ''' Internal: Do not call. Returns the status list for a list of job_ids\n\n Args:\n self\n\n Returns:\n [status...] : Status list of all jobs\n '''\n\n job_ids = list(self.resources.keys())\n job_id_list = ' '.join(self.resources.keys())\n\n jobs_missing = list(self.resources.keys())\n\n retcode, stdout, stderr = self.execute_wait(\"qstat {0}\".format(job_id_list))\n for line in stdout.split('\\n'):\n parts = line.split()\n if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):\n continue\n job_id = parts[0] # likely truncated\n for long_job_id in job_ids:\n if long_job_id.startswith(job_id):\n logger.debug('coerced job_id %s -> %s', job_id, long_job_id)\n job_id = long_job_id\n break\n state = translate_table.get(parts[4], JobState.UNKNOWN)\n self.resources[job_id]['status'] = JobStatus(state)\n jobs_missing.remove(job_id)\n\n # squeue does not report on jobs that are not running. So we are filling in the\n # blanks for missing jobs, we might lose some information about why the jobs failed.\n for missing_job in jobs_missing:\n self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)\n\n def submit(self, command, tasks_per_node, job_name=\"parsl.torque\"):\n ''' Submits the command onto an Local Resource Manager job.\n Submit returns an ID that corresponds to the task that was just submitted.\n\n If tasks_per_node < 1 : ! This is illegal. 
tasks_per_node should be integer\n\n If tasks_per_node == 1:\n A single node is provisioned\n\n If tasks_per_node > 1 :\n tasks_per_node number of nodes are provisioned.\n\n Args:\n - command :(String) Commandline invocation to be made on the remote side.\n - tasks_per_node (int) : command invocations to be launched per node\n\n Kwargs:\n - job_name (String): Name for job, must be unique\n\n Returns:\n - None: At capacity, cannot provision more\n - job_id: (string) Identifier for the job\n\n '''\n\n if self.provisioned_blocks >= self.max_blocks:\n logger.warning(\"[%s] at capacity, cannot add more blocks now\", self.label)\n return None\n\n # Set job name\n job_name = \"parsl.{0}.{1}\".format(job_name, time.time())\n\n # Set script path\n script_path = \"{0}/{1}.submit\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n\n logger.debug(\"Requesting nodes_per_block:%s tasks_per_node:%s\", self.nodes_per_block,\n tasks_per_node)\n\n job_config = {}\n # TODO : script_path might need to change to accommodate script dir set via channels\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"nodes\"] = self.nodes_per_block\n job_config[\"task_blocks\"] = self.nodes_per_block * tasks_per_node\n job_config[\"nodes_per_block\"] = self.nodes_per_block\n job_config[\"tasks_per_node\"] = tasks_per_node\n job_config[\"walltime\"] = self.walltime\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n logger.debug(\"Writing submit script\")\n self._write_submit_script(self.template_string, script_path, job_name, job_config)\n\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n submit_options = ''\n if self.queue is not None:\n submit_options = '{0} -q {1}'.format(submit_options, self.queue)\n if self.account is not None:\n submit_options = '{0} -A {1}'.format(submit_options, self.account)\n\n launch_cmd = \"qsub {0} {1}\".format(submit_options, channel_script_path)\n retcode, stdout, stderr = self.execute_wait(launch_cmd)\n\n job_id = None\n if retcode == 0:\n for line in stdout.split('\\n'):\n if line.strip():\n job_id = line.strip()\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n else:\n message = \"Command '{}' failed with return code {}\".format(launch_cmd, retcode)\n if (stdout is not None) and (stderr is not None):\n message += \"\\nstderr:{}\\nstdout{}\".format(stderr.strip(), stdout.strip())\n logger.error(message)\n\n return job_id\n\n def cancel(self, job_ids):\n ''' Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [<job_id> ...]\n\n Returns :\n [True/False...] 
: If the cancel operation fails the entire list will be False.\n '''\n\n job_id_list = ' '.join(job_ids)\n retcode, stdout, stderr = self.execute_wait(\"qdel {0}\".format(job_id_list))\n rets = None\n if retcode == 0:\n for jid in job_ids:\n self.resources[jid]['status'] = JobStatus(JobState.COMPLETED) # Setting state to exiting\n rets = [True for i in job_ids]\n else:\n rets = [False for i in job_ids]\n\n return rets\n\n @property\n def status_polling_interval(self):\n return 60\n\n\nif __name__ == \"__main__\":\n\n print(\"None\")\n", "path": "parsl/providers/torque/torque.py"}, {"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -l walltime=$walltime\n#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n${scheduler_options}\n\n${worker_init}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n", "path": "parsl/providers/torque/template.py"}]}
| 3,612 | 307 |
gh_patches_debug_1092
|
rasdani/github-patches
|
git_diff
|
psychopy__psychopy-2333
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Demos -> Hardware -> testSoundLatency.py not working in v3.0.6
Running Demo -> Hardware -> testSoundLatency.py results in the following error message:
```
##### Running: C:\Program Files (x86)\PsychoPy3\lib\site-packages\psychopy\demos\coder\hardware\testSoundLatency.py #####
pygame 1.9.4
Hello from the pygame community. https://www.pygame.org/contribute.html
Traceback (most recent call last):
File "C:\Program Files (x86)\PsychoPy3\lib\site-packages\psychopy\demos\coder\hardware\testSoundLatency.py", line 16, in <module>
from labjack import u3
ModuleNotFoundError: No module named 'labjack'
```
Windows 7, 64 bit, PsychoPy 3.0.6 64 bit standalone
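A common fix for this kind of packaging difference is an import fallback, which is what the patch below ends up doing; a minimal sketch (assuming the standalone `u3` module is importable on its own in the affected install):
```
try:
    from labjack import u3   # umbrella package, as documented on LabJack's website
except ImportError:
    import u3                # standalone u3 module bundled with some installs
```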
</issue>
<code>
[start of psychopy/demos/coder/hardware/labjack_u3.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 Demo for using labjack DAC devices
6
7 See also
8 http: //labjack.com/support/labjackpython
9 but note that the version shipped with standalone PsychoPy
10 has u3 (and others below an umbrella called labjack) so the import
11 line is slightly different to the documentation on LabJack's website
12 """
13
14 from __future__ import absolute_import, division, print_function
15
16 from builtins import range
17 from psychopy import visual, core, event, sound
18 from labjack import u3
19
20 # sound.setAudioAPI('pyaudio')
21
22 win = visual.Window([800, 800])
23 stim = visual.GratingStim(win, color=-1, sf=0)
24 snd = sound.Sound(880)
25 print(snd)
26 # setup labjack U3
27 ports = u3.U3()
28 FIO4 = 6004 # the address of line FIO4
29
30 while True:
31 # do this repeatedly for timing tests
32 ports.writeRegister(FIO4, 0) # start low
33
34 # draw black square
35 stim.draw()
36 win.flip()
37
38 # wait for a key press
39 if 'q' in event.waitKeys():
40 break
41
42 # set to white, flip window and raise level port FIO4
43 stim.setColor(1)
44 stim.draw()
45 win.flip()
46 ports.writeRegister(FIO4, 1)
47 snd.play()
48 for frameN in range(4):
49 stim.draw()
50 win.flip()
51
52 # set color back to black and set FIO4 to low again
53 stim.setColor(-1)
54 stim.draw()
55 win.flip()
56 ports.writeRegister(FIO4, 0)
57
58 win.close()
59 core.quit()
60
61 # The contents of this file are in the public domain.
62
[end of psychopy/demos/coder/hardware/labjack_u3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/psychopy/demos/coder/hardware/labjack_u3.py b/psychopy/demos/coder/hardware/labjack_u3.py
--- a/psychopy/demos/coder/hardware/labjack_u3.py
+++ b/psychopy/demos/coder/hardware/labjack_u3.py
@@ -15,7 +15,10 @@
from builtins import range
from psychopy import visual, core, event, sound
-from labjack import u3
+try:
+ from labjack import u3
+except ImportError:
+ import u3
# sound.setAudioAPI('pyaudio')
|
{"golden_diff": "diff --git a/psychopy/demos/coder/hardware/labjack_u3.py b/psychopy/demos/coder/hardware/labjack_u3.py\n--- a/psychopy/demos/coder/hardware/labjack_u3.py\n+++ b/psychopy/demos/coder/hardware/labjack_u3.py\n@@ -15,7 +15,10 @@\n \n from builtins import range\n from psychopy import visual, core, event, sound\n-from labjack import u3\n+try:\n+ from labjack import u3\n+except ImportError:\n+ import u3\n \n # sound.setAudioAPI('pyaudio')\n", "issue": "Demos -> Hardware -> testSoundLatency.py not working in v3.0.6\nRunning Demo -> Hardware -> testSoundLatency.py results in the following error message:\r\n```\r\n##### Running: C:\\Program Files (x86)\\PsychoPy3\\lib\\site-packages\\psychopy\\demos\\coder\\hardware\\testSoundLatency.py #####\r\npygame 1.9.4\r\nHello from the pygame community. https://www.pygame.org/contribute.html\r\nTraceback (most recent call last):\r\n File \"C:\\Program Files (x86)\\PsychoPy3\\lib\\site-packages\\psychopy\\demos\\coder\\hardware\\testSoundLatency.py\", line 16, in <module>\r\n from labjack import u3\r\nModuleNotFoundError: No module named 'labjack'\r\n```\r\nWindows 7, 64 bit, PsychoPy 3.0.6 64 bit standalone\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDemo for using labjack DAC devices\n\nSee also\n http: //labjack.com/support/labjackpython\nbut note that the version shipped with standalone PsychoPy\nhas u3 (and others below an umbrella called labjack) so the import\nline is slightly different to the documentation on LabJack's website\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom builtins import range\nfrom psychopy import visual, core, event, sound\nfrom labjack import u3\n\n# sound.setAudioAPI('pyaudio')\n\nwin = visual.Window([800, 800])\nstim = visual.GratingStim(win, color=-1, sf=0)\nsnd = sound.Sound(880)\nprint(snd)\n# setup labjack U3\nports = u3.U3()\nFIO4 = 6004 # the address of line FIO4\n\nwhile True:\n # do this repeatedly for timing tests\n ports.writeRegister(FIO4, 0) # start low\n\n # draw black square\n stim.draw()\n win.flip()\n\n # wait for a key press\n if 'q' in event.waitKeys():\n break\n\n # set to white, flip window and raise level port FIO4\n stim.setColor(1)\n stim.draw()\n win.flip()\n ports.writeRegister(FIO4, 1)\n snd.play()\n for frameN in range(4):\n stim.draw()\n win.flip()\n\n # set color back to black and set FIO4 to low again\n stim.setColor(-1)\n stim.draw()\n win.flip()\n ports.writeRegister(FIO4, 0)\n\nwin.close()\ncore.quit()\n\n# The contents of this file are in the public domain.\n", "path": "psychopy/demos/coder/hardware/labjack_u3.py"}]}
| 1,266 | 138 |
gh_patches_debug_18743
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-649
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support for ipv6 address format
**Describe the bug**:
An exception is thrown when the Python agent tries to parse a URL that contains an IPv6 address.
```
ValueError: too many values to unpack (expected 2)
File "elasticapm/contrib/django/middleware/__init__.py", line 162, in process_response
"request",
File "elasticapm/traces.py", line 333, in set_context
data = data()
File "elasticapm/contrib/django/middleware/__init__.py", line 160, in <lambda>
request, capture_body=self.client.config.capture_body in ("all", "transactions")
File "elasticapm/contrib/django/client.py", line 137, in get_data_from_request
result["url"] = get_url_dict(url)
File "elasticapm/utils/__init__.py", line 84, in get_url_dict
hostname, port = netloc.split(":")
```
**To Reproduce**
Not sure how I can manually replicate it, but below are the request details that are sent to our webapp.
```
curl \
--compressed \
-H "Accept: */*" \
-H "Accept-Encoding: gzip, deflate" \
-H "Connection: close" \
-H "Host: [::ffff:a9fe:a9fe]:80" \
-H "User-Agent: AWS Security Scanner" \
-H "X-Forwarded-For: 44.224.22.196, 10.255.0.2" \
-H "X-Forwarded-Port: 443" \
-H "X-Forwarded-Proto: https" \
-H "X-Real-Ip: 10.255.0.2" \
"http://[::ffff:a9fe:a9fe]/latest/dynamic/instance-identity/document"
```
**Expected behavior**:
The URL containing a hostname in IPv6 address format should be parsed correctly, and the Python agent should successfully process the request.
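For reference, a plain Python 3 illustration of the failure and of the standard-library accessors that already handle a bracketed IPv6 netloc (address and port taken from the request above):
```
from urllib.parse import urlparse

url = "http://[::ffff:a9fe:a9fe]:80/latest/dynamic/instance-identity/document"
netloc = urlparse(url).netloc          # '[::ffff:a9fe:a9fe]:80'

# Naive splitting breaks because the address itself contains colons, so the
# two-value unpack `hostname, port = netloc.split(":")` raises ValueError.
print(netloc.split(":"))               # ['[', '', 'ffff', 'a9fe', 'a9fe]', '80']

# urlparse already parses the bracketed form safely.
parsed = urlparse(url)
print(parsed.hostname)                 # '::ffff:a9fe:a9fe'
print(parsed.port)                     # 80
```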
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.5
- Framework and version [e.g. Django 2.1]: 2.1
- APM Server version: 6.7.1
- Agent version: 5.3.0
</issue>
<code>
[start of elasticapm/utils/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31 import base64
32 import os
33 import re
34 from functools import partial
35
36 from elasticapm.conf import constants
37 from elasticapm.utils import compat, encoding
38
39 try:
40 from functools import partialmethod
41
42 partial_types = (partial, partialmethod)
43 except ImportError:
44 # Python 2
45 partial_types = (partial,)
46
47
48 default_ports = {"https": 443, "http": 80, "postgresql": 5432}
49
50
51 def varmap(func, var, context=None, name=None):
52 """
53 Executes ``func(key_name, value)`` on all values,
54 recursively discovering dict and list scoped
55 values.
56 """
57 if context is None:
58 context = set()
59 objid = id(var)
60 if objid in context:
61 return func(name, "<...>")
62 context.add(objid)
63 if isinstance(var, dict):
64 ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))
65 elif isinstance(var, (list, tuple)):
66 ret = func(name, [varmap(func, f, context, name) for f in var])
67 else:
68 ret = func(name, var)
69 context.remove(objid)
70 return ret
71
72
73 def get_name_from_func(func):
74 # partials don't have `__module__` or `__name__`, so we use the values from the "inner" function
75 if isinstance(func, partial_types):
76 return "partial({})".format(get_name_from_func(func.func))
77 elif hasattr(func, "_partialmethod") and hasattr(func._partialmethod, "func"):
78 return "partial({})".format(get_name_from_func(func._partialmethod.func))
79
80 module = func.__module__
81
82 if hasattr(func, "__name__"):
83 view_name = func.__name__
84 else: # Fall back if there's no __name__
85 view_name = func.__class__.__name__
86
87 return "{0}.{1}".format(module, view_name)
88
89
90 def build_name_with_http_method_prefix(name, request):
91 return " ".join((request.method, name)) if name else name
92
93
94 def is_master_process():
95 # currently only recognizes uwsgi master process
96 try:
97 import uwsgi
98
99 return os.getpid() == uwsgi.masterpid()
100 except ImportError:
101 return False
102
103
104 def get_url_dict(url):
105 scheme, netloc, path, params, query, fragment = compat.urlparse.urlparse(url)
106 if ":" in netloc:
107 hostname, port = netloc.split(":")
108 else:
109 hostname, port = (netloc, None)
110 url_dict = {
111 "full": encoding.keyword_field(url),
112 "protocol": scheme + ":",
113 "hostname": encoding.keyword_field(hostname),
114 "pathname": encoding.keyword_field(path),
115 }
116 if port:
117 url_dict["port"] = port
118 if query:
119 url_dict["search"] = encoding.keyword_field("?" + query)
120 return url_dict
121
122
123 def sanitize_url(url):
124 if "@" not in url:
125 return url
126 parts = compat.urlparse.urlparse(url)
127 return url.replace("%s:%s" % (parts.username, parts.password), "%s:%s" % (parts.username, constants.MASK))
128
129
130 def read_pem_file(file_obj):
131 cert = b""
132 for line in file_obj:
133 if line.startswith(b"-----BEGIN CERTIFICATE-----"):
134 break
135 for line in file_obj:
136 if not line.startswith(b"-----END CERTIFICATE-----"):
137 cert += line.strip()
138 return base64.b64decode(cert)
139
140
141 def starmatch_to_regex(pattern):
142 i, n = 0, len(pattern)
143 res = []
144 while i < n:
145 c = pattern[i]
146 i = i + 1
147 if c == "*":
148 res.append(".*")
149 else:
150 res.append(re.escape(c))
151 return re.compile(r"(?:%s)\Z" % "".join(res), re.IGNORECASE | re.DOTALL)
152
[end of elasticapm/utils/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py
--- a/elasticapm/utils/__init__.py
+++ b/elasticapm/utils/__init__.py
@@ -102,21 +102,21 @@
def get_url_dict(url):
- scheme, netloc, path, params, query, fragment = compat.urlparse.urlparse(url)
- if ":" in netloc:
- hostname, port = netloc.split(":")
- else:
- hostname, port = (netloc, None)
+ parse_result = compat.urlparse.urlparse(url)
+
url_dict = {
"full": encoding.keyword_field(url),
- "protocol": scheme + ":",
- "hostname": encoding.keyword_field(hostname),
- "pathname": encoding.keyword_field(path),
+ "protocol": parse_result.scheme + ":",
+ "hostname": encoding.keyword_field(parse_result.hostname),
+ "pathname": encoding.keyword_field(parse_result.path),
}
+
+ port = None if parse_result.port is None else str(parse_result.port)
+
if port:
url_dict["port"] = port
- if query:
- url_dict["search"] = encoding.keyword_field("?" + query)
+ if parse_result.query:
+ url_dict["search"] = encoding.keyword_field("?" + parse_result.query)
return url_dict
|
{"golden_diff": "diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py\n--- a/elasticapm/utils/__init__.py\n+++ b/elasticapm/utils/__init__.py\n@@ -102,21 +102,21 @@\n \n \n def get_url_dict(url):\n- scheme, netloc, path, params, query, fragment = compat.urlparse.urlparse(url)\n- if \":\" in netloc:\n- hostname, port = netloc.split(\":\")\n- else:\n- hostname, port = (netloc, None)\n+ parse_result = compat.urlparse.urlparse(url)\n+\n url_dict = {\n \"full\": encoding.keyword_field(url),\n- \"protocol\": scheme + \":\",\n- \"hostname\": encoding.keyword_field(hostname),\n- \"pathname\": encoding.keyword_field(path),\n+ \"protocol\": parse_result.scheme + \":\",\n+ \"hostname\": encoding.keyword_field(parse_result.hostname),\n+ \"pathname\": encoding.keyword_field(parse_result.path),\n }\n+\n+ port = None if parse_result.port is None else str(parse_result.port)\n+\n if port:\n url_dict[\"port\"] = port\n- if query:\n- url_dict[\"search\"] = encoding.keyword_field(\"?\" + query)\n+ if parse_result.query:\n+ url_dict[\"search\"] = encoding.keyword_field(\"?\" + parse_result.query)\n return url_dict\n", "issue": "Support for ipv6 address format\n**Describe the bug**: \r\n\r\nException is thrown when python agent is trying to parse url, that contains ipv6 address.\r\n```\r\nValueError: too many values to unpack (expected 2)\r\n File \"elasticapm/contrib/django/middleware/__init__.py\", line 162, in process_response\r\n \"request\",\r\n File \"elasticapm/traces.py\", line 333, in set_context\r\n data = data()\r\n File \"elasticapm/contrib/django/middleware/__init__.py\", line 160, in <lambda>\r\n request, capture_body=self.client.config.capture_body in (\"all\", \"transactions\")\r\n File \"elasticapm/contrib/django/client.py\", line 137, in get_data_from_request\r\n result[\"url\"] = get_url_dict(url)\r\n File \"elasticapm/utils/__init__.py\", line 84, in get_url_dict\r\n hostname, port = netloc.split(\":\")\r\n```\r\n\r\n**To Reproduce**\r\n\r\nNot sure how can I manually replicate it, but below is the request details that is send to our webapp.\r\n\r\n```\r\ncurl \\\r\n --compressed \\\r\n -H \"Accept: */*\" \\\r\n -H \"Accept-Encoding: gzip, deflate\" \\\r\n -H \"Connection: close\" \\\r\n -H \"Host: [::ffff:a9fe:a9fe]:80\" \\\r\n -H \"User-Agent: AWS Security Scanner\" \\\r\n -H \"X-Forwarded-For: 44.224.22.196, 10.255.0.2\" \\\r\n -H \"X-Forwarded-Port: 443\" \\\r\n -H \"X-Forwarded-Proto: https\" \\\r\n -H \"X-Real-Ip: 10.255.0.2\" \\\r\n \"http://[::ffff:a9fe:a9fe]/latest/dynamic/instance-identity/document\"\r\n``` \r\n\r\n**Expected behavior**:\r\n\r\nThe url containing hostname in ipv6 address, should be parsed correctly and python agent should successfully process request.\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.5\r\n- Framework and version [e.g. 
Django 2.1]: 2.1\r\n- APM Server version: 6.7.1\r\n- Agent version: 5.3.0\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nimport base64\nimport os\nimport re\nfrom functools import partial\n\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import compat, encoding\n\ntry:\n from functools import partialmethod\n\n partial_types = (partial, partialmethod)\nexcept ImportError:\n # Python 2\n partial_types = (partial,)\n\n\ndefault_ports = {\"https\": 443, \"http\": 80, \"postgresql\": 5432}\n\n\ndef varmap(func, var, context=None, name=None):\n \"\"\"\n Executes ``func(key_name, value)`` on all values,\n recursively discovering dict and list scoped\n values.\n \"\"\"\n if context is None:\n context = set()\n objid = id(var)\n if objid in context:\n return func(name, \"<...>\")\n context.add(objid)\n if isinstance(var, dict):\n ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))\n elif isinstance(var, (list, tuple)):\n ret = func(name, [varmap(func, f, context, name) for f in var])\n else:\n ret = func(name, var)\n context.remove(objid)\n return ret\n\n\ndef get_name_from_func(func):\n # partials don't have `__module__` or `__name__`, so we use the values from the \"inner\" function\n if isinstance(func, partial_types):\n return \"partial({})\".format(get_name_from_func(func.func))\n elif hasattr(func, \"_partialmethod\") and hasattr(func._partialmethod, \"func\"):\n return \"partial({})\".format(get_name_from_func(func._partialmethod.func))\n\n module = func.__module__\n\n if hasattr(func, \"__name__\"):\n view_name = func.__name__\n else: # Fall back if there's no __name__\n view_name = func.__class__.__name__\n\n return \"{0}.{1}\".format(module, view_name)\n\n\ndef build_name_with_http_method_prefix(name, request):\n return \" \".join((request.method, name)) if name else name\n\n\ndef is_master_process():\n # currently only recognizes uwsgi master process\n try:\n import uwsgi\n\n return 
os.getpid() == uwsgi.masterpid()\n except ImportError:\n return False\n\n\ndef get_url_dict(url):\n scheme, netloc, path, params, query, fragment = compat.urlparse.urlparse(url)\n if \":\" in netloc:\n hostname, port = netloc.split(\":\")\n else:\n hostname, port = (netloc, None)\n url_dict = {\n \"full\": encoding.keyword_field(url),\n \"protocol\": scheme + \":\",\n \"hostname\": encoding.keyword_field(hostname),\n \"pathname\": encoding.keyword_field(path),\n }\n if port:\n url_dict[\"port\"] = port\n if query:\n url_dict[\"search\"] = encoding.keyword_field(\"?\" + query)\n return url_dict\n\n\ndef sanitize_url(url):\n if \"@\" not in url:\n return url\n parts = compat.urlparse.urlparse(url)\n return url.replace(\"%s:%s\" % (parts.username, parts.password), \"%s:%s\" % (parts.username, constants.MASK))\n\n\ndef read_pem_file(file_obj):\n cert = b\"\"\n for line in file_obj:\n if line.startswith(b\"-----BEGIN CERTIFICATE-----\"):\n break\n for line in file_obj:\n if not line.startswith(b\"-----END CERTIFICATE-----\"):\n cert += line.strip()\n return base64.b64decode(cert)\n\n\ndef starmatch_to_regex(pattern):\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), re.IGNORECASE | re.DOTALL)\n", "path": "elasticapm/utils/__init__.py"}]}
| 2,626 | 304 |
gh_patches_debug_33778
|
rasdani/github-patches
|
git_diff
|
praw-dev__praw-1957
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs: Font color of method names is unreasonably white on a white background when using dark theme
### Describe the Documentation Issue
Hey Praw maintainers, thanks for the great work.
I'm about to use this API and I'm really happy with what I've found so far.
The only sad part is I'll have to read the documentation on light theme. This is because of the issue in the title, pictured below, or [directly in the site but turn on **dark mode**](https://praw.readthedocs.io/en/stable/code_overview/reddit_instance.html#praw.Reddit.request):

### Attributes
- [X] Yes
### Location of the issue
https://praw.readthedocs.io/en/stable/code_overview/reddit_instance.html#praw.Reddit.request
### What did you expect to see?
method names a bit easier to read
### What did you actually see?
method names hard to read
### Proposed Fix
Gotta be a code color somewhere or a css rule to fix it
### Operating System/Web Browser
_No response_
### Anything else?
_No response_
</issue>
<code>
[start of setup.py]
1 """praw setup.py"""
2
3 import re
4 from codecs import open
5 from os import path
6
7 from setuptools import find_packages, setup
8
9 PACKAGE_NAME = "praw"
10 HERE = path.abspath(path.dirname(__file__))
11 with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
12 README = fp.read()
13 with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
14 VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
15
16 extras = {
17 "ci": ["coveralls"],
18 "dev": ["packaging"],
19 "lint": ["pre-commit"],
20 "readthedocs": ["sphinx", "sphinx-rtd-dark-mode", "sphinx_rtd_theme"],
21 "test": [
22 "betamax >=0.8, <0.9",
23 "betamax-matchers >=0.3.0, <0.5",
24 "pytest >=2.7.3",
25 "requests >=2.20.1, <3",
26 "urllib3 ==1.26.*, <2",
27 ],
28 }
29 extras["lint"] += extras["readthedocs"]
30 extras["dev"] += extras["lint"] + extras["test"]
31
32 setup(
33 name=PACKAGE_NAME,
34 author="Bryce Boe",
35 author_email="[email protected]",
36 python_requires="~=3.7",
37 classifiers=[
38 "Development Status :: 5 - Production/Stable",
39 "Environment :: Console",
40 "Intended Audience :: Developers",
41 "License :: OSI Approved :: BSD License",
42 "Natural Language :: English",
43 "Operating System :: OS Independent",
44 "Programming Language :: Python",
45 "Programming Language :: Python :: 3",
46 "Programming Language :: Python :: 3.7",
47 "Programming Language :: Python :: 3.8",
48 "Programming Language :: Python :: 3.9",
49 "Programming Language :: Python :: 3.10",
50 "Programming Language :: Python :: 3.11",
51 "Topic :: Utilities",
52 ],
53 description=(
54 'PRAW, an acronym for "Python Reddit API Wrapper", is a python package that'
55 " allows for simple access to Reddit's API."
56 ),
57 extras_require=extras,
58 install_requires=[
59 "prawcore >=2.1, <3",
60 "update_checker >=0.18",
61 "websocket-client >=0.54.0",
62 ],
63 keywords="reddit api wrapper",
64 license="Simplified BSD License",
65 long_description=README,
66 package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.png"]},
67 packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
68 project_urls={
69 "Change Log": "https://praw.readthedocs.io/en/latest/package_info/change_log.html",
70 "Documentation": "https://praw.readthedocs.io/",
71 "Issue Tracker": "https://github.com/praw-dev/praw/issues",
72 "Source Code": "https://github.com/praw-dev/praw",
73 },
74 version=VERSION,
75 )
76
[end of setup.py]
[start of docs/conf.py]
1 import os
2 import sys
3 from datetime import datetime
4
5 # Do not touch these. They use the local PRAW over the global PRAW.
6 sys.path.insert(0, ".")
7 sys.path.insert(1, "..")
8
9 from praw import __version__ # noqa: E402
10
11 copyright = datetime.today().strftime("%Y, Bryce Boe")
12 exclude_patterns = ["_build"]
13 extensions = [
14 "sphinx.ext.autodoc",
15 "sphinx.ext.intersphinx",
16 "sphinx_rtd_dark_mode",
17 "sphinx_rtd_theme",
18 ]
19 html_static_path = ["_static"]
20 html_theme = "sphinx_rtd_theme"
21 html_theme_options = {"collapse_navigation": True}
22 htmlhelp_basename = "PRAW"
23 intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
24 master_doc = "index"
25 nitpick_ignore = [
26 ("py:class", "IO"),
27 ("py:class", "prawcore.requestor.Requestor"),
28 ("py:class", "praw.models.redditors.PartialRedditor"),
29 ]
30 nitpicky = True
31 project = "PRAW"
32 pygments_style = "sphinx"
33 release = __version__
34 source_suffix = ".rst"
35 suppress_warnings = ["image.nonlocal_uri"]
36 version = ".".join(__version__.split(".", 2)[:2])
37
38 # Use RTD theme locally
39 if not os.environ.get("READTHEDOCS"):
40 import sphinx_rtd_theme
41
42 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
43
44
45 def skip(app, what, name, obj, skip, options):
46 if name in {
47 "__call__",
48 "__contains__",
49 "__getitem__",
50 "__init__",
51 "__iter__",
52 "__len__",
53 }:
54 return False
55 return skip
56
57
58 def setup(app):
59 app.connect("autodoc-skip-member", skip)
60 app.add_css_file("theme_override.css")
61
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,4 +1,3 @@
-import os
import sys
from datetime import datetime
@@ -13,12 +12,8 @@
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
- "sphinx_rtd_dark_mode",
- "sphinx_rtd_theme",
]
-html_static_path = ["_static"]
-html_theme = "sphinx_rtd_theme"
-html_theme_options = {"collapse_navigation": True}
+html_theme = "furo"
htmlhelp_basename = "PRAW"
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
master_doc = "index"
@@ -35,12 +30,6 @@
suppress_warnings = ["image.nonlocal_uri"]
version = ".".join(__version__.split(".", 2)[:2])
-# Use RTD theme locally
-if not os.environ.get("READTHEDOCS"):
- import sphinx_rtd_theme
-
- html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
-
def skip(app, what, name, obj, skip, options):
if name in {
@@ -57,4 +46,3 @@
def setup(app):
app.connect("autodoc-skip-member", skip)
- app.add_css_file("theme_override.css")
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@
"ci": ["coveralls"],
"dev": ["packaging"],
"lint": ["pre-commit"],
- "readthedocs": ["sphinx", "sphinx-rtd-dark-mode", "sphinx_rtd_theme"],
+ "readthedocs": ["furo", "sphinx"],
"test": [
"betamax >=0.8, <0.9",
"betamax-matchers >=0.3.0, <0.5",
@@ -51,7 +51,7 @@
"Topic :: Utilities",
],
description=(
- 'PRAW, an acronym for "Python Reddit API Wrapper", is a python package that'
+ 'PRAW, an acronym for "Python Reddit API Wrapper", is a Python package that'
" allows for simple access to Reddit's API."
),
extras_require=extras,
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,4 +1,3 @@\n-import os\n import sys\n from datetime import datetime\n \n@@ -13,12 +12,8 @@\n extensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n- \"sphinx_rtd_dark_mode\",\n- \"sphinx_rtd_theme\",\n ]\n-html_static_path = [\"_static\"]\n-html_theme = \"sphinx_rtd_theme\"\n-html_theme_options = {\"collapse_navigation\": True}\n+html_theme = \"furo\"\n htmlhelp_basename = \"PRAW\"\n intersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n master_doc = \"index\"\n@@ -35,12 +30,6 @@\n suppress_warnings = [\"image.nonlocal_uri\"]\n version = \".\".join(__version__.split(\".\", 2)[:2])\n \n-# Use RTD theme locally\n-if not os.environ.get(\"READTHEDOCS\"):\n- import sphinx_rtd_theme\n-\n- html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n-\n \n def skip(app, what, name, obj, skip, options):\n if name in {\n@@ -57,4 +46,3 @@\n \n def setup(app):\n app.connect(\"autodoc-skip-member\", skip)\n- app.add_css_file(\"theme_override.css\")\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,7 +17,7 @@\n \"ci\": [\"coveralls\"],\n \"dev\": [\"packaging\"],\n \"lint\": [\"pre-commit\"],\n- \"readthedocs\": [\"sphinx\", \"sphinx-rtd-dark-mode\", \"sphinx_rtd_theme\"],\n+ \"readthedocs\": [\"furo\", \"sphinx\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n@@ -51,7 +51,7 @@\n \"Topic :: Utilities\",\n ],\n description=(\n- 'PRAW, an acronym for \"Python Reddit API Wrapper\", is a python package that'\n+ 'PRAW, an acronym for \"Python Reddit API Wrapper\", is a Python package that'\n \" allows for simple access to Reddit's API.\"\n ),\n extras_require=extras,\n", "issue": "Docs: Font color of method names is unreasonably white on a white background when using dark theme\n### Describe the Documentation Issue\n\nHey Praw maintainers, thanks for the great work.\r\nI'm about to use this API and I'm really happy with what I've found so far.\r\nThe only sad part is I'll have to read the documentation on light theme. 
This is because of the issue in the title, pictured below, or [directly in the site but turn on **dark mode**](https://praw.readthedocs.io/en/stable/code_overview/reddit_instance.html#praw.Reddit.request):\r\n\n\n### Attributes\n\n- [X] Yes\n\n### Location of the issue\n\nhttps://praw.readthedocs.io/en/stable/code_overview/reddit_instance.html#praw.Reddit.request\n\n### What did you expect to see?\n\nmethod names a bit easier to read\n\n### What did you actually see?\n\nmethod names hard to read\n\n### Proposed Fix\n\nGotta be a code color somewhere or a css rule to fix it\n\n### Operating System/Web Browser\n\n_No response_\n\n### Anything else?\n\n_No response_\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"packaging\"],\n \"lint\": [\"pre-commit\"],\n \"readthedocs\": [\"sphinx\", \"sphinx-rtd-dark-mode\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"pytest >=2.7.3\",\n \"requests >=2.20.1, <3\",\n \"urllib3 ==1.26.*, <2\",\n ],\n}\nextras[\"lint\"] += extras[\"readthedocs\"]\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\"~=3.7\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Utilities\",\n ],\n description=(\n 'PRAW, an acronym for \"Python Reddit API Wrapper\", is a python package that'\n \" allows for simple access to Reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=2.1, <3\",\n \"update_checker >=0.18\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.png\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n project_urls={\n \"Change Log\": \"https://praw.readthedocs.io/en/latest/package_info/change_log.html\",\n \"Documentation\": \"https://praw.readthedocs.io/\",\n \"Issue Tracker\": \"https://github.com/praw-dev/praw/issues\",\n \"Source Code\": \"https://github.com/praw-dev/praw\",\n },\n version=VERSION,\n)\n", "path": "setup.py"}, {"content": "import os\nimport sys\nfrom datetime import datetime\n\n# Do not touch these. 
They use the local PRAW over the global PRAW.\nsys.path.insert(0, \".\")\nsys.path.insert(1, \"..\")\n\nfrom praw import __version__ # noqa: E402\n\ncopyright = datetime.today().strftime(\"%Y, Bryce Boe\")\nexclude_patterns = [\"_build\"]\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx_rtd_dark_mode\",\n \"sphinx_rtd_theme\",\n]\nhtml_static_path = [\"_static\"]\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_options = {\"collapse_navigation\": True}\nhtmlhelp_basename = \"PRAW\"\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\nmaster_doc = \"index\"\nnitpick_ignore = [\n (\"py:class\", \"IO\"),\n (\"py:class\", \"prawcore.requestor.Requestor\"),\n (\"py:class\", \"praw.models.redditors.PartialRedditor\"),\n]\nnitpicky = True\nproject = \"PRAW\"\npygments_style = \"sphinx\"\nrelease = __version__\nsource_suffix = \".rst\"\nsuppress_warnings = [\"image.nonlocal_uri\"]\nversion = \".\".join(__version__.split(\".\", 2)[:2])\n\n# Use RTD theme locally\nif not os.environ.get(\"READTHEDOCS\"):\n import sphinx_rtd_theme\n\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\ndef skip(app, what, name, obj, skip, options):\n if name in {\n \"__call__\",\n \"__contains__\",\n \"__getitem__\",\n \"__init__\",\n \"__iter__\",\n \"__len__\",\n }:\n return False\n return skip\n\n\ndef setup(app):\n app.connect(\"autodoc-skip-member\", skip)\n app.add_css_file(\"theme_override.css\")\n", "path": "docs/conf.py"}]}
| 2,211 | 533 |
gh_patches_debug_7880
|
rasdani/github-patches
|
git_diff
|
locustio__locust-841
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't rely on obsolete msgpack-python
msgpack-python looks obsolete -> https://pypi.org/project/msgpack-python/
"This package is deprecated. Install msgpack instead."
but msgpack doesn't provide pythonegg(msgpack-python).
Please consider switching to msgpack directly instead.
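
A small sketch of why this should be a drop-in swap: the deprecation note above indicates `msgpack-python` is just the old distribution name for the same library, so only the requirement string in `setup.py` changes and the import stays `msgpack` (values below are arbitrary):

```python
import msgpack  # same module name under either distribution name

payload = {"endpoint": "/login", "response_time": 42}
packed = msgpack.packb(payload)
print(msgpack.unpackb(packed, raw=False))  # round-trips back to the dict above
```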
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 import ast
3 import os
4 import re
5
6 from setuptools import find_packages, setup
7
8 # parse version from locust/__init__.py
9 _version_re = re.compile(r'__version__\s+=\s+(.*)')
10 _init_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), "locust", "__init__.py")
11 with open(_init_file, 'rb') as f:
12 version = str(ast.literal_eval(_version_re.search(
13 f.read().decode('utf-8')).group(1)))
14
15 setup(
16 name='locustio',
17 version=version,
18 description="Website load testing framework",
19 long_description="""Locust is a python utility for doing easy, distributed load testing of a web site""",
20 classifiers=[
21 "Topic :: Software Development :: Testing :: Traffic Generation",
22 "Development Status :: 4 - Beta",
23 "License :: OSI Approved :: MIT License",
24 "Operating System :: OS Independent",
25 "Programming Language :: Python",
26 "Programming Language :: Python :: 2",
27 "Programming Language :: Python :: 2.7",
28 "Programming Language :: Python :: 3",
29 "Programming Language :: Python :: 3.4",
30 "Programming Language :: Python :: 3.5",
31 "Programming Language :: Python :: 3.6",
32 "Intended Audience :: Developers",
33 "Intended Audience :: System Administrators",
34 ],
35 keywords='',
36 author='Jonatan Heyman, Carl Bystrom, Joakim Hamrén, Hugo Heyman',
37 author_email='',
38 url='https://locust.io/',
39 license='MIT',
40 packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
41 include_package_data=True,
42 zip_safe=False,
43 install_requires=["gevent>=1.2.2", "flask>=0.10.1", "requests>=2.9.1", "msgpack-python>=0.4.2", "six>=1.10.0", "pyzmq>=16.0.2"],
44 test_suite="locust.test",
45 tests_require=['mock'],
46 entry_points={
47 'console_scripts': [
48 'locust = locust.main:main',
49 ]
50 },
51 )
52
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
- install_requires=["gevent>=1.2.2", "flask>=0.10.1", "requests>=2.9.1", "msgpack-python>=0.4.2", "six>=1.10.0", "pyzmq>=16.0.2"],
+ install_requires=["gevent>=1.2.2", "flask>=0.10.1", "requests>=2.9.1", "msgpack>=0.4.2", "six>=1.10.0", "pyzmq>=16.0.2"],
test_suite="locust.test",
tests_require=['mock'],
entry_points={
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,7 @@\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n- install_requires=[\"gevent>=1.2.2\", \"flask>=0.10.1\", \"requests>=2.9.1\", \"msgpack-python>=0.4.2\", \"six>=1.10.0\", \"pyzmq>=16.0.2\"],\n+ install_requires=[\"gevent>=1.2.2\", \"flask>=0.10.1\", \"requests>=2.9.1\", \"msgpack>=0.4.2\", \"six>=1.10.0\", \"pyzmq>=16.0.2\"],\n test_suite=\"locust.test\",\n tests_require=['mock'],\n entry_points={\n", "issue": "Don't rely on obsolete msgpack-python\n\r\nmsgpack-python looks obsolete -> https://pypi.org/project/msgpack-python/\r\n\"This package is deprecated. Install msgpack instead.\"\r\n\r\nbut msgpack doesn't provide pythonegg(msgpack-python).\r\n\r\nPlease consider switching to msgpack directly instead.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\n\nfrom setuptools import find_packages, setup\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locustio',\n version=version,\n description=\"Website load testing framework\",\n long_description=\"\"\"Locust is a python utility for doing easy, distributed load testing of a web site\"\"\",\n classifiers=[\n \"Topic :: Software Development :: Testing :: Traffic Generation\",\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n ],\n keywords='',\n author='Jonatan Heyman, Carl Bystrom, Joakim Hamr\u00e9n, Hugo Heyman',\n author_email='',\n url='https://locust.io/',\n license='MIT',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\"gevent>=1.2.2\", \"flask>=0.10.1\", \"requests>=2.9.1\", \"msgpack-python>=0.4.2\", \"six>=1.10.0\", \"pyzmq>=16.0.2\"],\n test_suite=\"locust.test\",\n tests_require=['mock'],\n entry_points={\n 'console_scripts': [\n 'locust = locust.main:main',\n ]\n },\n)\n", "path": "setup.py"}]}
| 1,167 | 210 |
gh_patches_debug_28502
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-1560
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Colon in CTF name breaks emails
This is because of:
https://tools.ietf.org/html/rfc5322#section-2.2
This can probably be fixed with `"HE:tech" <[email protected]>`.
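
The standard library already knows how to do that quoting; a minimal sketch (the address is a placeholder) of `email.utils.formataddr`, which wraps display names containing RFC 5322 specials such as `:` in double quotes:

```python
from email.utils import formataddr

mailfrom = formataddr(("HE:tech", "ctf@example.com"))
print(mailfrom)  # "HE:tech" <ctf@example.com>
```

The patch below switches both mail backends to exactly this helper.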
</issue>
<code>
[start of CTFd/utils/email/smtp.py]
1 import smtplib
2 from email.message import EmailMessage
3 from socket import timeout
4
5 from CTFd.utils import get_app_config, get_config
6
7
8 def get_smtp(host, port, username=None, password=None, TLS=None, SSL=None, auth=None):
9 if SSL is None:
10 smtp = smtplib.SMTP(host, port, timeout=3)
11 else:
12 smtp = smtplib.SMTP_SSL(host, port, timeout=3)
13
14 if TLS:
15 smtp.starttls()
16
17 if auth:
18 smtp.login(username, password)
19 return smtp
20
21
22 def sendmail(addr, text, subject):
23 ctf_name = get_config("ctf_name")
24 mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR")
25 mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr)
26
27 data = {
28 "host": get_config("mail_server") or get_app_config("MAIL_SERVER"),
29 "port": int(get_config("mail_port") or get_app_config("MAIL_PORT")),
30 }
31 username = get_config("mail_username") or get_app_config("MAIL_USERNAME")
32 password = get_config("mail_password") or get_app_config("MAIL_PASSWORD")
33 TLS = get_config("mail_tls") or get_app_config("MAIL_TLS")
34 SSL = get_config("mail_ssl") or get_app_config("MAIL_SSL")
35 auth = get_config("mail_useauth") or get_app_config("MAIL_USEAUTH")
36
37 if username:
38 data["username"] = username
39 if password:
40 data["password"] = password
41 if TLS:
42 data["TLS"] = TLS
43 if SSL:
44 data["SSL"] = SSL
45 if auth:
46 data["auth"] = auth
47
48 try:
49 smtp = get_smtp(**data)
50
51 msg = EmailMessage()
52 msg.set_content(text)
53
54 msg["Subject"] = subject
55 msg["From"] = mailfrom_addr
56 msg["To"] = addr
57
58 smtp.send_message(msg)
59
60 smtp.quit()
61 return True, "Email sent"
62 except smtplib.SMTPException as e:
63 return False, str(e)
64 except timeout:
65 return False, "SMTP server connection timed out"
66 except Exception as e:
67 return False, str(e)
68
[end of CTFd/utils/email/smtp.py]
[start of CTFd/utils/email/mailgun.py]
1 import requests
2
3 from CTFd.utils import get_app_config, get_config
4
5
6 def sendmail(addr, text, subject):
7 ctf_name = get_config("ctf_name")
8 mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR")
9 mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr)
10
11 mailgun_base_url = get_config("mailgun_base_url") or get_app_config(
12 "MAILGUN_BASE_URL"
13 )
14 mailgun_api_key = get_config("mailgun_api_key") or get_app_config("MAILGUN_API_KEY")
15 try:
16 r = requests.post(
17 mailgun_base_url + "/messages",
18 auth=("api", mailgun_api_key),
19 data={
20 "from": mailfrom_addr,
21 "to": [addr],
22 "subject": subject,
23 "text": text,
24 },
25 timeout=1.0,
26 )
27 except requests.RequestException as e:
28 return (
29 False,
30 "{error} exception occured while handling your request".format(
31 error=type(e).__name__
32 ),
33 )
34
35 if r.status_code == 200:
36 return True, "Email sent"
37 else:
38 return False, "Mailgun settings are incorrect"
39
[end of CTFd/utils/email/mailgun.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/utils/email/mailgun.py b/CTFd/utils/email/mailgun.py
--- a/CTFd/utils/email/mailgun.py
+++ b/CTFd/utils/email/mailgun.py
@@ -1,3 +1,5 @@
+from email.utils import formataddr
+
import requests
from CTFd.utils import get_app_config, get_config
@@ -6,7 +8,7 @@
def sendmail(addr, text, subject):
ctf_name = get_config("ctf_name")
mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR")
- mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr)
+ mailfrom_addr = formataddr((ctf_name, mailfrom_addr))
mailgun_base_url = get_config("mailgun_base_url") or get_app_config(
"MAILGUN_BASE_URL"
diff --git a/CTFd/utils/email/smtp.py b/CTFd/utils/email/smtp.py
--- a/CTFd/utils/email/smtp.py
+++ b/CTFd/utils/email/smtp.py
@@ -1,5 +1,6 @@
import smtplib
from email.message import EmailMessage
+from email.utils import formataddr
from socket import timeout
from CTFd.utils import get_app_config, get_config
@@ -22,7 +23,7 @@
def sendmail(addr, text, subject):
ctf_name = get_config("ctf_name")
mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR")
- mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr)
+ mailfrom_addr = formataddr((ctf_name, mailfrom_addr))
data = {
"host": get_config("mail_server") or get_app_config("MAIL_SERVER"),
|
{"golden_diff": "diff --git a/CTFd/utils/email/mailgun.py b/CTFd/utils/email/mailgun.py\n--- a/CTFd/utils/email/mailgun.py\n+++ b/CTFd/utils/email/mailgun.py\n@@ -1,3 +1,5 @@\n+from email.utils import formataddr\n+\n import requests\n \n from CTFd.utils import get_app_config, get_config\n@@ -6,7 +8,7 @@\n def sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n- mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n+ mailfrom_addr = formataddr((ctf_name, mailfrom_addr))\n \n mailgun_base_url = get_config(\"mailgun_base_url\") or get_app_config(\n \"MAILGUN_BASE_URL\"\ndiff --git a/CTFd/utils/email/smtp.py b/CTFd/utils/email/smtp.py\n--- a/CTFd/utils/email/smtp.py\n+++ b/CTFd/utils/email/smtp.py\n@@ -1,5 +1,6 @@\n import smtplib\n from email.message import EmailMessage\n+from email.utils import formataddr\n from socket import timeout\n \n from CTFd.utils import get_app_config, get_config\n@@ -22,7 +23,7 @@\n def sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n- mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n+ mailfrom_addr = formataddr((ctf_name, mailfrom_addr))\n \n data = {\n \"host\": get_config(\"mail_server\") or get_app_config(\"MAIL_SERVER\"),\n", "issue": "Colon in CTF name breaks emails\nThis is because of:\r\n\r\nhttps://tools.ietf.org/html/rfc5322#section-2.2\r\n\r\nThis can probably be fixed with `\"HE:tech\" <[email protected]>`.\n", "before_files": [{"content": "import smtplib\nfrom email.message import EmailMessage\nfrom socket import timeout\n\nfrom CTFd.utils import get_app_config, get_config\n\n\ndef get_smtp(host, port, username=None, password=None, TLS=None, SSL=None, auth=None):\n if SSL is None:\n smtp = smtplib.SMTP(host, port, timeout=3)\n else:\n smtp = smtplib.SMTP_SSL(host, port, timeout=3)\n\n if TLS:\n smtp.starttls()\n\n if auth:\n smtp.login(username, password)\n return smtp\n\n\ndef sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n\n data = {\n \"host\": get_config(\"mail_server\") or get_app_config(\"MAIL_SERVER\"),\n \"port\": int(get_config(\"mail_port\") or get_app_config(\"MAIL_PORT\")),\n }\n username = get_config(\"mail_username\") or get_app_config(\"MAIL_USERNAME\")\n password = get_config(\"mail_password\") or get_app_config(\"MAIL_PASSWORD\")\n TLS = get_config(\"mail_tls\") or get_app_config(\"MAIL_TLS\")\n SSL = get_config(\"mail_ssl\") or get_app_config(\"MAIL_SSL\")\n auth = get_config(\"mail_useauth\") or get_app_config(\"MAIL_USEAUTH\")\n\n if username:\n data[\"username\"] = username\n if password:\n data[\"password\"] = password\n if TLS:\n data[\"TLS\"] = TLS\n if SSL:\n data[\"SSL\"] = SSL\n if auth:\n data[\"auth\"] = auth\n\n try:\n smtp = get_smtp(**data)\n\n msg = EmailMessage()\n msg.set_content(text)\n\n msg[\"Subject\"] = subject\n msg[\"From\"] = mailfrom_addr\n msg[\"To\"] = addr\n\n smtp.send_message(msg)\n\n smtp.quit()\n return True, \"Email sent\"\n except smtplib.SMTPException as e:\n return False, str(e)\n except timeout:\n return False, \"SMTP server connection timed out\"\n except Exception as e:\n return False, str(e)\n", "path": "CTFd/utils/email/smtp.py"}, {"content": "import requests\n\nfrom CTFd.utils import get_app_config, 
get_config\n\n\ndef sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n\n mailgun_base_url = get_config(\"mailgun_base_url\") or get_app_config(\n \"MAILGUN_BASE_URL\"\n )\n mailgun_api_key = get_config(\"mailgun_api_key\") or get_app_config(\"MAILGUN_API_KEY\")\n try:\n r = requests.post(\n mailgun_base_url + \"/messages\",\n auth=(\"api\", mailgun_api_key),\n data={\n \"from\": mailfrom_addr,\n \"to\": [addr],\n \"subject\": subject,\n \"text\": text,\n },\n timeout=1.0,\n )\n except requests.RequestException as e:\n return (\n False,\n \"{error} exception occured while handling your request\".format(\n error=type(e).__name__\n ),\n )\n\n if r.status_code == 200:\n return True, \"Email sent\"\n else:\n return False, \"Mailgun settings are incorrect\"\n", "path": "CTFd/utils/email/mailgun.py"}]}
| 1,578 | 399 |
gh_patches_debug_32865
|
rasdani/github-patches
|
git_diff
|
ycm-core__ycmd-448
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
replace `builtins` with `future.builtins`, see Valloric/YouCompleteMe/issues/2024
Basically, I just run:
`find . -type f -iname '*.py' -print0 | xargs -0 -P 2 -n 1 sed -i '' -e 's/from builtins/from future.builtins/g'`
This patch fixed YouCompleteMe on my machine (Mac OS 10.11.3).
> VIM - Vi IMproved 7.4 (2013 Aug 10, compiled Mar 30 2016 11:47:02)
> MacOS X (unix) version
> Included patches: 1-1655
> Compiled by Homebrew
> Huge version without GUI. Features included (+) or not (-):
> +acl +farsi +mouse_netterm +tag_binary
> +arabic +file_in_path +mouse_sgr +tag_old_static
> +autocmd +find_in_path -mouse_sysmouse -tag_any_white
> -balloon_eval +float +mouse_urxvt -tcl
> -browse +folding +mouse_xterm +terminfo
> ++builtin_terms -footer +multi_byte +termresponse
> +byte_offset +fork() +multi_lang +textobjects
> +channel -gettext -mzscheme +timers
> +cindent -hangul_input +netbeans_intg +title
> -clientserver +iconv +packages -toolbar
> +clipboard +insert_expand +path_extra +user_commands
> +cmdline_compl +job +perl +vertsplit
> +cmdline_hist +jumplist +persistent_undo +virtualedit
> +cmdline_info +keymap +postscript +visual
> +comments +langmap +printer +visualextra
> +conceal +libcall +profile +viminfo
> +cryptv +linebreak +python +vreplace
> +cscope +lispindent -python3 +wildignore
> +cursorbind +listcmds +quickfix +wildmenu
> +cursorshape +localmap +reltime +windows
> +dialog_con -lua +rightleft +writebackup
> +diff +menu +ruby -X11
> +digraphs +mksession +scrollbind -xfontset
> -dnd +modify_fname +signs -xim
> -ebcdic +mouse +smartindent -xsmp
> +emacs_tags -mouseshape +startuptime -xterm_clipboard
> +eval +mouse_dec +statusline -xterm_save
> +ex_extra -mouse_gpm -sun_workshop -xpm
> +extra_search -mouse_jsbterm +syntax
> system vimrc file: "$VIM/vimrc"
> user vimrc file: "$HOME/.vimrc"
> 2nd user vimrc file: "~/.vim/vimrc"
> user exrc file: "$HOME/.exrc"
> fall-back for $VIM: "/usr/local/share/vim"
> Compilation: /usr/bin/clang -c -I. -Iproto -DHAVE_CONFIG_H -F/usr/local/Frameworks -DMACOS_X_UNIX -Os -w -pipe -march=native -mmacosx-version-min=10.11 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
> Linking: /usr/bin/clang -L. -fstack-protector -L/usr/local/lib -L/usr/local/opt/libyaml/lib -L/usr/local/opt/openssl/lib -L/usr/local/opt/readline/lib -L/usr/local/lib -F/usr/local/Frameworks -Wl,-headerpad_max_install_names -o vim -lm -lncurses -liconv -framework Cocoa -fstack-protector -L/System/Library/Perl/5.18/darwin-thread-multi-2level/CORE -lperl -F/usr/local/Cellar/python/2.7.11/Frameworks -framework Python -lruby.2.3.0 -lobjc -L/usr/local/Cellar/ruby/2.3.0/lib
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="35" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/valloric/ycmd/446)
<!-- Reviewable:end -->
</issue>
<code>
[start of ycmd/server_utils.py]
1 # Copyright (C) 2013 Google Inc.
2 #
3 # This file is part of ycmd.
4 #
5 # ycmd is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # ycmd is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>.
17
18 from __future__ import unicode_literals
19 from __future__ import print_function
20 from __future__ import division
21 from __future__ import absolute_import
22 # No other imports from `future` because this module is loaded before we have
23 # put our submodules in sys.path
24
25 import sys
26 import os
27 import io
28
29 VERSION_FILENAME = 'CORE_VERSION'
30 CORE_NOT_COMPATIBLE_MESSAGE = (
31 'ycmd can\'t run: ycm_core lib too old, PLEASE RECOMPILE'
32 )
33
34 DIR_OF_CURRENT_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )
35
36
37 def SetUpPythonPath():
38 sys.path.insert( 0, os.path.join( DIR_OF_CURRENT_SCRIPT, '..' ) )
39
40 AddNearestThirdPartyFoldersToSysPath( __file__ )
41
42
43 def ExpectedCoreVersion():
44 filepath = os.path.join( DIR_OF_CURRENT_SCRIPT, '..', VERSION_FILENAME )
45 with io.open( filepath, encoding = 'utf8' ) as f:
46 return int( f.read() )
47
48
49 def CompatibleWithCurrentCoreVersion():
50 import ycm_core
51 try:
52 current_core_version = ycm_core.YcmCoreVersion()
53 except AttributeError:
54 return False
55 return ExpectedCoreVersion() == current_core_version
56
57
58 def AncestorFolders( path ):
59 folder = os.path.normpath( path )
60 while True:
61 parent = os.path.dirname( folder )
62 if parent == folder:
63 break
64 folder = parent
65 yield folder
66
67
68 def PathToNearestThirdPartyFolder( path ):
69 for folder in AncestorFolders( path ):
70 path_to_third_party = os.path.join( folder, 'third_party' )
71 if os.path.isdir( path_to_third_party ):
72 return path_to_third_party
73 return None
74
75
76 def AddNearestThirdPartyFoldersToSysPath( filepath ):
77 path_to_third_party = PathToNearestThirdPartyFolder( filepath )
78 if not path_to_third_party:
79 raise RuntimeError(
80 'No third_party folder found for: {0}'.format( filepath ) )
81
82 # NOTE: Any hacks for loading modules that can't be imported without custom
83 # logic need to be reproduced in run_tests.py as well.
84 for folder in os.listdir( path_to_third_party ):
85 # python-future needs special handling. Not only does it store the modules
86 # under its 'src' folder, but SOME of its modules are only meant to be
87 # accessible under py2, not py3. This is because these modules (like
88 # `queue`) are implementations of modules present in the py3 standard
89 # library. So to work around issues, we place the python-future last on
90 # sys.path so that they can be overriden by the standard library.
91 if folder == 'python-future':
92 folder = os.path.join( folder, 'src' )
93 sys.path.append( os.path.realpath( os.path.join( path_to_third_party,
94 folder ) ) )
95 continue
96 sys.path.insert( 0, os.path.realpath( os.path.join( path_to_third_party,
97 folder ) ) )
98
[end of ycmd/server_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ycmd/server_utils.py b/ycmd/server_utils.py
--- a/ycmd/server_utils.py
+++ b/ycmd/server_utils.py
@@ -25,6 +25,7 @@
import sys
import os
import io
+import re
VERSION_FILENAME = 'CORE_VERSION'
CORE_NOT_COMPATIBLE_MESSAGE = (
@@ -32,6 +33,7 @@
)
DIR_OF_CURRENT_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )
+DIR_PACKAGES_REGEX = re.compile( '(site|dist)-packages$' )
def SetUpPythonPath():
@@ -86,11 +88,19 @@
# under its 'src' folder, but SOME of its modules are only meant to be
# accessible under py2, not py3. This is because these modules (like
# `queue`) are implementations of modules present in the py3 standard
- # library. So to work around issues, we place the python-future last on
- # sys.path so that they can be overriden by the standard library.
+ # library. Furthermore, we need to be sure that they are not overriden by
+ # already installed packages (for example, the 'builtins' module from
+ # 'pies2overrides' or a different version of 'python-future'). To work
+ # around these issues, we place the python-future just before the first
+ # path ending with 'site-packages' (or 'dist-packages' for Debian-like
+ # distributions) so that its modules can be overridden by the standard
+ # library but not by installed packages.
if folder == 'python-future':
folder = os.path.join( folder, 'src' )
- sys.path.append( os.path.realpath( os.path.join( path_to_third_party,
+ packages_indices = ( sys.path.index( path ) for path in sys.path
+ if DIR_PACKAGES_REGEX.search( path ) )
+ sys.path.insert( next( packages_indices, len( sys.path ) ),
+ os.path.realpath( os.path.join( path_to_third_party,
folder ) ) )
continue
sys.path.insert( 0, os.path.realpath( os.path.join( path_to_third_party,
|
{"golden_diff": "diff --git a/ycmd/server_utils.py b/ycmd/server_utils.py\n--- a/ycmd/server_utils.py\n+++ b/ycmd/server_utils.py\n@@ -25,6 +25,7 @@\n import sys\n import os\n import io\n+import re\n \n VERSION_FILENAME = 'CORE_VERSION'\n CORE_NOT_COMPATIBLE_MESSAGE = (\n@@ -32,6 +33,7 @@\n )\n \n DIR_OF_CURRENT_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )\n+DIR_PACKAGES_REGEX = re.compile( '(site|dist)-packages$' )\n \n \n def SetUpPythonPath():\n@@ -86,11 +88,19 @@\n # under its 'src' folder, but SOME of its modules are only meant to be\n # accessible under py2, not py3. This is because these modules (like\n # `queue`) are implementations of modules present in the py3 standard\n- # library. So to work around issues, we place the python-future last on\n- # sys.path so that they can be overriden by the standard library.\n+ # library. Furthermore, we need to be sure that they are not overriden by\n+ # already installed packages (for example, the 'builtins' module from\n+ # 'pies2overrides' or a different version of 'python-future'). To work\n+ # around these issues, we place the python-future just before the first\n+ # path ending with 'site-packages' (or 'dist-packages' for Debian-like\n+ # distributions) so that its modules can be overridden by the standard\n+ # library but not by installed packages.\n if folder == 'python-future':\n folder = os.path.join( folder, 'src' )\n- sys.path.append( os.path.realpath( os.path.join( path_to_third_party,\n+ packages_indices = ( sys.path.index( path ) for path in sys.path\n+ if DIR_PACKAGES_REGEX.search( path ) )\n+ sys.path.insert( next( packages_indices, len( sys.path ) ),\n+ os.path.realpath( os.path.join( path_to_third_party,\n folder ) ) )\n continue\n sys.path.insert( 0, os.path.realpath( os.path.join( path_to_third_party,\n", "issue": "replace `builtins` with `future.builtins`, see Valloric/YouCompleteMe/issues/2024\nBasically, I just run:\n`find . -type f -iname '*.py' -print0 | xargs -0 -P 2 -n 1 sed -i '' -e 's/from builtins/from future.builtins/g'`\nThis patch fixed YouCompleteMe on my machine (Mac OS 10.11.3).\n\n> VIM - Vi IMproved 7.4 (2013 Aug 10, compiled Mar 30 2016 11:47:02)\n> MacOS X (unix) version\n> Included patches: 1-1655\n> Compiled by Homebrew\n> Huge version without GUI. 
Features included (+) or not (-):\n> +acl +farsi +mouse_netterm +tag_binary\n> +arabic +file_in_path +mouse_sgr +tag_old_static\n> +autocmd +find_in_path -mouse_sysmouse -tag_any_white\n> -balloon_eval +float +mouse_urxvt -tcl\n> -browse +folding +mouse_xterm +terminfo\n> ++builtin_terms -footer +multi_byte +termresponse\n> +byte_offset +fork() +multi_lang +textobjects\n> +channel -gettext -mzscheme +timers\n> +cindent -hangul_input +netbeans_intg +title\n> -clientserver +iconv +packages -toolbar\n> +clipboard +insert_expand +path_extra +user_commands\n> +cmdline_compl +job +perl +vertsplit\n> +cmdline_hist +jumplist +persistent_undo +virtualedit\n> +cmdline_info +keymap +postscript +visual\n> +comments +langmap +printer +visualextra\n> +conceal +libcall +profile +viminfo\n> +cryptv +linebreak +python +vreplace\n> +cscope +lispindent -python3 +wildignore\n> +cursorbind +listcmds +quickfix +wildmenu\n> +cursorshape +localmap +reltime +windows\n> +dialog_con -lua +rightleft +writebackup\n> +diff +menu +ruby -X11\n> +digraphs +mksession +scrollbind -xfontset\n> -dnd +modify_fname +signs -xim\n> -ebcdic +mouse +smartindent -xsmp\n> +emacs_tags -mouseshape +startuptime -xterm_clipboard\n> +eval +mouse_dec +statusline -xterm_save\n> +ex_extra -mouse_gpm -sun_workshop -xpm\n> +extra_search -mouse_jsbterm +syntax \n> system vimrc file: \"$VIM/vimrc\"\n> user vimrc file: \"$HOME/.vimrc\"\n> 2nd user vimrc file: \"~/.vim/vimrc\"\n> user exrc file: \"$HOME/.exrc\"\n> fall-back for $VIM: \"/usr/local/share/vim\"\n> Compilation: /usr/bin/clang -c -I. -Iproto -DHAVE_CONFIG_H -F/usr/local/Frameworks -DMACOS_X_UNIX -Os -w -pipe -march=native -mmacosx-version-min=10.11 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 \n> Linking: /usr/bin/clang -L. -fstack-protector -L/usr/local/lib -L/usr/local/opt/libyaml/lib -L/usr/local/opt/openssl/lib -L/usr/local/opt/readline/lib -L/usr/local/lib -F/usr/local/Frameworks -Wl,-headerpad_max_install_names -o vim -lm -lncurses -liconv -framework Cocoa -fstack-protector -L/System/Library/Perl/5.18/darwin-thread-multi-2level/CORE -lperl -F/usr/local/Cellar/python/2.7.11/Frameworks -framework Python -lruby.2.3.0 -lobjc -L/usr/local/Cellar/ruby/2.3.0/lib\n\n<!-- Reviewable:start -->\n\n---\n\nThis change is [<img src=\"https://reviewable.io/review_button.svg\" height=\"35\" align=\"absmiddle\" alt=\"Reviewable\"/>](https://reviewable.io/reviews/valloric/ycmd/446)\n\n<!-- Reviewable:end -->\n\n", "before_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n# No other imports from `future` because this module is loaded before we have\n# put our submodules in sys.path\n\nimport sys\nimport os\nimport io\n\nVERSION_FILENAME = 'CORE_VERSION'\nCORE_NOT_COMPATIBLE_MESSAGE = (\n 'ycmd can\\'t run: ycm_core lib too old, PLEASE RECOMPILE'\n)\n\nDIR_OF_CURRENT_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )\n\n\ndef SetUpPythonPath():\n sys.path.insert( 0, os.path.join( DIR_OF_CURRENT_SCRIPT, '..' ) )\n\n AddNearestThirdPartyFoldersToSysPath( __file__ )\n\n\ndef ExpectedCoreVersion():\n filepath = os.path.join( DIR_OF_CURRENT_SCRIPT, '..', VERSION_FILENAME )\n with io.open( filepath, encoding = 'utf8' ) as f:\n return int( f.read() )\n\n\ndef CompatibleWithCurrentCoreVersion():\n import ycm_core\n try:\n current_core_version = ycm_core.YcmCoreVersion()\n except AttributeError:\n return False\n return ExpectedCoreVersion() == current_core_version\n\n\ndef AncestorFolders( path ):\n folder = os.path.normpath( path )\n while True:\n parent = os.path.dirname( folder )\n if parent == folder:\n break\n folder = parent\n yield folder\n\n\ndef PathToNearestThirdPartyFolder( path ):\n for folder in AncestorFolders( path ):\n path_to_third_party = os.path.join( folder, 'third_party' )\n if os.path.isdir( path_to_third_party ):\n return path_to_third_party\n return None\n\n\ndef AddNearestThirdPartyFoldersToSysPath( filepath ):\n path_to_third_party = PathToNearestThirdPartyFolder( filepath )\n if not path_to_third_party:\n raise RuntimeError(\n 'No third_party folder found for: {0}'.format( filepath ) )\n\n # NOTE: Any hacks for loading modules that can't be imported without custom\n # logic need to be reproduced in run_tests.py as well.\n for folder in os.listdir( path_to_third_party ):\n # python-future needs special handling. Not only does it store the modules\n # under its 'src' folder, but SOME of its modules are only meant to be\n # accessible under py2, not py3. This is because these modules (like\n # `queue`) are implementations of modules present in the py3 standard\n # library. So to work around issues, we place the python-future last on\n # sys.path so that they can be overriden by the standard library.\n if folder == 'python-future':\n folder = os.path.join( folder, 'src' )\n sys.path.append( os.path.realpath( os.path.join( path_to_third_party,\n folder ) ) )\n continue\n sys.path.insert( 0, os.path.realpath( os.path.join( path_to_third_party,\n folder ) ) )\n", "path": "ycmd/server_utils.py"}]}
| 2,611 | 494 |
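The golden diff in the ycmd record above moves the vendored python-future directory so it sits just before the first `site-packages`/`dist-packages` entry on `sys.path`: its stdlib backports can still be shadowed by the real standard library, but not by installed packages such as `pies2overrides`. A stripped-down sketch of that insertion logic (the path passed in at the bottom is made up for illustration):

```python
import re
import sys

DIR_PACKAGES_REGEX = re.compile(r'(site|dist)-packages$')


def insert_before_packages(path_to_add):
    # Index of the first sys.path entry ending in site-packages/dist-packages;
    # if there is none, fall back to appending at the end of sys.path.
    packages_indices = (i for i, p in enumerate(sys.path)
                        if DIR_PACKAGES_REGEX.search(p))
    sys.path.insert(next(packages_indices, len(sys.path)), path_to_add)


insert_before_packages('/opt/ycmd/third_party/python-future/src')
```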
gh_patches_debug_29426
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-1820
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running jupyterhub upgrade-db with PostgreSQL database fails
**How to reproduce the issue**
Run `jupyterhub upgrade-db` with a PostgreSQL database to upgrade to 99a28a4418e1.
**What you expected to happen**
Successful schema update.
**What actually happens**
It fails with an sqlalchemy `ProgrammingError` message that originates here:
https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/alembic/versions/99a28a4418e1_user_created.py#L40
in particular I think that should be `IS NOT NULL` not just `NOT NULL`. I substituted this live and it allowed the upgrade to proceed.
**Share what version of JupyterHub you are using**
Latest master.
</issue>
<code>
[start of jupyterhub/alembic/versions/99a28a4418e1_user_created.py]
1 """user.created and spawner.started
2
3 Revision ID: 99a28a4418e1
4 Revises: 56cc5a70207e
5 Create Date: 2018-03-21 14:27:17.466841
6
7 """
8
9 # revision identifiers, used by Alembic.
10 revision = '99a28a4418e1'
11 down_revision = '56cc5a70207e'
12 branch_labels = None
13 depends_on = None
14
15
16 from alembic import op
17 import sqlalchemy as sa
18
19 from datetime import datetime
20
21 def upgrade():
22 op.add_column('users', sa.Column('created', sa.DateTime, nullable=True))
23 c = op.get_bind()
24 # fill created date with current time
25 now = datetime.utcnow()
26 c.execute("""
27 UPDATE users
28 SET created='%s'
29 """ % (now,)
30 )
31
32 tables = c.engine.table_names()
33
34 if 'spawners' in tables:
35 op.add_column('spawners', sa.Column('started', sa.DateTime, nullable=True))
36 # fill started value with now for running servers
37 c.execute("""
38 UPDATE spawners
39 SET started='%s'
40 WHERE server_id NOT NULL
41 """ % (now,)
42 )
43
44
45 def downgrade():
46 op.drop_column('users', 'created')
47 op.drop_column('spawners', 'started')
48
[end of jupyterhub/alembic/versions/99a28a4418e1_user_created.py]
[start of jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py]
1 """token tracking
2
3 Revision ID: 56cc5a70207e
4 Revises: 1cebaf56856c
5 Create Date: 2017-12-19 15:21:09.300513
6
7 """
8
9 # revision identifiers, used by Alembic.
10 revision = '56cc5a70207e'
11 down_revision = '1cebaf56856c'
12 branch_labels = None
13 depends_on = None
14
15 from alembic import op
16 import sqlalchemy as sa
17
18
19 def upgrade():
20 tables = op.get_bind().engine.table_names()
21 op.add_column('api_tokens', sa.Column('created', sa.DateTime(), nullable=True))
22 op.add_column('api_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))
23 op.add_column('api_tokens', sa.Column('note', sa.Unicode(length=1023), nullable=True))
24 if 'oauth_access_tokens' in tables:
25 op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True))
26 op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))
27 op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')
28 op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')
29
30
31 def downgrade():
32 op.drop_constraint(None, 'oauth_codes', type_='foreignkey')
33 op.drop_constraint(None, 'oauth_access_tokens', type_='foreignkey')
34 op.drop_column('oauth_access_tokens', 'last_activity')
35 op.drop_column('oauth_access_tokens', 'created')
36 op.drop_column('api_tokens', 'note')
37 op.drop_column('api_tokens', 'last_activity')
38 op.drop_column('api_tokens', 'created')
39
[end of jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py
--- a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py
+++ b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py
@@ -15,6 +15,9 @@
from alembic import op
import sqlalchemy as sa
+import logging
+logger = logging.getLogger('alembic')
+
def upgrade():
tables = op.get_bind().engine.table_names()
@@ -24,8 +27,11 @@
if 'oauth_access_tokens' in tables:
op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True))
op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))
- op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')
- op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')
+ if op.get_context().dialect.name == 'sqlite':
+ logger.warning("sqlite cannot use ALTER TABLE to create foreign keys. Upgrade will be incomplete.")
+ else:
+ op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')
+ op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')
def downgrade():
diff --git a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py
--- a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py
+++ b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py
@@ -37,7 +37,7 @@
c.execute("""
UPDATE spawners
SET started='%s'
- WHERE server_id NOT NULL
+ WHERE server_id IS NOT NULL
""" % (now,)
)
|
{"golden_diff": "diff --git a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py\n--- a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py\n+++ b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py\n@@ -15,6 +15,9 @@\n from alembic import op\n import sqlalchemy as sa\n \n+import logging\n+logger = logging.getLogger('alembic')\n+\n \n def upgrade():\n tables = op.get_bind().engine.table_names()\n@@ -24,8 +27,11 @@\n if 'oauth_access_tokens' in tables:\n op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True))\n op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))\n- op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n- op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n+ if op.get_context().dialect.name == 'sqlite':\n+ logger.warning(\"sqlite cannot use ALTER TABLE to create foreign keys. Upgrade will be incomplete.\")\n+ else:\n+ op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n+ op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n \n \n def downgrade():\ndiff --git a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py\n--- a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py\n+++ b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py\n@@ -37,7 +37,7 @@\n c.execute(\"\"\"\n UPDATE spawners\n SET started='%s'\n- WHERE server_id NOT NULL\n+ WHERE server_id IS NOT NULL\n \"\"\" % (now,)\n )\n", "issue": "Running jupyterhub upgrade-db with PostgreSQL database fails\n**How to reproduce the issue**\r\n\r\nRun `jupyterhub upgrade-db` with a PostgreSQL database to upgrade to 99a28a4418e1.\r\n\r\n**What you expected to happen**\r\n\r\nSuccessful schema update.\r\n\r\n**What actually happens**\r\n\r\nIt fails with an sqlalchemy `ProgrammingError` message that originates here:\r\n\r\nhttps://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/alembic/versions/99a28a4418e1_user_created.py#L40\r\n\r\nin particular I think that should be `IS NOT NULL` not just `NOT NULL`. 
I substituted this live and it allowed the upgrade to proceed.\r\n\r\n**Share what version of JupyterHub you are using**\r\n\r\nLatest master.\n", "before_files": [{"content": "\"\"\"user.created and spawner.started\n\nRevision ID: 99a28a4418e1\nRevises: 56cc5a70207e\nCreate Date: 2018-03-21 14:27:17.466841\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '99a28a4418e1'\ndown_revision = '56cc5a70207e'\nbranch_labels = None\ndepends_on = None\n\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom datetime import datetime\n\ndef upgrade():\n op.add_column('users', sa.Column('created', sa.DateTime, nullable=True))\n c = op.get_bind()\n # fill created date with current time\n now = datetime.utcnow()\n c.execute(\"\"\"\n UPDATE users\n SET created='%s'\n \"\"\" % (now,)\n )\n\n tables = c.engine.table_names()\n\n if 'spawners' in tables:\n op.add_column('spawners', sa.Column('started', sa.DateTime, nullable=True))\n # fill started value with now for running servers\n c.execute(\"\"\"\n UPDATE spawners\n SET started='%s'\n WHERE server_id NOT NULL\n \"\"\" % (now,)\n )\n\n\ndef downgrade():\n op.drop_column('users', 'created')\n op.drop_column('spawners', 'started')\n", "path": "jupyterhub/alembic/versions/99a28a4418e1_user_created.py"}, {"content": "\"\"\"token tracking\n\nRevision ID: 56cc5a70207e\nRevises: 1cebaf56856c\nCreate Date: 2017-12-19 15:21:09.300513\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '56cc5a70207e'\ndown_revision = '1cebaf56856c'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n tables = op.get_bind().engine.table_names()\n op.add_column('api_tokens', sa.Column('created', sa.DateTime(), nullable=True))\n op.add_column('api_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))\n op.add_column('api_tokens', sa.Column('note', sa.Unicode(length=1023), nullable=True))\n if 'oauth_access_tokens' in tables:\n op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True))\n op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))\n op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth_codes', type_='foreignkey')\n op.drop_constraint(None, 'oauth_access_tokens', type_='foreignkey')\n op.drop_column('oauth_access_tokens', 'last_activity')\n op.drop_column('oauth_access_tokens', 'created')\n op.drop_column('api_tokens', 'note')\n op.drop_column('api_tokens', 'last_activity')\n op.drop_column('api_tokens', 'created')\n", "path": "jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py"}]}
| 1,692 | 547 |
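The failing statement in the migration above is `WHERE server_id NOT NULL`, which SQLite tolerates but PostgreSQL rejects; the portable predicate is `IS NOT NULL`. A rough sketch of just the problematic UPDATE written with SQLAlchemy's expression language, which renders the correct predicate on either dialect, might look like this (it reuses the table and column names from the migration; everything else is illustrative rather than JupyterHub's actual code):

```python
from datetime import datetime

import sqlalchemy as sa
from alembic import op


def upgrade():
    bind = op.get_bind()
    spawners = sa.Table('spawners', sa.MetaData(), autoload_with=bind)

    # .isnot(None) is emitted as "server_id IS NOT NULL" on both SQLite
    # and PostgreSQL, avoiding the dialect-specific raw SQL string.
    bind.execute(
        spawners.update()
        .where(spawners.c.server_id.isnot(None))
        .values(started=datetime.utcnow())
    )
```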
gh_patches_debug_29951
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-9878
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] FutureWarning: pyarrow.hdfs.connect is deprecated
### Willingness to contribute
Yes. I can contribute a fix for this bug independently.
### MLflow version
2.2.2
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: CentOS 7
- **Python version**: 3.9.6
- **pyarrow version**: 5.0.0
### Describe the problem
When loading a model from the MLflow Model Registry (backend store is HDFS) I get the following warning:
```
legobricks/lib/python3.9/site-packages/mlflow/store/artifact/hdfs_artifact_repo.py:188: FutureWarning: pyarrow.hdfs.connect is deprecated as of 2.0.0, please use pyarrow.fs.HadoopFileSystem instead.
connected = pa.hdfs.connect(
```
### Steps to reproduce the bug
1. Create a conda env with mlflow and pyarrow installed
2. Start Mlflow with hdfs as backend store
3. Train a model and save it
4. Store the trained model in the model registry
5. Load model from the registry
### Code to generate data required to reproduce the bug
_No response_
### Is the console panel in DevTools showing errors relevant to the bug?
_No response_
### Does the network panel in DevTools contain failed requests relevant to the bug?
_No response_
</issue>
<code>
[start of mlflow/store/artifact/hdfs_artifact_repo.py]
1 import os
2 import posixpath
3 import tempfile
4 import urllib.parse
5 from contextlib import contextmanager
6
7 from mlflow.entities import FileInfo
8 from mlflow.environment_variables import (
9 MLFLOW_KERBEROS_TICKET_CACHE,
10 MLFLOW_KERBEROS_USER,
11 MLFLOW_PYARROW_EXTRA_CONF,
12 )
13 from mlflow.exceptions import MlflowException
14 from mlflow.store.artifact.artifact_repo import ArtifactRepository
15 from mlflow.utils.file_utils import mkdir, relative_path_to_artifact_path
16
17
18 class HdfsArtifactRepository(ArtifactRepository):
19 """
20 Stores artifacts on HDFS.
21
22 This repository is used with URIs of the form ``hdfs:/<path>``. The repository can only be used
23 together with the RestStore.
24 """
25
26 def __init__(self, artifact_uri):
27 self.scheme, self.host, self.port, self.path = _resolve_connection_params(artifact_uri)
28 super().__init__(artifact_uri)
29
30 def log_artifact(self, local_file, artifact_path=None):
31 """
32 Log artifact in hdfs.
33 :param local_file: source file path
34 :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path
35 """
36 hdfs_base_path = _resolve_base_path(self.path, artifact_path)
37
38 with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:
39 _, file_name = os.path.split(local_file)
40 destination = posixpath.join(hdfs_base_path, file_name)
41 with open(local_file, "rb") as f:
42 hdfs.upload(destination, f)
43
44 def log_artifacts(self, local_dir, artifact_path=None):
45 """
46 Log artifacts in hdfs.
47 Missing remote sub-directories will be created if needed.
48 :param local_dir: source dir path
49 :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path
50 """
51 hdfs_base_path = _resolve_base_path(self.path, artifact_path)
52
53 with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:
54 if not hdfs.exists(hdfs_base_path):
55 hdfs.mkdir(hdfs_base_path)
56
57 for subdir_path, _, files in os.walk(local_dir):
58 relative_path = _relative_path_local(local_dir, subdir_path)
59
60 hdfs_subdir_path = (
61 posixpath.join(hdfs_base_path, relative_path)
62 if relative_path
63 else hdfs_base_path
64 )
65
66 if not hdfs.exists(hdfs_subdir_path):
67 hdfs.mkdir(hdfs_subdir_path)
68
69 for each_file in files:
70 source = os.path.join(subdir_path, each_file)
71 destination = posixpath.join(hdfs_subdir_path, each_file)
72 with open(source, "rb") as f:
73 hdfs.upload(destination, f)
74
75 def list_artifacts(self, path=None):
76 """
77 Lists files and directories under artifacts directory for the current run_id.
78 (self.path contains the base path - hdfs:/some/path/run_id/artifacts)
79
80 :param path: Relative source path. Possible subdirectory existing under
81 hdfs:/some/path/run_id/artifacts
82 :return: List of FileInfos under given path
83 """
84 hdfs_base_path = _resolve_base_path(self.path, path)
85
86 with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:
87 paths = []
88 if hdfs.exists(hdfs_base_path):
89 for file_detail in hdfs.ls(hdfs_base_path, detail=True):
90 file_name = file_detail.get("name")
91
92 # file_name is hdfs_base_path and not a child of that path
93 if file_name == hdfs_base_path:
94 continue
95
96 # Strip off anything that comes before the artifact root e.g. hdfs://name
97 offset = file_name.index(self.path)
98 rel_path = _relative_path_remote(self.path, file_name[offset:])
99 is_dir = file_detail.get("kind") == "directory"
100 size = file_detail.get("size")
101 paths.append(FileInfo(rel_path, is_dir=is_dir, file_size=size))
102 return sorted(paths, key=lambda f: paths)
103
104 def _walk_path(self, hdfs, hdfs_path):
105 if hdfs.exists(hdfs_path):
106 if hdfs.isdir(hdfs_path):
107 for subdir, _, files in hdfs.walk(hdfs_path):
108 if subdir != hdfs_path:
109 yield subdir, hdfs.isdir(subdir), hdfs.info(subdir).get("size")
110 for f in files:
111 file_path = posixpath.join(subdir, f)
112 yield file_path, hdfs.isdir(file_path), hdfs.info(file_path).get("size")
113 else:
114 yield hdfs_path, False, hdfs.info(hdfs_path).get("size")
115
116 def download_artifacts(self, artifact_path, dst_path=None):
117 """
118 Download an artifact file or directory to a local directory/file if applicable, and
119 return a local path for it.
120 The caller is responsible for managing the lifecycle of the downloaded artifacts.
121
122 (self.path contains the base path - hdfs:/some/path/run_id/artifacts)
123
124 :param artifact_path: Relative source path to the desired artifacts file or directory.
125 :param dst_path: Absolute path of the local filesystem destination directory to which
126 to download the specified artifacts. This directory must already
127 exist. If unspecified, the artifacts will be downloaded to a new,
128 uniquely-named
129 directory on the local filesystem.
130
131 :return: Absolute path of the local filesystem location containing the downloaded
132 artifacts - file/directory.
133 """
134
135 hdfs_base_path = _resolve_base_path(self.path, artifact_path)
136 if dst_path and os.path.exists(dst_path):
137 local_dir = os.path.abspath(dst_path)
138 else:
139 local_dir = _tmp_dir(dst_path)
140
141 with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:
142 if not hdfs.isdir(hdfs_base_path):
143 local_path = os.path.join(local_dir, os.path.normpath(artifact_path))
144 _download_hdfs_file(hdfs, hdfs_base_path, local_path)
145 return local_path
146
147 for path, is_dir, _ in self._walk_path(hdfs, hdfs_base_path):
148 relative_path = _relative_path_remote(hdfs_base_path, path)
149 local_path = os.path.join(local_dir, relative_path) if relative_path else local_dir
150
151 if is_dir:
152 mkdir(local_path)
153 else:
154 _download_hdfs_file(hdfs, path, local_path)
155 return local_dir
156
157 def _download_file(self, remote_file_path, local_path):
158 raise MlflowException("This is not implemented. Should never be called.")
159
160 def delete_artifacts(self, artifact_path=None):
161 path = posixpath.join(self.path, artifact_path) if artifact_path else self.path
162 with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:
163 hdfs.delete(path, recursive=True)
164
165
166 @contextmanager
167 def hdfs_system(scheme, host, port):
168 """
169 hdfs system context - Attempt to establish the connection to hdfs
170 and yields HadoopFileSystem
171
172 :param scheme: scheme or use hdfs:// as default
173 :param host: hostname or when relaying on the core-site.xml config use 'default'
174 :param port: port or when relaying on the core-site.xml config use 0
175 """
176 import pyarrow as pa
177
178 kerb_ticket = MLFLOW_KERBEROS_TICKET_CACHE.get()
179 kerberos_user = MLFLOW_KERBEROS_USER.get()
180 extra_conf = _parse_extra_conf(MLFLOW_PYARROW_EXTRA_CONF.get())
181
182 if host:
183 host = scheme + "://" + host
184 else:
185 host = "default"
186
187 connected = pa.hdfs.connect(
188 host=host,
189 port=port or 0,
190 user=kerberos_user,
191 kerb_ticket=kerb_ticket,
192 extra_conf=extra_conf,
193 )
194 yield connected
195 connected.close()
196
197
198 def _resolve_connection_params(artifact_uri):
199 parsed = urllib.parse.urlparse(artifact_uri)
200
201 return parsed.scheme, parsed.hostname, parsed.port, parsed.path
202
203
204 def _resolve_base_path(path, artifact_path):
205 if path == artifact_path:
206 return path
207 if artifact_path:
208 return posixpath.join(path, artifact_path)
209 return path
210
211
212 def _relative_path(base_dir, subdir_path, path_module):
213 relative_path = path_module.relpath(subdir_path, base_dir)
214 return relative_path if relative_path != "." else None
215
216
217 def _relative_path_local(base_dir, subdir_path):
218 rel_path = _relative_path(base_dir, subdir_path, os.path)
219 return relative_path_to_artifact_path(rel_path) if rel_path is not None else None
220
221
222 def _relative_path_remote(base_dir, subdir_path):
223 return _relative_path(base_dir, subdir_path, posixpath)
224
225
226 def _tmp_dir(local_path):
227 return os.path.abspath(tempfile.mkdtemp(dir=local_path))
228
229
230 def _download_hdfs_file(hdfs, remote_file_path, local_file_path):
231 # Ensure all required directories exist. Without doing this nested files can't be downloaded.
232 dirs = os.path.dirname(local_file_path)
233 if not os.path.exists(dirs):
234 os.makedirs(dirs)
235 with open(local_file_path, "wb") as f:
236 f.write(hdfs.open(remote_file_path, "rb").read())
237
238
239 def _parse_extra_conf(extra_conf):
240 if extra_conf:
241
242 def as_pair(config):
243 key, val = config.split("=")
244 return key, val
245
246 list_of_key_val = [as_pair(conf) for conf in extra_conf.split(",")]
247 return dict(list_of_key_val)
248 return None
249
[end of mlflow/store/artifact/hdfs_artifact_repo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlflow/store/artifact/hdfs_artifact_repo.py b/mlflow/store/artifact/hdfs_artifact_repo.py
--- a/mlflow/store/artifact/hdfs_artifact_repo.py
+++ b/mlflow/store/artifact/hdfs_artifact_repo.py
@@ -4,6 +4,8 @@
import urllib.parse
from contextlib import contextmanager
+import packaging.version
+
from mlflow.entities import FileInfo
from mlflow.environment_variables import (
MLFLOW_KERBEROS_TICKET_CACHE,
@@ -173,7 +175,7 @@
:param host: hostname or when relaying on the core-site.xml config use 'default'
:param port: port or when relaying on the core-site.xml config use 0
"""
- import pyarrow as pa
+ import pyarrow
kerb_ticket = MLFLOW_KERBEROS_TICKET_CACHE.get()
kerberos_user = MLFLOW_KERBEROS_USER.get()
@@ -183,14 +185,22 @@
host = scheme + "://" + host
else:
host = "default"
-
- connected = pa.hdfs.connect(
- host=host,
- port=port or 0,
- user=kerberos_user,
- kerb_ticket=kerb_ticket,
- extra_conf=extra_conf,
- )
+ if packaging.version.parse(pyarrow.__version__) < packaging.version.parse("2.0.0"):
+ connected = pyarrow.fs.HadoopFileSystem(
+ host=host,
+ port=port or 0,
+ user=kerberos_user,
+ kerb_ticket=kerb_ticket,
+ extra_conf=extra_conf,
+ )
+ else:
+ connected = pyarrow.hdfs.connect(
+ host=host,
+ port=port or 0,
+ user=kerberos_user,
+ kerb_ticket=kerb_ticket,
+ extra_conf=extra_conf,
+ )
yield connected
connected.close()
|
{"golden_diff": "diff --git a/mlflow/store/artifact/hdfs_artifact_repo.py b/mlflow/store/artifact/hdfs_artifact_repo.py\n--- a/mlflow/store/artifact/hdfs_artifact_repo.py\n+++ b/mlflow/store/artifact/hdfs_artifact_repo.py\n@@ -4,6 +4,8 @@\n import urllib.parse\n from contextlib import contextmanager\n \n+import packaging.version\n+\n from mlflow.entities import FileInfo\n from mlflow.environment_variables import (\n MLFLOW_KERBEROS_TICKET_CACHE,\n@@ -173,7 +175,7 @@\n :param host: hostname or when relaying on the core-site.xml config use 'default'\n :param port: port or when relaying on the core-site.xml config use 0\n \"\"\"\n- import pyarrow as pa\n+ import pyarrow\n \n kerb_ticket = MLFLOW_KERBEROS_TICKET_CACHE.get()\n kerberos_user = MLFLOW_KERBEROS_USER.get()\n@@ -183,14 +185,22 @@\n host = scheme + \"://\" + host\n else:\n host = \"default\"\n-\n- connected = pa.hdfs.connect(\n- host=host,\n- port=port or 0,\n- user=kerberos_user,\n- kerb_ticket=kerb_ticket,\n- extra_conf=extra_conf,\n- )\n+ if packaging.version.parse(pyarrow.__version__) < packaging.version.parse(\"2.0.0\"):\n+ connected = pyarrow.fs.HadoopFileSystem(\n+ host=host,\n+ port=port or 0,\n+ user=kerberos_user,\n+ kerb_ticket=kerb_ticket,\n+ extra_conf=extra_conf,\n+ )\n+ else:\n+ connected = pyarrow.hdfs.connect(\n+ host=host,\n+ port=port or 0,\n+ user=kerberos_user,\n+ kerb_ticket=kerb_ticket,\n+ extra_conf=extra_conf,\n+ )\n yield connected\n connected.close()\n", "issue": "[BUG] FutureWarning: pyarrow.hdfs.connect is deprecated\n### Willingness to contribute\n\nYes. I can contribute a fix for this bug independently.\n\n### MLflow version\n\n2.2.2\n\n### System information\n\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: CentOS 7\r\n- **Python version**: 3.9.6\r\n- **pyarrow version**: 5.0.0\r\n\n\n### Describe the problem\n\nWhen loading a model from the MLflow Model Registry (backend store is HDFS) I get the following warning:\r\n\r\n```\r\nlegobricks/lib/python3.9/site-packages/mlflow/store/artifact/hdfs_artifact_repo.py:188: FutureWarning: pyarrow.hdfs.connect is deprecated as of 2.0.0, please use pyarrow.fs.HadoopFileSystem instead.\r\n connected = pa.hdfs.connect(\r\n```\n\n### Steps to reproduce the bug\n\n1. Create a conda env with mlflow and pyarrow installed\r\n2. Start Mlflow with hdfs as backend store\r\n3. Train a model and save it\r\n4. Store the trained model in the model registry\r\n5. Load model from the registry\n\n### Code to generate data required to reproduce the bug\n\n_No response_\n\n### Is the console panel in DevTools showing errors relevant to the bug?\n\n_No response_\n\n### Does the network panel in DevTools contain failed requests relevant to the bug?\n\n_No response_\n", "before_files": [{"content": "import os\nimport posixpath\nimport tempfile\nimport urllib.parse\nfrom contextlib import contextmanager\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.environment_variables import (\n MLFLOW_KERBEROS_TICKET_CACHE,\n MLFLOW_KERBEROS_USER,\n MLFLOW_PYARROW_EXTRA_CONF,\n)\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import mkdir, relative_path_to_artifact_path\n\n\nclass HdfsArtifactRepository(ArtifactRepository):\n \"\"\"\n Stores artifacts on HDFS.\n\n This repository is used with URIs of the form ``hdfs:/<path>``. 
The repository can only be used\n together with the RestStore.\n \"\"\"\n\n def __init__(self, artifact_uri):\n self.scheme, self.host, self.port, self.path = _resolve_connection_params(artifact_uri)\n super().__init__(artifact_uri)\n\n def log_artifact(self, local_file, artifact_path=None):\n \"\"\"\n Log artifact in hdfs.\n :param local_file: source file path\n :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n\n with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:\n _, file_name = os.path.split(local_file)\n destination = posixpath.join(hdfs_base_path, file_name)\n with open(local_file, \"rb\") as f:\n hdfs.upload(destination, f)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n \"\"\"\n Log artifacts in hdfs.\n Missing remote sub-directories will be created if needed.\n :param local_dir: source dir path\n :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n\n with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:\n if not hdfs.exists(hdfs_base_path):\n hdfs.mkdir(hdfs_base_path)\n\n for subdir_path, _, files in os.walk(local_dir):\n relative_path = _relative_path_local(local_dir, subdir_path)\n\n hdfs_subdir_path = (\n posixpath.join(hdfs_base_path, relative_path)\n if relative_path\n else hdfs_base_path\n )\n\n if not hdfs.exists(hdfs_subdir_path):\n hdfs.mkdir(hdfs_subdir_path)\n\n for each_file in files:\n source = os.path.join(subdir_path, each_file)\n destination = posixpath.join(hdfs_subdir_path, each_file)\n with open(source, \"rb\") as f:\n hdfs.upload(destination, f)\n\n def list_artifacts(self, path=None):\n \"\"\"\n Lists files and directories under artifacts directory for the current run_id.\n (self.path contains the base path - hdfs:/some/path/run_id/artifacts)\n\n :param path: Relative source path. Possible subdirectory existing under\n hdfs:/some/path/run_id/artifacts\n :return: List of FileInfos under given path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, path)\n\n with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:\n paths = []\n if hdfs.exists(hdfs_base_path):\n for file_detail in hdfs.ls(hdfs_base_path, detail=True):\n file_name = file_detail.get(\"name\")\n\n # file_name is hdfs_base_path and not a child of that path\n if file_name == hdfs_base_path:\n continue\n\n # Strip off anything that comes before the artifact root e.g. 
hdfs://name\n offset = file_name.index(self.path)\n rel_path = _relative_path_remote(self.path, file_name[offset:])\n is_dir = file_detail.get(\"kind\") == \"directory\"\n size = file_detail.get(\"size\")\n paths.append(FileInfo(rel_path, is_dir=is_dir, file_size=size))\n return sorted(paths, key=lambda f: paths)\n\n def _walk_path(self, hdfs, hdfs_path):\n if hdfs.exists(hdfs_path):\n if hdfs.isdir(hdfs_path):\n for subdir, _, files in hdfs.walk(hdfs_path):\n if subdir != hdfs_path:\n yield subdir, hdfs.isdir(subdir), hdfs.info(subdir).get(\"size\")\n for f in files:\n file_path = posixpath.join(subdir, f)\n yield file_path, hdfs.isdir(file_path), hdfs.info(file_path).get(\"size\")\n else:\n yield hdfs_path, False, hdfs.info(hdfs_path).get(\"size\")\n\n def download_artifacts(self, artifact_path, dst_path=None):\n \"\"\"\n Download an artifact file or directory to a local directory/file if applicable, and\n return a local path for it.\n The caller is responsible for managing the lifecycle of the downloaded artifacts.\n\n (self.path contains the base path - hdfs:/some/path/run_id/artifacts)\n\n :param artifact_path: Relative source path to the desired artifacts file or directory.\n :param dst_path: Absolute path of the local filesystem destination directory to which\n to download the specified artifacts. This directory must already\n exist. If unspecified, the artifacts will be downloaded to a new,\n uniquely-named\n directory on the local filesystem.\n\n :return: Absolute path of the local filesystem location containing the downloaded\n artifacts - file/directory.\n \"\"\"\n\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n if dst_path and os.path.exists(dst_path):\n local_dir = os.path.abspath(dst_path)\n else:\n local_dir = _tmp_dir(dst_path)\n\n with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:\n if not hdfs.isdir(hdfs_base_path):\n local_path = os.path.join(local_dir, os.path.normpath(artifact_path))\n _download_hdfs_file(hdfs, hdfs_base_path, local_path)\n return local_path\n\n for path, is_dir, _ in self._walk_path(hdfs, hdfs_base_path):\n relative_path = _relative_path_remote(hdfs_base_path, path)\n local_path = os.path.join(local_dir, relative_path) if relative_path else local_dir\n\n if is_dir:\n mkdir(local_path)\n else:\n _download_hdfs_file(hdfs, path, local_path)\n return local_dir\n\n def _download_file(self, remote_file_path, local_path):\n raise MlflowException(\"This is not implemented. 
Should never be called.\")\n\n def delete_artifacts(self, artifact_path=None):\n path = posixpath.join(self.path, artifact_path) if artifact_path else self.path\n with hdfs_system(scheme=self.scheme, host=self.host, port=self.port) as hdfs:\n hdfs.delete(path, recursive=True)\n\n\n@contextmanager\ndef hdfs_system(scheme, host, port):\n \"\"\"\n hdfs system context - Attempt to establish the connection to hdfs\n and yields HadoopFileSystem\n\n :param scheme: scheme or use hdfs:// as default\n :param host: hostname or when relaying on the core-site.xml config use 'default'\n :param port: port or when relaying on the core-site.xml config use 0\n \"\"\"\n import pyarrow as pa\n\n kerb_ticket = MLFLOW_KERBEROS_TICKET_CACHE.get()\n kerberos_user = MLFLOW_KERBEROS_USER.get()\n extra_conf = _parse_extra_conf(MLFLOW_PYARROW_EXTRA_CONF.get())\n\n if host:\n host = scheme + \"://\" + host\n else:\n host = \"default\"\n\n connected = pa.hdfs.connect(\n host=host,\n port=port or 0,\n user=kerberos_user,\n kerb_ticket=kerb_ticket,\n extra_conf=extra_conf,\n )\n yield connected\n connected.close()\n\n\ndef _resolve_connection_params(artifact_uri):\n parsed = urllib.parse.urlparse(artifact_uri)\n\n return parsed.scheme, parsed.hostname, parsed.port, parsed.path\n\n\ndef _resolve_base_path(path, artifact_path):\n if path == artifact_path:\n return path\n if artifact_path:\n return posixpath.join(path, artifact_path)\n return path\n\n\ndef _relative_path(base_dir, subdir_path, path_module):\n relative_path = path_module.relpath(subdir_path, base_dir)\n return relative_path if relative_path != \".\" else None\n\n\ndef _relative_path_local(base_dir, subdir_path):\n rel_path = _relative_path(base_dir, subdir_path, os.path)\n return relative_path_to_artifact_path(rel_path) if rel_path is not None else None\n\n\ndef _relative_path_remote(base_dir, subdir_path):\n return _relative_path(base_dir, subdir_path, posixpath)\n\n\ndef _tmp_dir(local_path):\n return os.path.abspath(tempfile.mkdtemp(dir=local_path))\n\n\ndef _download_hdfs_file(hdfs, remote_file_path, local_file_path):\n # Ensure all required directories exist. Without doing this nested files can't be downloaded.\n dirs = os.path.dirname(local_file_path)\n if not os.path.exists(dirs):\n os.makedirs(dirs)\n with open(local_file_path, \"wb\") as f:\n f.write(hdfs.open(remote_file_path, \"rb\").read())\n\n\ndef _parse_extra_conf(extra_conf):\n if extra_conf:\n\n def as_pair(config):\n key, val = config.split(\"=\")\n return key, val\n\n list_of_key_val = [as_pair(conf) for conf in extra_conf.split(\",\")]\n return dict(list_of_key_val)\n return None\n", "path": "mlflow/store/artifact/hdfs_artifact_repo.py"}]}
| 3,625 | 440 |
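The deprecation warning in the MLflow record above points at `pyarrow.fs.HadoopFileSystem` as the replacement for `pyarrow.hdfs.connect`. A minimal sketch of opening an HDFS connection through the newer filesystem API (the host, port and file path are placeholders; this is not MLflow's artifact-repository code):

```python
import pyarrow.fs


def connect_hdfs(host='default', port=0, user=None, kerb_ticket=None, extra_conf=None):
    # HadoopFileSystem accepts the same connection details that the old
    # pyarrow.hdfs.connect() call took in the repository code above.
    return pyarrow.fs.HadoopFileSystem(
        host=host,
        port=port,
        user=user,
        kerb_ticket=kerb_ticket,
        extra_conf=extra_conf,
    )


fs = connect_hdfs(host='namenode.example.com', port=8020)
with fs.open_input_stream('/some/path/run_id/artifacts/model.pkl') as f:
    payload = f.read()
```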
gh_patches_debug_4243
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-499
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support web proxy https_proxy environment variable
**Is your feature request related to a problem? Please describe.**
checkov does not run behind a web proxy. Instead, it hangs indefinitely trying to connect to https://www.bridgecrew.cloud/api/v1
**Describe the solution you'd like**
Add web proxy support for proxy indicated in https_proxy environment variable.
**Describe alternatives you've considered**
None
**Additional context**
Before patch behind a web proxy:
```
$ time timeout 5m checkov -d .
real 5m0.007s
user 0m0.504s
sys 0m0.082s
$
```
After patch behind a web proxy:
```
$ time timeout 5m checkov -d .
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
by bridgecrew.io | version: 1.0.484
real 0m1.547s
user 0m0.938s
sys 0m0.089s
$
```
</issue>
<code>
[start of checkov/common/bridgecrew/platform_integration.py]
1 import json
2 import logging
3 import os
4 from json import JSONDecodeError
5 from time import sleep
6
7 import boto3
8 import dpath.util
9 import urllib3
10 from botocore.exceptions import ClientError
11 from urllib3.exceptions import HTTPError
12
13 from checkov.common.bridgecrew.platform_errors import BridgecrewAuthError
14 from checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS
15 from .wrapper import reduce_scan_reports, persist_checks_results, enrich_and_persist_checks_metadata
16
17 UNAUTHORIZED_MESSAGE = 'User is not authorized to access this resource with an explicit deny'
18
19 DEFAULT_REGION = "us-west-2"
20 http = urllib3.PoolManager()
21
22
23 class BcPlatformIntegration(object):
24 def __init__(self):
25 self.bc_api_key = None
26 self.s3_client = None
27 self.bucket = None
28 self.credentials = None
29 self.repo_path = None
30 self.repo_id = None
31 self.timestamp = None
32 self.scan_reports = []
33 self.bc_api_url = os.getenv('BC_API_URL', "https://www.bridgecrew.cloud/api/v1")
34 self.bc_source = os.getenv('BC_SOURCE', "cli")
35 self.integrations_api_url = f"{self.bc_api_url}/integrations/types/checkov"
36 self.guidelines_api_url = f"{self.bc_api_url}/guidelines"
37
38 def setup_bridgecrew_credentials(self, bc_api_key, repo_id):
39 """
40 Setup credentials against Bridgecrew's platform.
41 :param repo_id: Identity string of the scanned repository, of the form <repo_owner>/<repo_name>
42 :param bc_api_key: Bridgecrew issued API key
43 """
44 self.bc_api_key = bc_api_key
45 self.repo_id = repo_id
46 try:
47 request = http.request("POST", self.integrations_api_url, body=json.dumps({"repoId": repo_id}),
48 headers={"Authorization": bc_api_key, "Content-Type": "application/json"})
49 response = json.loads(request.data.decode("utf8"))
50 if 'Message' in response:
51 if response['Message'] == UNAUTHORIZED_MESSAGE:
52 raise BridgecrewAuthError()
53 repo_full_path = response["path"]
54 self.bucket, self.repo_path = repo_full_path.split("/", 1)
55 self.timestamp = self.repo_path.split("/")[-1]
56 self.credentials = response["creds"]
57 self.s3_client = boto3.client("s3",
58 aws_access_key_id=self.credentials["AccessKeyId"],
59 aws_secret_access_key=self.credentials["SecretAccessKey"],
60 aws_session_token=self.credentials["SessionToken"],
61 region_name=DEFAULT_REGION
62 )
63 sleep(10) # Wait for the policy to update
64 except HTTPError as e:
65 logging.error(f"Failed to get customer assumed role\n{e}")
66 raise e
67 except ClientError as e:
68 logging.error(f"Failed to initiate client with credentials {self.credentials}\n{e}")
69 raise e
70 except JSONDecodeError as e:
71 logging.error(f"Response of {self.integrations_api_url} is not a valid JSON\n{e}")
72 raise e
73
74 def is_integration_configured(self):
75 """
76 Checks if Bridgecrew integration is fully configured.
77 :return: True if the integration is configured, False otherwise
78 """
79 return all([self.repo_path, self.credentials, self.s3_client])
80
81 def persist_repository(self, root_dir):
82 """
83 Persist the repository found on root_dir path to Bridgecrew's platform
84 :param root_dir: Absolute path of the directory containing the repository root level
85 """
86 for root_path, d_names, f_names in os.walk(root_dir):
87 for file_path in f_names:
88 _, file_extension = os.path.splitext(file_path)
89 if file_extension in SUPPORTED_FILE_EXTENSIONS:
90 full_file_path = os.path.join(root_path, file_path)
91 relative_file_path = os.path.relpath(full_file_path, root_dir)
92 self._persist_file(full_file_path, relative_file_path)
93
94 def persist_scan_results(self, scan_reports):
95 """
96 Persist checkov's scan result into bridgecrew's platform.
97 :param scan_reports: List of checkov scan reports
98 """
99 self.scan_reports = scan_reports
100 reduced_scan_reports = reduce_scan_reports(scan_reports)
101 checks_metadata_paths = enrich_and_persist_checks_metadata(scan_reports, self.s3_client, self.bucket,
102 self.repo_path)
103 dpath.util.merge(reduced_scan_reports, checks_metadata_paths)
104 persist_checks_results(reduced_scan_reports, self.s3_client, self.bucket, self.repo_path)
105
106 def commit_repository(self, branch):
107 """
108 :param branch: branch to be persisted
109 Finalize the repository's scanning in bridgecrew's platform.
110 """
111 request = None
112 try:
113 request = http.request("PUT", f"{self.integrations_api_url}?source={self.bc_source}",
114 body=json.dumps({"path": self.repo_path, "branch": branch}),
115 headers={"Authorization": self.bc_api_key, "Content-Type": "application/json"})
116 response = json.loads(request.data.decode("utf8"))
117 except HTTPError as e:
118 logging.error(f"Failed to commit repository {self.repo_path}\n{e}")
119 raise e
120 except JSONDecodeError as e:
121 logging.error(f"Response of {self.integrations_api_url} is not a valid JSON\n{e}")
122 raise e
123 finally:
124 if request.status == 201 and response["result"] == "Success":
125 logging.info(f"Finalize repository {self.repo_id} in bridgecrew's platform")
126 else:
127 raise Exception(f"Failed to finalize repository {self.repo_id} in bridgecrew's platform\n{response}")
128
129 def _persist_file(self, full_file_path, relative_file_path):
130 tries = 4
131 curr_try = 0
132 file_object_key = os.path.join(self.repo_path, relative_file_path)
133 while curr_try < tries:
134 try:
135 self.s3_client.upload_file(full_file_path, self.bucket, file_object_key)
136 return
137 except ClientError as e:
138 if e.response.get('Error', {}).get('Code') == 'AccessDenied':
139 sleep(5)
140 curr_try += 1
141 else:
142 logging.error(f"failed to persist file {full_file_path} into S3 bucket {self.bucket}\n{e}")
143 raise e
144 except Exception as e:
145 logging.error(f"failed to persist file {full_file_path} into S3 bucket {self.bucket}\n{e}")
146 raise e
147 if curr_try == tries:
148 logging.error(f"failed to persist file {full_file_path} into S3 bucket {self.bucket} - gut AccessDenied {tries} times")
149
150 def get_guidelines(self) -> dict:
151 try:
152 request = http.request("GET", self.guidelines_api_url)
153 response = json.loads(request.data.decode("utf8"))
154 guidelines_map = response["guidelines"]
155 logging.debug(f"Got guidelines form Bridgecrew BE")
156 return guidelines_map
157 except Exception as e:
158 logging.debug(f"Failed to get the guidelines from {self.guidelines_api_url}, error:\n{e}")
159 return {}
160
[end of checkov/common/bridgecrew/platform_integration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/common/bridgecrew/platform_integration.py b/checkov/common/bridgecrew/platform_integration.py
--- a/checkov/common/bridgecrew/platform_integration.py
+++ b/checkov/common/bridgecrew/platform_integration.py
@@ -17,7 +17,11 @@
UNAUTHORIZED_MESSAGE = 'User is not authorized to access this resource with an explicit deny'
DEFAULT_REGION = "us-west-2"
-http = urllib3.PoolManager()
+
+try:
+ http = urllib3.ProxyManager(os.environ['https_proxy'])
+except KeyError:
+ http = urllib3.PoolManager()
class BcPlatformIntegration(object):
|
{"golden_diff": "diff --git a/checkov/common/bridgecrew/platform_integration.py b/checkov/common/bridgecrew/platform_integration.py\n--- a/checkov/common/bridgecrew/platform_integration.py\n+++ b/checkov/common/bridgecrew/platform_integration.py\n@@ -17,7 +17,11 @@\n UNAUTHORIZED_MESSAGE = 'User is not authorized to access this resource with an explicit deny'\n \n DEFAULT_REGION = \"us-west-2\"\n-http = urllib3.PoolManager()\n+\n+try:\n+ http = urllib3.ProxyManager(os.environ['https_proxy'])\n+except KeyError:\n+ http = urllib3.PoolManager()\n \n \n class BcPlatformIntegration(object):\n", "issue": "Support web proxy https_proxy environment variable\n**Is your feature request related to a problem? Please describe.**\r\ncheckov does not run behind a web proxy. Instead, it hangs indefinitely trying to connect to https://www.bridgecrew.cloud/api/v1\r\n\r\n**Describe the solution you'd like**\r\nAdd web proxy support for proxy indicated in https_proxy environment variable.\r\n\r\n**Describe alternatives you've considered**\r\nNone\r\n\r\n**Additional context**\r\nBefore patch behind a web proxy:\r\n```\r\n$ time timeout 5m checkov -d .\r\n\r\nreal 5m0.007s\r\nuser 0m0.504s\r\nsys 0m0.082s\r\n$\r\n```\r\n\r\nAfter patch behind a web proxy:\r\n```\r\n$ time timeout 5m checkov -d .\r\n\r\n _ _\r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V /\r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/\r\n\r\nby bridgecrew.io | version: 1.0.484\r\n\r\n\r\nreal 0m1.547s\r\nuser 0m0.938s\r\nsys 0m0.089s\r\n$\r\n```\r\n\r\n\n", "before_files": [{"content": "import json\nimport logging\nimport os\nfrom json import JSONDecodeError\nfrom time import sleep\n\nimport boto3\nimport dpath.util\nimport urllib3\nfrom botocore.exceptions import ClientError\nfrom urllib3.exceptions import HTTPError\n\nfrom checkov.common.bridgecrew.platform_errors import BridgecrewAuthError\nfrom checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS\nfrom .wrapper import reduce_scan_reports, persist_checks_results, enrich_and_persist_checks_metadata\n\nUNAUTHORIZED_MESSAGE = 'User is not authorized to access this resource with an explicit deny'\n\nDEFAULT_REGION = \"us-west-2\"\nhttp = urllib3.PoolManager()\n\n\nclass BcPlatformIntegration(object):\n def __init__(self):\n self.bc_api_key = None\n self.s3_client = None\n self.bucket = None\n self.credentials = None\n self.repo_path = None\n self.repo_id = None\n self.timestamp = None\n self.scan_reports = []\n self.bc_api_url = os.getenv('BC_API_URL', \"https://www.bridgecrew.cloud/api/v1\")\n self.bc_source = os.getenv('BC_SOURCE', \"cli\")\n self.integrations_api_url = f\"{self.bc_api_url}/integrations/types/checkov\"\n self.guidelines_api_url = f\"{self.bc_api_url}/guidelines\"\n\n def setup_bridgecrew_credentials(self, bc_api_key, repo_id):\n \"\"\"\n Setup credentials against Bridgecrew's platform.\n :param repo_id: Identity string of the scanned repository, of the form <repo_owner>/<repo_name>\n :param bc_api_key: Bridgecrew issued API key\n \"\"\"\n self.bc_api_key = bc_api_key\n self.repo_id = repo_id\n try:\n request = http.request(\"POST\", self.integrations_api_url, body=json.dumps({\"repoId\": repo_id}),\n headers={\"Authorization\": bc_api_key, \"Content-Type\": \"application/json\"})\n response = json.loads(request.data.decode(\"utf8\"))\n if 'Message' in response:\n if response['Message'] == UNAUTHORIZED_MESSAGE:\n raise BridgecrewAuthError()\n repo_full_path = response[\"path\"]\n self.bucket, 
self.repo_path = repo_full_path.split(\"/\", 1)\n self.timestamp = self.repo_path.split(\"/\")[-1]\n self.credentials = response[\"creds\"]\n self.s3_client = boto3.client(\"s3\",\n aws_access_key_id=self.credentials[\"AccessKeyId\"],\n aws_secret_access_key=self.credentials[\"SecretAccessKey\"],\n aws_session_token=self.credentials[\"SessionToken\"],\n region_name=DEFAULT_REGION\n )\n sleep(10) # Wait for the policy to update\n except HTTPError as e:\n logging.error(f\"Failed to get customer assumed role\\n{e}\")\n raise e\n except ClientError as e:\n logging.error(f\"Failed to initiate client with credentials {self.credentials}\\n{e}\")\n raise e\n except JSONDecodeError as e:\n logging.error(f\"Response of {self.integrations_api_url} is not a valid JSON\\n{e}\")\n raise e\n\n def is_integration_configured(self):\n \"\"\"\n Checks if Bridgecrew integration is fully configured.\n :return: True if the integration is configured, False otherwise\n \"\"\"\n return all([self.repo_path, self.credentials, self.s3_client])\n\n def persist_repository(self, root_dir):\n \"\"\"\n Persist the repository found on root_dir path to Bridgecrew's platform\n :param root_dir: Absolute path of the directory containing the repository root level\n \"\"\"\n for root_path, d_names, f_names in os.walk(root_dir):\n for file_path in f_names:\n _, file_extension = os.path.splitext(file_path)\n if file_extension in SUPPORTED_FILE_EXTENSIONS:\n full_file_path = os.path.join(root_path, file_path)\n relative_file_path = os.path.relpath(full_file_path, root_dir)\n self._persist_file(full_file_path, relative_file_path)\n\n def persist_scan_results(self, scan_reports):\n \"\"\"\n Persist checkov's scan result into bridgecrew's platform.\n :param scan_reports: List of checkov scan reports\n \"\"\"\n self.scan_reports = scan_reports\n reduced_scan_reports = reduce_scan_reports(scan_reports)\n checks_metadata_paths = enrich_and_persist_checks_metadata(scan_reports, self.s3_client, self.bucket,\n self.repo_path)\n dpath.util.merge(reduced_scan_reports, checks_metadata_paths)\n persist_checks_results(reduced_scan_reports, self.s3_client, self.bucket, self.repo_path)\n\n def commit_repository(self, branch):\n \"\"\"\n :param branch: branch to be persisted\n Finalize the repository's scanning in bridgecrew's platform.\n \"\"\"\n request = None\n try:\n request = http.request(\"PUT\", f\"{self.integrations_api_url}?source={self.bc_source}\",\n body=json.dumps({\"path\": self.repo_path, \"branch\": branch}),\n headers={\"Authorization\": self.bc_api_key, \"Content-Type\": \"application/json\"})\n response = json.loads(request.data.decode(\"utf8\"))\n except HTTPError as e:\n logging.error(f\"Failed to commit repository {self.repo_path}\\n{e}\")\n raise e\n except JSONDecodeError as e:\n logging.error(f\"Response of {self.integrations_api_url} is not a valid JSON\\n{e}\")\n raise e\n finally:\n if request.status == 201 and response[\"result\"] == \"Success\":\n logging.info(f\"Finalize repository {self.repo_id} in bridgecrew's platform\")\n else:\n raise Exception(f\"Failed to finalize repository {self.repo_id} in bridgecrew's platform\\n{response}\")\n\n def _persist_file(self, full_file_path, relative_file_path):\n tries = 4\n curr_try = 0\n file_object_key = os.path.join(self.repo_path, relative_file_path)\n while curr_try < tries:\n try:\n self.s3_client.upload_file(full_file_path, self.bucket, file_object_key)\n return\n except ClientError as e:\n if e.response.get('Error', {}).get('Code') == 'AccessDenied':\n sleep(5)\n 
curr_try += 1\n else:\n logging.error(f\"failed to persist file {full_file_path} into S3 bucket {self.bucket}\\n{e}\")\n raise e\n except Exception as e:\n logging.error(f\"failed to persist file {full_file_path} into S3 bucket {self.bucket}\\n{e}\")\n raise e\n if curr_try == tries:\n logging.error(f\"failed to persist file {full_file_path} into S3 bucket {self.bucket} - gut AccessDenied {tries} times\")\n\n def get_guidelines(self) -> dict:\n try:\n request = http.request(\"GET\", self.guidelines_api_url)\n response = json.loads(request.data.decode(\"utf8\"))\n guidelines_map = response[\"guidelines\"]\n logging.debug(f\"Got guidelines form Bridgecrew BE\")\n return guidelines_map\n except Exception as e:\n logging.debug(f\"Failed to get the guidelines from {self.guidelines_api_url}, error:\\n{e}\")\n return {}\n", "path": "checkov/common/bridgecrew/platform_integration.py"}]}
| 2,741 | 135 |
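The checkov change above comes down to constructing a `urllib3.ProxyManager` when an `https_proxy` environment variable is present, and a plain `PoolManager` otherwise. A self-contained sketch of that pattern (standalone, not checkov's module):

```python
import os

import urllib3


def make_http_pool():
    # Honour the conventional https_proxy variable so requests to the
    # Bridgecrew API can traverse a corporate web proxy.
    proxy_url = os.environ.get('https_proxy')
    if proxy_url:
        return urllib3.ProxyManager(proxy_url)
    return urllib3.PoolManager()


http = make_http_pool()
response = http.request('GET', 'https://www.bridgecrew.cloud/api/v1/guidelines')
print(response.status)
```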
gh_patches_debug_27300
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-464
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Date Train-Test Leakage displays datetimes as object, should be human readable
The output of DateTrainTestLeakage is currently rendered as `datetime.datetime(...)`; it should be rendered as `1992/02/13 13:23....` instead.
</issue>
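The request in the issue above is essentially to run datetimes through `strftime` before display instead of relying on their repr. A hypothetical helper in the spirit of the formatting utilities in `deepchecks/utils/strings.py` below (the function name and format string are illustrative, not the project's actual API):

```python
from datetime import datetime


def format_datetime(value) -> str:
    # Render "1992/02/13 13:23:00" rather than the repr
    # "datetime.datetime(1992, 2, 13, 13, 23)".
    if isinstance(value, datetime):
        return value.strftime('%Y/%m/%d %H:%M:%S')
    return str(value)


print(format_datetime(datetime(1992, 2, 13, 13, 23)))  # -> 1992/02/13 13:23:00
```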
<code>
[start of deepchecks/utils/strings.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """String functions."""
12 import random
13 from string import ascii_uppercase, digits
14 import typing as t
15 import re
16 from collections import defaultdict
17 from decimal import Decimal
18 from copy import copy
19
20 import pandas as pd
21 from pandas.core.dtypes.common import is_numeric_dtype
22
23 from deepchecks.utils.typing import Hashable
24 from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence
25
26
27 __all__ = [
28 'string_baseform',
29 'get_base_form_to_variants_dict',
30 'split_camel_case',
31 'split_and_keep',
32 'split_by_order',
33 'is_string_column',
34 'format_percent',
35 'format_number',
36 'format_list',
37 'format_columns_for_condition',
38 'get_random_string'
39 ]
40
41
42 def get_random_string(n: int = 5):
43 """Return random string at the given size.
44
45 Args:
46 n (int): the size of the string to return.
47 Returns:
48 (str): a random string
49 """
50 return ''.join(random.choices(ascii_uppercase + digits, k=n))
51
52
53 def string_baseform(string: str) -> str:
54 """Remove special characters from given string, leaving only a-z, A-Z, 0-9 characters.
55
56 Args:
57 string (str): string to remove special characters from
58
59 Returns:
60 (str): string without special characters
61 """
62 if not isinstance(string, str):
63 return string
64 return re.sub('[^A-Za-z0-9]+', '', string).lower()
65
66
67 def is_string_column(column: pd.Series) -> bool:
68 """Determine whether a pandas series is string type."""
69 if is_numeric_dtype(column):
70 return False
71 try:
72 pd.to_numeric(column)
73 return False
74 except ValueError:
75 return True
76 # Non string objects like pd.Timestamp results in TypeError
77 except TypeError:
78 return False
79
80
81 def split_camel_case(string: str) -> str:
82 """Split string where there are capital letters and enter space instead.
83
84 Args:
85 string (str): string to change
86 """
87 return ' '.join(re.findall('[A-Z][^A-Z]*', string))
88
89
90 def get_base_form_to_variants_dict(uniques: t.Iterable[str]) -> t.Dict[str, t.Set[str]]:
91 """Create dict of base-form of the uniques to their values.
92
93 function gets a set of strings, and returns a dictionary of shape Dict[str, Set]
94 the key being the "base_form" (a clean version of the string),
95 and the value being a set of all existing original values.
96 This is done using the StringCategory class.
97 """
98 base_form_to_variants = defaultdict(set)
99 for item in uniques:
100 base_form_to_variants[string_baseform(item)].add(item)
101 return base_form_to_variants
102
103
104 def str_min_find(s: str, substr_list: t.Iterable[str]) -> t.Tuple[int, str]:
105 """
106 Find the minimal first occurrence of a substring in a string, and return both the index and substring.
107
108 Args:
109 s (str): The string in which we look for substrings
110 substr_list: list of substrings to find
111
112 Returns:
113 min_find (int): index of minimal first occurrence of substring
114 min_substr (str): the substring that occurs at said index
115
116 """
117 min_find = -1
118 min_substr = ''
119 for substr in substr_list:
120 first_find = s.find(substr)
121 if first_find != -1 and (first_find < min_find or min_find == -1):
122 min_find = first_find
123 min_substr = substr
124 return min_find, min_substr
125
126
127 def split_and_keep(s: str, separators: t.Union[str, t.Iterable[str]]) -> t.List[str]:
128 """
129 Split string by another substring into a list. Like str.split(), but keeps the separator occurrences in the list.
130
131 Args:
132 s (str): the string to split
133 separators (str): the substring to split by
134
135 Returns:
136 List[str]: list of substrings, including the separator occurrences in string
137
138 """
139 if isinstance(separators, str):
140 separators = [separators]
141
142 split_s = []
143 while len(s) != 0:
144 i, substr = str_min_find(s=s, substr_list=separators)
145 if i == 0:
146 split_s.append(substr)
147 s = s[len(substr):]
148 elif i == -1:
149 split_s.append(s)
150 break
151 else:
152 pre, _ = s.split(substr, 1)
153 split_s.append(pre)
154 s = s[len(pre):]
155 return split_s
156
157
158 def split_by_order(s: str, separators: t.Iterable[str], keep: bool = True) -> t.List[str]:
159 """
160 Split string by a list of substrings, each used once as a separator.
161
162 Args:
163 s (str): the string to split
164 separators (List[str]): list of substrings to split by
165 keep (bool): whether to keep the separators in list as well. Default is True.
166
167 Returns:
168 List[str]: list of substrings
169 """
170 split_s = []
171 separators = list(copy(separators))
172 while len(s) != 0:
173 if len(separators) > 0:
174 sep = separators[0]
175 if s.find(sep) == 0:
176 if keep is True:
177 split_s.append(sep)
178 s = s[len(sep):]
179 separators.pop(0)
180 else:
181 pre, _ = s.split(sep, 1)
182 split_s.append(pre)
183 s = s[len(pre):]
184 else:
185 split_s.append(s)
186 break
187 return split_s
188
189
190 def format_percent(ratio: float, floating_point: int = 2) -> str:
191 """Format percent for elegant display.
192
193 Args:
194 ratio (float): Ratio to be displayed as percent
195 floating_point (int): Number of floating points to display
196
197 Returns:
198 String of ratio as percent
199 """
200 result: str
201 if ratio < 0:
202 ratio = -ratio
203 prefix = '-'
204 else:
205 prefix = ''
206
207 if int(ratio) == ratio:
208 result = f'{int(ratio) * 100}%'
209 elif ratio > 1:
210 result = f'{ratio * 100:.{floating_point}f}%'
211 elif ratio < 10**(-(2+floating_point)):
212 result = f'{Decimal(ratio * 100):.{floating_point}E}%'
213 elif ratio > (1-10**(-(2+floating_point))):
214 if floating_point > 0:
215 result = f'99.{"".join(["9"]*floating_point)}%'
216 else:
217 result = '99%'
218 else:
219 result = f'{ratio:.{floating_point}%}'
220
221 return prefix + result
222
223
224 def format_number(x, floating_point: int = 2) -> str:
225 """Format number for elegant display.
226
227 Args:
228 x (): Number to be displayed
229 floating_point (int): Number of floating points to display
230
231 Returns:
232 String of beautified number
233 """
234 def add_commas(x):
235 return f'{x:,}' # yes this actually formats the number 1000 to "1,000"
236
237 # 0 is lost in the next if case, so we have it here as a special use-case
238 if x == 0:
239 return '0'
240
241 # If x is a very small number, that would be rounded to 0, we would prefer to return it as the format 1.0E-3.
242 if abs(x) < 10 ** (-floating_point):
243 return f'{Decimal(x):.{floating_point}E}'
244
245 # If x is an integer, or if x when rounded is an integer (e.g. 1.999999), then return as integer:
246 if round(x) == round(x, floating_point):
247 return add_commas(round(x))
248
249 # If not, return as a float, but don't print unnecessary zeros at end:
250 else:
251 ret_x = round(x, floating_point)
252 return add_commas(ret_x).rstrip('0')
253
254
255 def format_list(l: t.List[Hashable], max_elements_to_show: int = 10, max_string_length: int = 40) -> str:
256 """Format columns properties for display in condition name.
257
258 Args:
259 l (List): list to print.
260 max_elements_to_show (int): max elements to print before terminating.
261 max_string_length (int): max string length before terminating.
262
263 Return:
264 String of beautified list
265 """
266 string_list = [str(i) for i in l[:max_elements_to_show]]
267 output = ', '.join(string_list)
268
269 if len(output) > max_string_length:
270 return output[:max_string_length] + '...'
271
272 if len(l) > max_elements_to_show:
273 return output + ', ...'
274
275 return output
276
277
278 def format_columns_for_condition(
279 columns: t.Union[Hashable, t.List[Hashable], None] = None,
280 ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None
281 ) -> str:
282 """Format columns properties for display in condition name.
283
284 Args:
285 columns (Union[Hashable, List[Hashable], None]):
286 columns to include into resulting string
287 ignore_columns (Union[Hashable, List[Hashable], None]):
288 columns to not include into resulting string
289
290 Returns: formatted string of columns
291 """
292 if columns is not None:
293 columns = ensure_hashable_or_mutable_sequence(columns)
294 return f'columns: {",".join(map(str, columns))}'
295 elif ignore_columns is not None:
296 ignore_columns = ensure_hashable_or_mutable_sequence(ignore_columns)
297 return f'all columns ignoring: {",".join(map(str, ignore_columns))}'
298 else:
299 return 'all columns'
300
[end of deepchecks/utils/strings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/utils/strings.py b/deepchecks/utils/strings.py
--- a/deepchecks/utils/strings.py
+++ b/deepchecks/utils/strings.py
@@ -10,9 +10,10 @@
#
"""String functions."""
import random
-from string import ascii_uppercase, digits
import typing as t
import re
+from datetime import datetime
+from string import ascii_uppercase, digits
from collections import defaultdict
from decimal import Decimal
from copy import copy
@@ -35,7 +36,8 @@
'format_number',
'format_list',
'format_columns_for_condition',
- 'get_random_string'
+ 'get_random_string',
+ 'format_datetime'
]
@@ -297,3 +299,27 @@
return f'all columns ignoring: {",".join(map(str, ignore_columns))}'
else:
return 'all columns'
+
+
+def format_datetime(
+ value,
+ datetime_format: str = '%Y/%m/%d %H:%M:%S.%f %Z%z' # # 1992/02/13 13:23:00 UTC+0000
+) -> str:
+ """Format datetime object or timestamp value.
+
+ Args:
+ value (Union[datetime, int, float]): datetime (timestamp) to format
+ format (str): format to use
+
+ Returns:
+ str: string representation of the provided value
+
+ Raises:
+ ValueError: if unexpected value type was passed to the function
+ """
+ if isinstance(value, datetime):
+ return value.strftime(datetime_format)
+ elif isinstance(value, (int, float)):
+ return datetime.fromtimestamp(value).strftime(datetime_format)
+ else:
+ raise ValueError(f'Unsupported value type - {type(value).__name__}')
|
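For reference, the `format_datetime` helper added in the diff above could be exercised as follows (a sketch, not part of the dataset record; printed values assume a naive datetime, so the `%Z%z` portion of the default format renders empty):

```python
from datetime import datetime

from deepchecks.utils.strings import format_datetime  # available once the patch is applied

dt = datetime(1992, 2, 13, 13, 23)

# datetime objects are formatted directly with the default format string
print(format_datetime(dt))               # e.g. 1992/02/13 13:23:00.000000 (timezone fields empty)

# int/float timestamps are converted via datetime.fromtimestamp first
print(format_datetime(dt.timestamp()))

# anything else raises ValueError
try:
    format_datetime("1992-02-13")
except ValueError as err:
    print(err)                            # Unsupported value type - str
```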
{"golden_diff": "diff --git a/deepchecks/utils/strings.py b/deepchecks/utils/strings.py\n--- a/deepchecks/utils/strings.py\n+++ b/deepchecks/utils/strings.py\n@@ -10,9 +10,10 @@\n #\n \"\"\"String functions.\"\"\"\n import random\n-from string import ascii_uppercase, digits\n import typing as t\n import re\n+from datetime import datetime\n+from string import ascii_uppercase, digits\n from collections import defaultdict\n from decimal import Decimal\n from copy import copy\n@@ -35,7 +36,8 @@\n 'format_number',\n 'format_list',\n 'format_columns_for_condition',\n- 'get_random_string'\n+ 'get_random_string',\n+ 'format_datetime'\n ]\n \n \n@@ -297,3 +299,27 @@\n return f'all columns ignoring: {\",\".join(map(str, ignore_columns))}'\n else:\n return 'all columns'\n+\n+\n+def format_datetime(\n+ value,\n+ datetime_format: str = '%Y/%m/%d %H:%M:%S.%f %Z%z' # # 1992/02/13 13:23:00 UTC+0000\n+) -> str:\n+ \"\"\"Format datetime object or timestamp value.\n+\n+ Args:\n+ value (Union[datetime, int, float]): datetime (timestamp) to format\n+ format (str): format to use\n+\n+ Returns:\n+ str: string representation of the provided value\n+\n+ Raises:\n+ ValueError: if unexpected value type was passed to the function\n+ \"\"\"\n+ if isinstance(value, datetime):\n+ return value.strftime(datetime_format)\n+ elif isinstance(value, (int, float)):\n+ return datetime.fromtimestamp(value).strftime(datetime_format)\n+ else:\n+ raise ValueError(f'Unsupported value type - {type(value).__name__}')\n", "issue": "[BUG] Date Train-Test Leakage displays datetimes as object, should be human readable\n\r\n\r\noutput of DateTrainTestLeakage is currently `datetime.datetime(...)` should be `1992/02/13 13:23....` instead\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"String functions.\"\"\"\nimport random\nfrom string import ascii_uppercase, digits\nimport typing as t\nimport re\nfrom collections import defaultdict\nfrom decimal import Decimal\nfrom copy import copy\n\nimport pandas as pd\nfrom pandas.core.dtypes.common import is_numeric_dtype\n\nfrom deepchecks.utils.typing import Hashable\nfrom deepchecks.utils.validation import ensure_hashable_or_mutable_sequence\n\n\n__all__ = [\n 'string_baseform',\n 'get_base_form_to_variants_dict',\n 'split_camel_case',\n 'split_and_keep',\n 'split_by_order',\n 'is_string_column',\n 'format_percent',\n 'format_number',\n 'format_list',\n 'format_columns_for_condition',\n 'get_random_string'\n]\n\n\ndef get_random_string(n: int = 5):\n \"\"\"Return random string at the given size.\n\n Args:\n n (int): the size of the string to return.\n Returns:\n (str): a random string\n \"\"\"\n return ''.join(random.choices(ascii_uppercase + digits, k=n))\n\n\ndef string_baseform(string: str) -> str:\n \"\"\"Remove special characters from given string, leaving only a-z, A-Z, 0-9 characters.\n\n Args:\n string (str): string to remove special characters from\n\n Returns:\n (str): string without special characters\n \"\"\"\n if not isinstance(string, str):\n return string\n return re.sub('[^A-Za-z0-9]+', '', string).lower()\n\n\ndef is_string_column(column: pd.Series) -> bool:\n \"\"\"Determine whether a pandas series is string type.\"\"\"\n if is_numeric_dtype(column):\n return False\n try:\n pd.to_numeric(column)\n return False\n except ValueError:\n return True\n # Non string objects like pd.Timestamp results in TypeError\n except TypeError:\n return False\n\n\ndef split_camel_case(string: str) -> str:\n \"\"\"Split string where there are capital letters and enter space instead.\n\n Args:\n string (str): string to change\n \"\"\"\n return ' '.join(re.findall('[A-Z][^A-Z]*', string))\n\n\ndef get_base_form_to_variants_dict(uniques: t.Iterable[str]) -> t.Dict[str, t.Set[str]]:\n \"\"\"Create dict of base-form of the uniques to their values.\n\n function gets a set of strings, and returns a dictionary of shape Dict[str, Set]\n the key being the \"base_form\" (a clean version of the string),\n and the value being a set of all existing original values.\n This is done using the StringCategory class.\n \"\"\"\n base_form_to_variants = defaultdict(set)\n for item in uniques:\n base_form_to_variants[string_baseform(item)].add(item)\n return base_form_to_variants\n\n\ndef str_min_find(s: str, substr_list: t.Iterable[str]) -> t.Tuple[int, str]:\n \"\"\"\n Find the minimal first occurence of a substring in a string, and return both the index and substring.\n\n Args:\n s (str): The string in which we look for substrings\n substr_list: list of substrings to find\n\n Returns:\n min_find (int): index of minimal first occurence of substring\n min_substr (str): the substring that occures in said index\n\n \"\"\"\n min_find = -1\n min_substr = ''\n for substr in substr_list:\n first_find = s.find(substr)\n if first_find != -1 and (first_find < min_find or min_find == -1):\n min_find = first_find\n min_substr = substr\n return min_find, min_substr\n\n\ndef split_and_keep(s: str, separators: t.Union[str, t.Iterable[str]]) -> t.List[str]:\n \"\"\"\n Split string by a another substring into a list. 
Like str.split(), but keeps the separator occurrences in the list.\n\n Args:\n s (str): the string to split\n separators (str): the substring to split by\n\n Returns:\n List[str]: list of substrings, including the separator occurrences in string\n\n \"\"\"\n if isinstance(separators, str):\n separators = [separators]\n\n split_s = []\n while len(s) != 0:\n i, substr = str_min_find(s=s, substr_list=separators)\n if i == 0:\n split_s.append(substr)\n s = s[len(substr):]\n elif i == -1:\n split_s.append(s)\n break\n else:\n pre, _ = s.split(substr, 1)\n split_s.append(pre)\n s = s[len(pre):]\n return split_s\n\n\ndef split_by_order(s: str, separators: t.Iterable[str], keep: bool = True) -> t.List[str]:\n \"\"\"\n Split string by a a list of substrings, each used once as a separator.\n\n Args:\n s (str): the string to split\n separators (List[str]): list of substrings to split by\n keep (bool): whether to keep the separators in list as well. Default is True.\n\n Returns:\n List[str]: list of substrings\n \"\"\"\n split_s = []\n separators = list(copy(separators))\n while len(s) != 0:\n if len(separators) > 0:\n sep = separators[0]\n if s.find(sep) == 0:\n if keep is True:\n split_s.append(sep)\n s = s[len(sep):]\n separators.pop(0)\n else:\n pre, _ = s.split(sep, 1)\n split_s.append(pre)\n s = s[len(pre):]\n else:\n split_s.append(s)\n break\n return split_s\n\n\ndef format_percent(ratio: float, floating_point: int = 2) -> str:\n \"\"\"Format percent for elegant display.\n\n Args:\n ratio (float): Ratio to be displayed as percent\n floating_point (int): Number of floating points to display\n\n Returns:\n String of ratio as percent\n \"\"\"\n result: str\n if ratio < 0:\n ratio = -ratio\n prefix = '-'\n else:\n prefix = ''\n\n if int(ratio) == ratio:\n result = f'{int(ratio) * 100}%'\n elif ratio > 1:\n result = f'{ratio * 100:.{floating_point}f}%'\n elif ratio < 10**(-(2+floating_point)):\n result = f'{Decimal(ratio * 100):.{floating_point}E}%'\n elif ratio > (1-10**(-(2+floating_point))):\n if floating_point > 0:\n result = f'99.{\"\".join([\"9\"]*floating_point)}%'\n else:\n result = '99%'\n else:\n result = f'{ratio:.{floating_point}%}'\n\n return prefix + result\n\n\ndef format_number(x, floating_point: int = 2) -> str:\n \"\"\"Format number for elegant display.\n\n Args:\n x (): Number to be displayed\n floating_point (int): Number of floating points to display\n\n Returns:\n String of beautified number\n \"\"\"\n def add_commas(x):\n return f'{x:,}' # yes this actually formats the number 1000 to \"1,000\"\n\n # 0 is lost in the next if case, so we have it here as a special use-case\n if x == 0:\n return '0'\n\n # If x is a very small number, that would be rounded to 0, we would prefer to return it as the format 1.0E-3.\n if abs(x) < 10 ** (-floating_point):\n return f'{Decimal(x):.{floating_point}E}'\n\n # If x is an integer, or if x when rounded is an integer (e.g. 
1.999999), then return as integer:\n if round(x) == round(x, floating_point):\n return add_commas(round(x))\n\n # If not, return as a float, but don't print unnecessary zeros at end:\n else:\n ret_x = round(x, floating_point)\n return add_commas(ret_x).rstrip('0')\n\n\ndef format_list(l: t.List[Hashable], max_elements_to_show: int = 10, max_string_length: int = 40) -> str:\n \"\"\"Format columns properties for display in condition name.\n\n Args:\n l (List): list to print.\n max_elements_to_show (int): max elemnts to print before terminating.\n max_string_length (int): max string length before terminating.\n\n Return:\n String of beautified list\n \"\"\"\n string_list = [str(i) for i in l[:max_elements_to_show]]\n output = ', '.join(string_list)\n\n if len(output) > max_string_length:\n return output[:max_string_length] + '...'\n\n if len(l) > max_elements_to_show:\n return output + ', ...'\n\n return output\n\n\ndef format_columns_for_condition(\n columns: t.Union[Hashable, t.List[Hashable], None] = None,\n ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None\n) -> str:\n \"\"\"Format columns properties for display in condition name.\n\n Args:\n columns (Union[Hashable, List[Hashable], None]):\n columns to include into resulting string\n ignore_columns (Union[Hashable, List[Hashable], None]):\n columns to not include into resulting string\n\n Returns: formatted string of columns\n \"\"\"\n if columns is not None:\n columns = ensure_hashable_or_mutable_sequence(columns)\n return f'columns: {\",\".join(map(str, columns))}'\n elif ignore_columns is not None:\n ignore_columns = ensure_hashable_or_mutable_sequence(ignore_columns)\n return f'all columns ignoring: {\",\".join(map(str, ignore_columns))}'\n else:\n return 'all columns'\n", "path": "deepchecks/utils/strings.py"}]}
| 3,745 | 415 |