Dataset schema (column: dtype, observed range):
- problem_id: string (length 18 to 22)
- source: string (1 distinct value)
- task_type: string (1 distinct value)
- in_source_id: string (length 13 to 58)
- prompt: string (length 1.71k to 18.9k)
- golden_diff: string (length 145 to 5.13k)
- verification_info: string (length 465 to 23.6k)
- num_tokens_prompt: int64 (556 to 4.1k)
- num_tokens_diff: int64 (47 to 1.02k)
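The sample rows below follow this schema in field order. As a quick way to load and inspect the data, here is a minimal sketch using the Hugging Face `datasets` library; the dataset id is taken from the `source` field of the rows ("rasdani/github-patches") and the split name is an assumption, so adjust both if they differ.

```python
# Minimal loading/inspection sketch. Assumptions: the dataset is published
# on the Hugging Face Hub under "rasdani/github-patches" (the value of the
# `source` column below) and exposes a "train" split -- both are guesses.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])
print(row["golden_diff"][:300])
```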
problem_id: gh_patches_debug_18039
source: rasdani/github-patches
task_type: git_diff
in_source_id: TOMToolkit__tom_base-99
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "Since Time" filter in MARS query Using the MARS query, the results do not depend on the value chosen for the "Since Time" filter. </issue> <code> [start of tom_alerts/brokers/mars.py] 1 import requests 2 import json 3 from requests.exceptions import HTTPError 4 from urllib.parse import urlencode 5 from dateutil.parser import parse 6 from django import forms 7 from crispy_forms.layout import Layout, Div, Fieldset, HTML 8 from astropy.time import Time, TimezoneInfo 9 10 from tom_alerts.alerts import GenericQueryForm, GenericAlert 11 from tom_targets.models import Target, TargetExtra 12 from tom_dataproducts.models import ReducedDatum 13 14 MARS_URL = 'https://mars.lco.global' 15 filters = {0: 'g', 1: 'r', 2: 'i'} 16 17 18 class MARSQueryForm(GenericQueryForm): 19 time__gt = forms.CharField( 20 required=False, 21 label='Time Lower', 22 widget=forms.TextInput(attrs={'type': 'date'}) 23 ) 24 time__lt = forms.CharField( 25 required=False, 26 label='Time Upper', 27 widget=forms.TextInput(attrs={'type': 'date'}) 28 ) 29 since__time = forms.IntegerField( 30 required=False, 31 label='Since Time', 32 help_text='Alerts younger than this number of seconds' 33 ) 34 jd__gt = forms.FloatField(required=False, label='JD Lower') 35 jd__lt = forms.FloatField(required=False, label='JD Upper') 36 filter = forms.CharField(required=False) 37 cone = forms.CharField( 38 required=False, 39 label='Cone Search', 40 help_text='RA,Dec,radius in degrees' 41 ) 42 objectcone = forms.CharField( 43 required=False, 44 label='Object Cone Search', 45 help_text='Object name,radius in degrees' 46 ) 47 objectidps = forms.CharField( 48 required=False, 49 label='Nearby Objects', 50 help_text='Id from PS1 catalog' 51 ) 52 ra__gt = forms.FloatField(required=False, label='RA Lower') 53 ra__lt = forms.FloatField(required=False, label='RA Upper') 54 dec__gt = forms.FloatField(required=False, label='Dec Lower') 55 dec__lt = forms.FloatField(required=False, label='Dec Upper') 56 l__gt = forms.FloatField(required=False, label='l Lower') 57 l__lt = forms.FloatField(required=False, label='l Upper') 58 b__gt = forms.FloatField(required=False, label='b Lower') 59 b__lt = forms.FloatField(required=False, label='b Upper') 60 magpsf__gte = forms.FloatField(required=False, label='Magpsf Lower') 61 magpsf__lte = forms.FloatField(required=False, label='Magpsf Upper') 62 sigmapsf__lte = forms.FloatField(required=False, label='Sigmapsf Upper') 63 magap__gte = forms.FloatField(required=False, label='Magap Lower') 64 magap__lte = forms.FloatField(required=False, label='Magap Upper') 65 distnr__gte = forms.FloatField(required=False, label='Distnr Lower') 66 distnr__lte = forms.FloatField(required=False, label='Distnr Upper') 67 deltamaglatest__gte = forms.FloatField( 68 required=False, 69 label='Delta Mag Lower' 70 ) 71 deltamaglatest__lte = forms.FloatField( 72 required=False, 73 label='Delta Mag Upper' 74 ) 75 deltamagref__gte = forms.FloatField( 76 required=False, 77 label='Delta Mag Ref Lower' 78 ) 79 deltamagref__lte = forms.FloatField( 80 required=False, 81 label='Delta Mag Ref Upper' 82 ) 83 rb__gte = forms.FloatField(required=False, label='Real/Bogus Lower') 84 classtar__gte = forms.FloatField(required=False, label='Classtar Lower') 85 classtar__lte = forms.FloatField(required=False, label='Classtar Upper') 86 fwhm__lte = forms.FloatField(required=False, label='FWHM Upper') 87 88 def __init__(self, *args, **kwargs): 89 
super().__init__(*args, **kwargs) 90 self.helper.layout = Layout( 91 HTML(''' 92 <p> 93 Please see <a href="https://mars.lco.global/help">MARS help</a> 94 for a detailed description of available filters. 95 </p> 96 '''), 97 self.common_layout, 98 Fieldset( 99 'Time based filters', 100 'since__time', 101 Div( 102 Div( 103 'time__gt', 104 'jd__gt', 105 css_class='col', 106 ), 107 Div( 108 'time__lt', 109 'jd__lt', 110 css_class='col', 111 ), 112 css_class="form-row", 113 ) 114 ), 115 Fieldset( 116 'Location based filters', 117 'cone', 118 'objectcone', 119 'objectidps', 120 Div( 121 Div( 122 'ra__gt', 123 'dec__gt', 124 'l__gt', 125 'b__gt', 126 css_class='col', 127 ), 128 Div( 129 'ra__lt', 130 'dec__lt', 131 'l__lt', 132 'b__lt', 133 css_class='col', 134 ), 135 css_class="form-row", 136 ) 137 ), 138 Fieldset( 139 'Other Filters', 140 Div( 141 Div( 142 'magpsf__gte', 143 'magap__gte', 144 'distnr__gte', 145 'deltamaglatest__gte', 146 'deltamagref__gte', 147 'classtar__gte', 148 css_class='col' 149 ), 150 Div( 151 'magpsf__lte', 152 'magap__lte', 153 'distnr__lte', 154 'deltamaglatest__lte', 155 'deltamagref__lte', 156 'classtar__lte', 157 css_class='col' 158 ), 159 css_class='form-row', 160 ) 161 ), 162 'filter', 163 'sigmapsf__lte', 164 'rb__gte', 165 'fwhm__lte' 166 ) 167 168 169 class MARSBroker(object): 170 name = 'MARS' 171 form = MARSQueryForm 172 173 def _clean_parameters(self, parameters): 174 return {k: v for k, v in parameters.items() if v and k != 'page'} 175 176 def fetch_alerts(self, parameters): 177 if not parameters.get('page'): 178 parameters['page'] = 1 179 args = urlencode(self._clean_parameters(parameters)) 180 url = '{0}/?page={1}&format=json&{2}'.format( 181 MARS_URL, 182 parameters['page'], 183 args 184 ) 185 alerts = [] 186 response = requests.get(url) 187 response.raise_for_status() 188 parsed = response.json() 189 alerts = parsed['results'] 190 if parsed['has_next'] and parameters['page'] < 10: 191 parameters['page'] += 1 192 alerts += self.fetch_alerts(parameters) 193 return alerts 194 195 def fetch_alert(self, id): 196 url = f'{MARS_URL}/{id}/?format=json' 197 response = requests.get(url) 198 response.raise_for_status() 199 parsed = response.json() 200 return parsed 201 202 def process_reduced_data(self, target, alert=None): 203 if not alert: 204 try: 205 target_datum = ReducedDatum.objects.filter( 206 target=target, 207 data_type='PHOTOMETRY', 208 source_name=self.name).first() 209 if not target_datum: 210 return 211 alert = self.fetch_alert(target_datum.source_location) 212 except HTTPError: 213 raise Exception('Unable to retrieve alert information from broker') 214 for prv_candidate in alert.get('prv_candidate'): 215 if all([key in prv_candidate['candidate'] for key in ['jd', 'magpsf', 'fid']]): 216 jd = Time(prv_candidate['candidate']['jd'], format='jd', scale='utc') 217 jd.to_datetime(timezone=TimezoneInfo()) 218 value = { 219 'magnitude': prv_candidate['candidate']['magpsf'], 220 'filter': filters[prv_candidate['candidate']['fid']] 221 } 222 rd, created = ReducedDatum.objects.get_or_create( 223 timestamp=jd.to_datetime(timezone=TimezoneInfo()), 224 value=json.dumps(value), 225 source_name=self.name, 226 source_location=alert['lco_id'], 227 data_type='photometry', 228 target=target) 229 rd.save() 230 231 def to_target(self, alert): 232 alert_copy = alert.copy() 233 target = Target.objects.create( 234 identifier=alert_copy['objectId'], 235 name=alert_copy['objectId'], 236 type='SIDEREAL', 237 ra=alert_copy['candidate'].pop('ra'), 238 
dec=alert_copy['candidate'].pop('dec'), 239 galactic_lng=alert_copy['candidate'].pop('l'), 240 galactic_lat=alert_copy['candidate'].pop('b'), 241 ) 242 for k, v in alert_copy['candidate'].items(): 243 if v is not None: 244 TargetExtra.objects.create(target=target, key=k, value=v) 245 246 return target 247 248 def to_generic_alert(self, alert): 249 timestamp = parse(alert['candidate']['wall_time']) 250 url = '{0}/{1}/'.format(MARS_URL, alert['lco_id']) 251 252 return GenericAlert( 253 timestamp=timestamp, 254 url=url, 255 id=alert['lco_id'], 256 name=alert['objectId'], 257 ra=alert['candidate']['ra'], 258 dec=alert['candidate']['dec'], 259 mag=alert['candidate']['magpsf'], 260 score=alert['candidate']['rb'] 261 ) 262 [end of tom_alerts/brokers/mars.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tom_alerts/brokers/mars.py b/tom_alerts/brokers/mars.py --- a/tom_alerts/brokers/mars.py +++ b/tom_alerts/brokers/mars.py @@ -26,9 +26,9 @@ label='Time Upper', widget=forms.TextInput(attrs={'type': 'date'}) ) - since__time = forms.IntegerField( + time__since = forms.IntegerField( required=False, - label='Since Time', + label='Time Since', help_text='Alerts younger than this number of seconds' ) jd__gt = forms.FloatField(required=False, label='JD Lower') @@ -97,7 +97,7 @@ self.common_layout, Fieldset( 'Time based filters', - 'since__time', + 'time__since', Div( Div( 'time__gt',
{"golden_diff": "diff --git a/tom_alerts/brokers/mars.py b/tom_alerts/brokers/mars.py\n--- a/tom_alerts/brokers/mars.py\n+++ b/tom_alerts/brokers/mars.py\n@@ -26,9 +26,9 @@\n label='Time Upper',\n widget=forms.TextInput(attrs={'type': 'date'})\n )\n- since__time = forms.IntegerField(\n+ time__since = forms.IntegerField(\n required=False,\n- label='Since Time',\n+ label='Time Since',\n help_text='Alerts younger than this number of seconds'\n )\n jd__gt = forms.FloatField(required=False, label='JD Lower')\n@@ -97,7 +97,7 @@\n self.common_layout,\n Fieldset(\n 'Time based filters',\n- 'since__time',\n+ 'time__since',\n Div(\n Div(\n 'time__gt',\n", "issue": "\"Since Time\" filter in MARS query\nUsing the MARS query, the results do not depend on the value chosen for the \"Since Time\" filter.\n", "before_files": [{"content": "import requests\nimport json\nfrom requests.exceptions import HTTPError\nfrom urllib.parse import urlencode\nfrom dateutil.parser import parse\nfrom django import forms\nfrom crispy_forms.layout import Layout, Div, Fieldset, HTML\nfrom astropy.time import Time, TimezoneInfo\n\nfrom tom_alerts.alerts import GenericQueryForm, GenericAlert\nfrom tom_targets.models import Target, TargetExtra\nfrom tom_dataproducts.models import ReducedDatum\n\nMARS_URL = 'https://mars.lco.global'\nfilters = {0: 'g', 1: 'r', 2: 'i'}\n\n\nclass MARSQueryForm(GenericQueryForm):\n time__gt = forms.CharField(\n required=False,\n label='Time Lower',\n widget=forms.TextInput(attrs={'type': 'date'})\n )\n time__lt = forms.CharField(\n required=False,\n label='Time Upper',\n widget=forms.TextInput(attrs={'type': 'date'})\n )\n since__time = forms.IntegerField(\n required=False,\n label='Since Time',\n help_text='Alerts younger than this number of seconds'\n )\n jd__gt = forms.FloatField(required=False, label='JD Lower')\n jd__lt = forms.FloatField(required=False, label='JD Upper')\n filter = forms.CharField(required=False)\n cone = forms.CharField(\n required=False,\n label='Cone Search',\n help_text='RA,Dec,radius in degrees'\n )\n objectcone = forms.CharField(\n required=False,\n label='Object Cone Search',\n help_text='Object name,radius in degrees'\n )\n objectidps = forms.CharField(\n required=False,\n label='Nearby Objects',\n help_text='Id from PS1 catalog'\n )\n ra__gt = forms.FloatField(required=False, label='RA Lower')\n ra__lt = forms.FloatField(required=False, label='RA Upper')\n dec__gt = forms.FloatField(required=False, label='Dec Lower')\n dec__lt = forms.FloatField(required=False, label='Dec Upper')\n l__gt = forms.FloatField(required=False, label='l Lower')\n l__lt = forms.FloatField(required=False, label='l Upper')\n b__gt = forms.FloatField(required=False, label='b Lower')\n b__lt = forms.FloatField(required=False, label='b Upper')\n magpsf__gte = forms.FloatField(required=False, label='Magpsf Lower')\n magpsf__lte = forms.FloatField(required=False, label='Magpsf Upper')\n sigmapsf__lte = forms.FloatField(required=False, label='Sigmapsf Upper')\n magap__gte = forms.FloatField(required=False, label='Magap Lower')\n magap__lte = forms.FloatField(required=False, label='Magap Upper')\n distnr__gte = forms.FloatField(required=False, label='Distnr Lower')\n distnr__lte = forms.FloatField(required=False, label='Distnr Upper')\n deltamaglatest__gte = forms.FloatField(\n required=False,\n label='Delta Mag Lower'\n )\n deltamaglatest__lte = forms.FloatField(\n required=False,\n label='Delta Mag Upper'\n )\n deltamagref__gte = forms.FloatField(\n required=False,\n label='Delta Mag Ref Lower'\n 
)\n deltamagref__lte = forms.FloatField(\n required=False,\n label='Delta Mag Ref Upper'\n )\n rb__gte = forms.FloatField(required=False, label='Real/Bogus Lower')\n classtar__gte = forms.FloatField(required=False, label='Classtar Lower')\n classtar__lte = forms.FloatField(required=False, label='Classtar Upper')\n fwhm__lte = forms.FloatField(required=False, label='FWHM Upper')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper.layout = Layout(\n HTML('''\n <p>\n Please see <a href=\"https://mars.lco.global/help\">MARS help</a>\n for a detailed description of available filters.\n </p>\n '''),\n self.common_layout,\n Fieldset(\n 'Time based filters',\n 'since__time',\n Div(\n Div(\n 'time__gt',\n 'jd__gt',\n css_class='col',\n ),\n Div(\n 'time__lt',\n 'jd__lt',\n css_class='col',\n ),\n css_class=\"form-row\",\n )\n ),\n Fieldset(\n 'Location based filters',\n 'cone',\n 'objectcone',\n 'objectidps',\n Div(\n Div(\n 'ra__gt',\n 'dec__gt',\n 'l__gt',\n 'b__gt',\n css_class='col',\n ),\n Div(\n 'ra__lt',\n 'dec__lt',\n 'l__lt',\n 'b__lt',\n css_class='col',\n ),\n css_class=\"form-row\",\n )\n ),\n Fieldset(\n 'Other Filters',\n Div(\n Div(\n 'magpsf__gte',\n 'magap__gte',\n 'distnr__gte',\n 'deltamaglatest__gte',\n 'deltamagref__gte',\n 'classtar__gte',\n css_class='col'\n ),\n Div(\n 'magpsf__lte',\n 'magap__lte',\n 'distnr__lte',\n 'deltamaglatest__lte',\n 'deltamagref__lte',\n 'classtar__lte',\n css_class='col'\n ),\n css_class='form-row',\n )\n ),\n 'filter',\n 'sigmapsf__lte',\n 'rb__gte',\n 'fwhm__lte'\n )\n\n\nclass MARSBroker(object):\n name = 'MARS'\n form = MARSQueryForm\n\n def _clean_parameters(self, parameters):\n return {k: v for k, v in parameters.items() if v and k != 'page'}\n\n def fetch_alerts(self, parameters):\n if not parameters.get('page'):\n parameters['page'] = 1\n args = urlencode(self._clean_parameters(parameters))\n url = '{0}/?page={1}&format=json&{2}'.format(\n MARS_URL,\n parameters['page'],\n args\n )\n alerts = []\n response = requests.get(url)\n response.raise_for_status()\n parsed = response.json()\n alerts = parsed['results']\n if parsed['has_next'] and parameters['page'] < 10:\n parameters['page'] += 1\n alerts += self.fetch_alerts(parameters)\n return alerts\n\n def fetch_alert(self, id):\n url = f'{MARS_URL}/{id}/?format=json'\n response = requests.get(url)\n response.raise_for_status()\n parsed = response.json()\n return parsed\n\n def process_reduced_data(self, target, alert=None):\n if not alert:\n try:\n target_datum = ReducedDatum.objects.filter(\n target=target,\n data_type='PHOTOMETRY',\n source_name=self.name).first()\n if not target_datum:\n return\n alert = self.fetch_alert(target_datum.source_location)\n except HTTPError:\n raise Exception('Unable to retrieve alert information from broker')\n for prv_candidate in alert.get('prv_candidate'):\n if all([key in prv_candidate['candidate'] for key in ['jd', 'magpsf', 'fid']]):\n jd = Time(prv_candidate['candidate']['jd'], format='jd', scale='utc')\n jd.to_datetime(timezone=TimezoneInfo())\n value = {\n 'magnitude': prv_candidate['candidate']['magpsf'],\n 'filter': filters[prv_candidate['candidate']['fid']]\n }\n rd, created = ReducedDatum.objects.get_or_create(\n timestamp=jd.to_datetime(timezone=TimezoneInfo()),\n value=json.dumps(value),\n source_name=self.name,\n source_location=alert['lco_id'],\n data_type='photometry',\n target=target)\n rd.save()\n\n def to_target(self, alert):\n alert_copy = alert.copy()\n target = Target.objects.create(\n 
identifier=alert_copy['objectId'],\n name=alert_copy['objectId'],\n type='SIDEREAL',\n ra=alert_copy['candidate'].pop('ra'),\n dec=alert_copy['candidate'].pop('dec'),\n galactic_lng=alert_copy['candidate'].pop('l'),\n galactic_lat=alert_copy['candidate'].pop('b'),\n )\n for k, v in alert_copy['candidate'].items():\n if v is not None:\n TargetExtra.objects.create(target=target, key=k, value=v)\n\n return target\n\n def to_generic_alert(self, alert):\n timestamp = parse(alert['candidate']['wall_time'])\n url = '{0}/{1}/'.format(MARS_URL, alert['lco_id'])\n\n return GenericAlert(\n timestamp=timestamp,\n url=url,\n id=alert['lco_id'],\n name=alert['objectId'],\n ra=alert['candidate']['ra'],\n dec=alert['candidate']['dec'],\n mag=alert['candidate']['magpsf'],\n score=alert['candidate']['rb']\n )\n", "path": "tom_alerts/brokers/mars.py"}]}
num_tokens_prompt: 3,217
num_tokens_diff: 202
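The `verification_info` field above is a string carrying the same `golden_diff`, `issue`, and `before_files` payloads in JSON-like form. A hedged sketch of decoding it (assuming it parses as JSON; if it is a Python literal instead, swap `json.loads` for `ast.literal_eval`):

```python
# Decode one row's verification_info. Assumption: the field parses as JSON
# with the keys visible in the sample above ("golden_diff", "issue",
# "before_files"); `row` comes from the loading sketch earlier.
import json

info = json.loads(row["verification_info"])
print(info["issue"].strip().splitlines()[0])  # first line of the issue text
for f in info["before_files"]:
    print(f["path"], "-", len(f["content"]), "chars")
```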
problem_id: gh_patches_debug_16954
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__setuptools-597
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unfriendly error message when unicode is passed to package_dir or packages Originally reported by: **jaraco (Bitbucket: [jaraco](http://bitbucket.org/jaraco), GitHub: [jaraco](http://github.com/jaraco))** --- Adding `__future__.unicode_literals` causes setup.py scripts to fail. In the mailman project, here is an example error when running python2 setup.py build: ``` running build running build_py Traceback (most recent call last): File "setup.py", line 112, in <module> test_suite = 'nose2.collector.collector', File "C:\Program Files\Python27\lib\distutils\core.py", line 152, in setup dist.run_commands() File "C:\Program Files\Python27\lib\distutils\dist.py", line 953, in run_commands self.run_command(cmd) File "C:\Program Files\Python27\lib\distutils\dist.py", line 972, in run_command cmd_obj.run() File "C:\Program Files\Python27\lib\distutils\command\build.py", line 127, in run self.run_command(cmd_name) File "C:\Program Files\Python27\lib\distutils\cmd.py", line 326, in run_command self.distribution.run_command(command) File "C:\Program Files\Python27\lib\distutils\dist.py", line 972, in run_command cmd_obj.run() File "build\bdist.win-amd64\egg\setuptools\command\build_py.py", line 42, in run File "C:\Program Files\Python27\lib\distutils\command\build_py.py", line 372, in build_packages self.build_module(module, module_file, package) File "build\bdist.win-amd64\egg\setuptools\command\build_py.py", line 60, in build_module File "C:\Program Files\Python27\lib\distutils\command\build_py.py", line 333, in build_module "'package' must be a string (dot-separated), list, or tuple") TypeError: 'package' must be a string (dot-separated), list, or tuple ``` A different error occurs when using 'develop': ``` running develop error: 'egg_base' must be a directory name (got `src`) ``` Setuptools could make this error message nicer. --- - Bitbucket: https://bitbucket.org/pypa/setuptools/issue/190 </issue> <code> [start of setuptools/command/build_py.py] 1 from glob import glob 2 from distutils.util import convert_path 3 import distutils.command.build_py as orig 4 import os 5 import fnmatch 6 import textwrap 7 import io 8 import distutils.errors 9 import itertools 10 11 from setuptools.extern.six.moves import map, filter, filterfalse 12 13 try: 14 from setuptools.lib2to3_ex import Mixin2to3 15 except ImportError: 16 class Mixin2to3: 17 def run_2to3(self, files, doctests=True): 18 "do nothing" 19 20 21 class build_py(orig.build_py, Mixin2to3): 22 """Enhanced 'build_py' command that includes data files with packages 23 24 The data files are specified via a 'package_data' argument to 'setup()'. 25 See 'setuptools.dist.Distribution' for more details. 26 27 Also, this version of the 'build_py' command allows you to specify both 28 'py_modules' and 'packages' in the same setup operation. 
29 """ 30 31 def finalize_options(self): 32 orig.build_py.finalize_options(self) 33 self.package_data = self.distribution.package_data 34 self.exclude_package_data = (self.distribution.exclude_package_data or 35 {}) 36 if 'data_files' in self.__dict__: 37 del self.__dict__['data_files'] 38 self.__updated_files = [] 39 self.__doctests_2to3 = [] 40 41 def run(self): 42 """Build modules, packages, and copy data files to build directory""" 43 if not self.py_modules and not self.packages: 44 return 45 46 if self.py_modules: 47 self.build_modules() 48 49 if self.packages: 50 self.build_packages() 51 self.build_package_data() 52 53 self.run_2to3(self.__updated_files, False) 54 self.run_2to3(self.__updated_files, True) 55 self.run_2to3(self.__doctests_2to3, True) 56 57 # Only compile actual .py files, using our base class' idea of what our 58 # output files are. 59 self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0)) 60 61 def __getattr__(self, attr): 62 "lazily compute data files" 63 if attr == 'data_files': 64 self.data_files = self._get_data_files() 65 return self.data_files 66 return orig.build_py.__getattr__(self, attr) 67 68 def build_module(self, module, module_file, package): 69 outfile, copied = orig.build_py.build_module(self, module, module_file, 70 package) 71 if copied: 72 self.__updated_files.append(outfile) 73 return outfile, copied 74 75 def _get_data_files(self): 76 """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" 77 self.analyze_manifest() 78 return list(map(self._get_pkg_data_files, self.packages or ())) 79 80 def _get_pkg_data_files(self, package): 81 # Locate package source directory 82 src_dir = self.get_package_dir(package) 83 84 # Compute package build directory 85 build_dir = os.path.join(*([self.build_lib] + package.split('.'))) 86 87 # Strip directory from globbed filenames 88 filenames = [ 89 os.path.relpath(file, src_dir) 90 for file in self.find_data_files(package, src_dir) 91 ] 92 return package, src_dir, build_dir, filenames 93 94 def find_data_files(self, package, src_dir): 95 """Return filenames for package's data files in 'src_dir'""" 96 patterns = self._get_platform_patterns( 97 self.package_data, 98 package, 99 src_dir, 100 ) 101 globs_expanded = map(glob, patterns) 102 # flatten the expanded globs into an iterable of matches 103 globs_matches = itertools.chain.from_iterable(globs_expanded) 104 glob_files = filter(os.path.isfile, globs_matches) 105 files = itertools.chain( 106 self.manifest_files.get(package, []), 107 glob_files, 108 ) 109 return self.exclude_data_files(package, src_dir, files) 110 111 def build_package_data(self): 112 """Copy data files into build directory""" 113 for package, src_dir, build_dir, filenames in self.data_files: 114 for filename in filenames: 115 target = os.path.join(build_dir, filename) 116 self.mkpath(os.path.dirname(target)) 117 srcfile = os.path.join(src_dir, filename) 118 outf, copied = self.copy_file(srcfile, target) 119 srcfile = os.path.abspath(srcfile) 120 if (copied and 121 srcfile in self.distribution.convert_2to3_doctests): 122 self.__doctests_2to3.append(outf) 123 124 def analyze_manifest(self): 125 self.manifest_files = mf = {} 126 if not self.distribution.include_package_data: 127 return 128 src_dirs = {} 129 for package in self.packages or (): 130 # Locate package source directory 131 src_dirs[assert_relative(self.get_package_dir(package))] = package 132 133 self.run_command('egg_info') 134 ei_cmd = self.get_finalized_command('egg_info') 135 for path in 
ei_cmd.filelist.files: 136 d, f = os.path.split(assert_relative(path)) 137 prev = None 138 oldf = f 139 while d and d != prev and d not in src_dirs: 140 prev = d 141 d, df = os.path.split(d) 142 f = os.path.join(df, f) 143 if d in src_dirs: 144 if path.endswith('.py') and f == oldf: 145 continue # it's a module, not data 146 mf.setdefault(src_dirs[d], []).append(path) 147 148 def get_data_files(self): 149 pass # Lazily compute data files in _get_data_files() function. 150 151 def check_package(self, package, package_dir): 152 """Check namespace packages' __init__ for declare_namespace""" 153 try: 154 return self.packages_checked[package] 155 except KeyError: 156 pass 157 158 init_py = orig.build_py.check_package(self, package, package_dir) 159 self.packages_checked[package] = init_py 160 161 if not init_py or not self.distribution.namespace_packages: 162 return init_py 163 164 for pkg in self.distribution.namespace_packages: 165 if pkg == package or pkg.startswith(package + '.'): 166 break 167 else: 168 return init_py 169 170 with io.open(init_py, 'rb') as f: 171 contents = f.read() 172 if b'declare_namespace' not in contents: 173 raise distutils.errors.DistutilsError( 174 "Namespace package problem: %s is a namespace package, but " 175 "its\n__init__.py does not call declare_namespace()! Please " 176 'fix it.\n(See the setuptools manual under ' 177 '"Namespace Packages" for details.)\n"' % (package,) 178 ) 179 return init_py 180 181 def initialize_options(self): 182 self.packages_checked = {} 183 orig.build_py.initialize_options(self) 184 185 def get_package_dir(self, package): 186 res = orig.build_py.get_package_dir(self, package) 187 if self.distribution.src_root is not None: 188 return os.path.join(self.distribution.src_root, res) 189 return res 190 191 def exclude_data_files(self, package, src_dir, files): 192 """Filter filenames for package's data files in 'src_dir'""" 193 files = list(files) 194 patterns = self._get_platform_patterns( 195 self.exclude_package_data, 196 package, 197 src_dir, 198 ) 199 match_groups = ( 200 fnmatch.filter(files, pattern) 201 for pattern in patterns 202 ) 203 # flatten the groups of matches into an iterable of matches 204 matches = itertools.chain.from_iterable(match_groups) 205 bad = set(matches) 206 keepers = ( 207 fn 208 for fn in files 209 if fn not in bad 210 ) 211 # ditch dupes 212 return list(_unique_everseen(keepers)) 213 214 @staticmethod 215 def _get_platform_patterns(spec, package, src_dir): 216 """ 217 yield platfrom-specific path patterns (suitable for glob 218 or fn_match) from a glob-based spec (such as 219 self.package_data or self.exclude_package_data) 220 matching package in src_dir. 221 """ 222 raw_patterns = itertools.chain( 223 spec.get('', []), 224 spec.get(package, []), 225 ) 226 return ( 227 # Each pattern has to be converted to a platform-specific path 228 os.path.join(src_dir, convert_path(pattern)) 229 for pattern in raw_patterns 230 ) 231 232 233 # from Python docs 234 def _unique_everseen(iterable, key=None): 235 "List unique elements, preserving order. Remember all elements ever seen." 
236 # unique_everseen('AAAABBBCCDAABBB') --> A B C D 237 # unique_everseen('ABBCcAD', str.lower) --> A B C D 238 seen = set() 239 seen_add = seen.add 240 if key is None: 241 for element in filterfalse(seen.__contains__, iterable): 242 seen_add(element) 243 yield element 244 else: 245 for element in iterable: 246 k = key(element) 247 if k not in seen: 248 seen_add(k) 249 yield element 250 251 252 def assert_relative(path): 253 if not os.path.isabs(path): 254 return path 255 from distutils.errors import DistutilsSetupError 256 257 msg = textwrap.dedent(""" 258 Error: setup script specifies an absolute path: 259 260 %s 261 262 setup() arguments must *always* be /-separated paths relative to the 263 setup.py directory, *never* absolute paths. 264 """).lstrip() % path 265 raise DistutilsSetupError(msg) 266 [end of setuptools/command/build_py.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py --- a/setuptools/command/build_py.py +++ b/setuptools/command/build_py.py @@ -8,6 +8,7 @@ import distutils.errors import itertools +from setuptools.extern import six from setuptools.extern.six.moves import map, filter, filterfalse try: @@ -66,6 +67,9 @@ return orig.build_py.__getattr__(self, attr) def build_module(self, module, module_file, package): + if six.PY2 and isinstance(package, six.string_types): + # avoid errors on Python 2 when unicode is passed (#190) + package = package.split('.') outfile, copied = orig.build_py.build_module(self, module, module_file, package) if copied:
{"golden_diff": "diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py\n--- a/setuptools/command/build_py.py\n+++ b/setuptools/command/build_py.py\n@@ -8,6 +8,7 @@\n import distutils.errors\n import itertools\n \n+from setuptools.extern import six\n from setuptools.extern.six.moves import map, filter, filterfalse\n \n try:\n@@ -66,6 +67,9 @@\n return orig.build_py.__getattr__(self, attr)\n \n def build_module(self, module, module_file, package):\n+ if six.PY2 and isinstance(package, six.string_types):\n+ # avoid errors on Python 2 when unicode is passed (#190)\n+ package = package.split('.')\n outfile, copied = orig.build_py.build_module(self, module, module_file,\n package)\n if copied:\n", "issue": "Unfriendly error message when unicode is passed to package_dir or packages\nOriginally reported by: **jaraco (Bitbucket: [jaraco](http://bitbucket.org/jaraco), GitHub: [jaraco](http://github.com/jaraco))**\n\n---\n\nAdding `__future__.unicode_literals` causes setup.py scripts to fail. In the mailman project, here is an example error when running python2 setup.py build:\n\n```\nrunning build\nrunning build_py\nTraceback (most recent call last):\n File \"setup.py\", line 112, in <module>\n test_suite = 'nose2.collector.collector',\n File \"C:\\Program Files\\Python27\\lib\\distutils\\core.py\", line 152, in setup\n dist.run_commands()\n File \"C:\\Program Files\\Python27\\lib\\distutils\\dist.py\", line 953, in run_commands\n self.run_command(cmd)\n File \"C:\\Program Files\\Python27\\lib\\distutils\\dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"C:\\Program Files\\Python27\\lib\\distutils\\command\\build.py\", line 127, in run\n self.run_command(cmd_name)\n File \"C:\\Program Files\\Python27\\lib\\distutils\\cmd.py\", line 326, in run_command\n self.distribution.run_command(command)\n File \"C:\\Program Files\\Python27\\lib\\distutils\\dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"build\\bdist.win-amd64\\egg\\setuptools\\command\\build_py.py\", line 42, in run\n File \"C:\\Program Files\\Python27\\lib\\distutils\\command\\build_py.py\", line 372, in build_packages\n self.build_module(module, module_file, package)\n File \"build\\bdist.win-amd64\\egg\\setuptools\\command\\build_py.py\", line 60, in build_module\n File \"C:\\Program Files\\Python27\\lib\\distutils\\command\\build_py.py\", line 333, in build_module\n \"'package' must be a string (dot-separated), list, or tuple\")\nTypeError: 'package' must be a string (dot-separated), list, or tuple\n```\n\nA different error occurs when using 'develop':\n\n```\nrunning develop\nerror: 'egg_base' must be a directory name (got `src`)\n```\n\nSetuptools could make this error message nicer.\n\n---\n- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/190\n\n", "before_files": [{"content": "from glob import glob\nfrom distutils.util import convert_path\nimport distutils.command.build_py as orig\nimport os\nimport fnmatch\nimport textwrap\nimport io\nimport distutils.errors\nimport itertools\n\nfrom setuptools.extern.six.moves import map, filter, filterfalse\n\ntry:\n from setuptools.lib2to3_ex import Mixin2to3\nexcept ImportError:\n class Mixin2to3:\n def run_2to3(self, files, doctests=True):\n \"do nothing\"\n\n\nclass build_py(orig.build_py, Mixin2to3):\n \"\"\"Enhanced 'build_py' command that includes data files with packages\n\n The data files are specified via a 'package_data' argument to 'setup()'.\n See 'setuptools.dist.Distribution' for more details.\n\n Also, this version of the 
'build_py' command allows you to specify both\n 'py_modules' and 'packages' in the same setup operation.\n \"\"\"\n\n def finalize_options(self):\n orig.build_py.finalize_options(self)\n self.package_data = self.distribution.package_data\n self.exclude_package_data = (self.distribution.exclude_package_data or\n {})\n if 'data_files' in self.__dict__:\n del self.__dict__['data_files']\n self.__updated_files = []\n self.__doctests_2to3 = []\n\n def run(self):\n \"\"\"Build modules, packages, and copy data files to build directory\"\"\"\n if not self.py_modules and not self.packages:\n return\n\n if self.py_modules:\n self.build_modules()\n\n if self.packages:\n self.build_packages()\n self.build_package_data()\n\n self.run_2to3(self.__updated_files, False)\n self.run_2to3(self.__updated_files, True)\n self.run_2to3(self.__doctests_2to3, True)\n\n # Only compile actual .py files, using our base class' idea of what our\n # output files are.\n self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))\n\n def __getattr__(self, attr):\n \"lazily compute data files\"\n if attr == 'data_files':\n self.data_files = self._get_data_files()\n return self.data_files\n return orig.build_py.__getattr__(self, attr)\n\n def build_module(self, module, module_file, package):\n outfile, copied = orig.build_py.build_module(self, module, module_file,\n package)\n if copied:\n self.__updated_files.append(outfile)\n return outfile, copied\n\n def _get_data_files(self):\n \"\"\"Generate list of '(package,src_dir,build_dir,filenames)' tuples\"\"\"\n self.analyze_manifest()\n return list(map(self._get_pkg_data_files, self.packages or ()))\n\n def _get_pkg_data_files(self, package):\n # Locate package source directory\n src_dir = self.get_package_dir(package)\n\n # Compute package build directory\n build_dir = os.path.join(*([self.build_lib] + package.split('.')))\n\n # Strip directory from globbed filenames\n filenames = [\n os.path.relpath(file, src_dir)\n for file in self.find_data_files(package, src_dir)\n ]\n return package, src_dir, build_dir, filenames\n\n def find_data_files(self, package, src_dir):\n \"\"\"Return filenames for package's data files in 'src_dir'\"\"\"\n patterns = self._get_platform_patterns(\n self.package_data,\n package,\n src_dir,\n )\n globs_expanded = map(glob, patterns)\n # flatten the expanded globs into an iterable of matches\n globs_matches = itertools.chain.from_iterable(globs_expanded)\n glob_files = filter(os.path.isfile, globs_matches)\n files = itertools.chain(\n self.manifest_files.get(package, []),\n glob_files,\n )\n return self.exclude_data_files(package, src_dir, files)\n\n def build_package_data(self):\n \"\"\"Copy data files into build directory\"\"\"\n for package, src_dir, build_dir, filenames in self.data_files:\n for filename in filenames:\n target = os.path.join(build_dir, filename)\n self.mkpath(os.path.dirname(target))\n srcfile = os.path.join(src_dir, filename)\n outf, copied = self.copy_file(srcfile, target)\n srcfile = os.path.abspath(srcfile)\n if (copied and\n srcfile in self.distribution.convert_2to3_doctests):\n self.__doctests_2to3.append(outf)\n\n def analyze_manifest(self):\n self.manifest_files = mf = {}\n if not self.distribution.include_package_data:\n return\n src_dirs = {}\n for package in self.packages or ():\n # Locate package source directory\n src_dirs[assert_relative(self.get_package_dir(package))] = package\n\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n for path in ei_cmd.filelist.files:\n d, f 
= os.path.split(assert_relative(path))\n prev = None\n oldf = f\n while d and d != prev and d not in src_dirs:\n prev = d\n d, df = os.path.split(d)\n f = os.path.join(df, f)\n if d in src_dirs:\n if path.endswith('.py') and f == oldf:\n continue # it's a module, not data\n mf.setdefault(src_dirs[d], []).append(path)\n\n def get_data_files(self):\n pass # Lazily compute data files in _get_data_files() function.\n\n def check_package(self, package, package_dir):\n \"\"\"Check namespace packages' __init__ for declare_namespace\"\"\"\n try:\n return self.packages_checked[package]\n except KeyError:\n pass\n\n init_py = orig.build_py.check_package(self, package, package_dir)\n self.packages_checked[package] = init_py\n\n if not init_py or not self.distribution.namespace_packages:\n return init_py\n\n for pkg in self.distribution.namespace_packages:\n if pkg == package or pkg.startswith(package + '.'):\n break\n else:\n return init_py\n\n with io.open(init_py, 'rb') as f:\n contents = f.read()\n if b'declare_namespace' not in contents:\n raise distutils.errors.DistutilsError(\n \"Namespace package problem: %s is a namespace package, but \"\n \"its\\n__init__.py does not call declare_namespace()! Please \"\n 'fix it.\\n(See the setuptools manual under '\n '\"Namespace Packages\" for details.)\\n\"' % (package,)\n )\n return init_py\n\n def initialize_options(self):\n self.packages_checked = {}\n orig.build_py.initialize_options(self)\n\n def get_package_dir(self, package):\n res = orig.build_py.get_package_dir(self, package)\n if self.distribution.src_root is not None:\n return os.path.join(self.distribution.src_root, res)\n return res\n\n def exclude_data_files(self, package, src_dir, files):\n \"\"\"Filter filenames for package's data files in 'src_dir'\"\"\"\n files = list(files)\n patterns = self._get_platform_patterns(\n self.exclude_package_data,\n package,\n src_dir,\n )\n match_groups = (\n fnmatch.filter(files, pattern)\n for pattern in patterns\n )\n # flatten the groups of matches into an iterable of matches\n matches = itertools.chain.from_iterable(match_groups)\n bad = set(matches)\n keepers = (\n fn\n for fn in files\n if fn not in bad\n )\n # ditch dupes\n return list(_unique_everseen(keepers))\n\n @staticmethod\n def _get_platform_patterns(spec, package, src_dir):\n \"\"\"\n yield platfrom-specific path patterns (suitable for glob\n or fn_match) from a glob-based spec (such as\n self.package_data or self.exclude_package_data)\n matching package in src_dir.\n \"\"\"\n raw_patterns = itertools.chain(\n spec.get('', []),\n spec.get(package, []),\n )\n return (\n # Each pattern has to be converted to a platform-specific path\n os.path.join(src_dir, convert_path(pattern))\n for pattern in raw_patterns\n )\n\n\n# from Python docs\ndef _unique_everseen(iterable, key=None):\n \"List unique elements, preserving order. 
Remember all elements ever seen.\"\n # unique_everseen('AAAABBBCCDAABBB') --> A B C D\n # unique_everseen('ABBCcAD', str.lower) --> A B C D\n seen = set()\n seen_add = seen.add\n if key is None:\n for element in filterfalse(seen.__contains__, iterable):\n seen_add(element)\n yield element\n else:\n for element in iterable:\n k = key(element)\n if k not in seen:\n seen_add(k)\n yield element\n\n\ndef assert_relative(path):\n if not os.path.isabs(path):\n return path\n from distutils.errors import DistutilsSetupError\n\n msg = textwrap.dedent(\"\"\"\n Error: setup script specifies an absolute path:\n\n %s\n\n setup() arguments must *always* be /-separated paths relative to the\n setup.py directory, *never* absolute paths.\n \"\"\").lstrip() % path\n raise DistutilsSetupError(msg)\n", "path": "setuptools/command/build_py.py"}]}
num_tokens_prompt: 3,860
num_tokens_diff: 179
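Each prompt asks for a patch that can be applied with `git apply`. A sketch of verifying that a candidate (or golden) diff applies cleanly to a repository checkout; the checkout location and commit are assumptions outside the dataset:

```python
# Check whether a diff applies cleanly using `git apply --check`.
# Assumptions: `git` is on PATH and repo_dir is a checkout of the target
# project (e.g., pypa/setuptools for the row above) at a matching commit.
import subprocess
import tempfile

def patch_applies(repo_dir: str, diff_text: str) -> bool:
    with tempfile.NamedTemporaryFile("w", suffix=".patch") as f:
        f.write(diff_text)
        f.flush()  # make sure git sees the full patch on disk
        result = subprocess.run(
            ["git", "apply", "--check", f.name],
            cwd=repo_dir,
            capture_output=True,
        )
    return result.returncode == 0
```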
problem_id: gh_patches_debug_36503
source: rasdani/github-patches
task_type: git_diff
in_source_id: bokeh__bokeh-9870
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Minor typos in validation error output for FactorRange and CDSView **Software version info** bokeh 2.0.0 Mozilla Firefox 74.0 macOS Catalina 10.15.3 **Expected behavior:** Validation errors will have an output with correct spelling. Example one: "FactorRange must specify a unique list of categorical factors for an axis" Example two: "CDSView filters are not compatible with glyphs with connected topology such as Line or Patch" **Observed behavior:** Validation errors have an output with typos (see attached screenshot). Example one: "FactorRange must **_specicy_** a unique list of categorical factors for an axis" Example two: "CDSView filters are not compatible with glyphs with connected topology **_suchs_** as Line **_and_** Patch" **Complete, minimal, self-contained example code that reproduces the issue** **Example one:** ```py from bokeh.io import output_notebook, show from bokeh.models import ColumnDataSource, FactorRange from bokeh.plotting import figure output_notebook() fruits = ['Apples', 'Apples'] years = ['2015', '2016'] data = {'fruits' : fruits, '2015' : [2, 1], '2016' : [5, 3]} x = [ (fruit, year) for fruit in fruits for year in years ] counts = sum(zip(data['2015'], data['2016']), ()) # like an hstack source = ColumnDataSource(data=dict(x=x, counts=counts)) p = figure(x_range=FactorRange(*x), plot_height=250, title="Fruit Counts by Year", toolbar_location=None, tools="") p.vbar(x='x', top='counts', width=0.9, source=source) show(p) ``` **Example two** ```py from bokeh.layouts import row from bokeh.plotting import output_notebook, figure, show from bokeh.sampledata.autompg import autompg from bokeh.models import CDSView, GroupFilter from bokeh.models.sources import ColumnDataSource output_notebook() autompg = autompg.assign(efficient=(autompg.mpg > 20).astype(str)) autompg = autompg.groupby(['yr', 'efficient']).mpg.mean().reset_index() autompg = autompg.sort_values(['efficient', 'yr']) list_p = [] source=ColumnDataSource(autompg) for eff in ['True', 'False']: filter_ = GroupFilter(column_name='efficient', group=eff) view = CDSView(source=source, filters=[filter_]) list_p.append(figure(title=eff)) list_p[-1].line(x='yr', y='mpg', source=source, view=view) show(row(list_p)) ``` **Screenshots or screencasts of the bug in action** Example one: <img width="1052" alt="Screen Shot 2020-04-01 at 11 12 20 PM" src="https://user-images.githubusercontent.com/18173173/78207887-f42d7380-7470-11ea-853a-f7aa905f91d2.png"> Example two: <img width="1036" alt="Screen Shot 2020-04-01 at 11 24 33 PM" src="https://user-images.githubusercontent.com/18173173/78207888-f68fcd80-7470-11ea-9370-c048914aa6d8.png"> </issue> <code> [start of bokeh/core/validation/errors.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors. 3 # All rights reserved. 4 # 5 # The full license is in the file LICENSE.txt, distributed with this software. 6 #----------------------------------------------------------------------------- 7 ''' These define the standard error codes and messages for Bokeh 8 validation checks. 9 10 1001 *(BAD_COLUMN_NAME)* 11 A glyph has a property set to a field name that does not correspond to any 12 column in the |GlyphRenderer|'s data source. 13 14 1002 *(MISSING_GLYPH)* 15 A |GlyphRenderer| has no glyph configured. 
16 17 1003 *(NO_SOURCE_FOR_GLYPH)* 18 A |GlyphRenderer| has no data source configured. 19 20 1004 *(REQUIRED_RANGE)* 21 A |Plot| is missing one or more required default ranges (will result in 22 blank plot). 23 24 1005 *(MISSING_GOOGLE_API_KEY)* 25 Google Maps API now requires an API key for all use. See 26 https://developers.google.com/maps/documentation/javascript/get-api-key 27 for more information on how to obtain your own, to use for the 28 ``api_key`` property of your Google Map plot . 29 30 1006 *(NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS)* 31 All data_sources on ``LegendItem.renderers`` must match when LegendItem.label 32 is type field. 33 34 1007 *(MISSING_MERCATOR_DIMENSION)* 35 ``MercatorTicker`` and ``MercatorTickFormatter``models must have their 36 ``dimension`` property set to ``'lat'`` or ``'lon'``. 37 38 1008 *(REQUIRED_SCALE)* 39 A |Scale| on is missing one or more required default scales (will result in 40 blank plot). 41 42 1009 *(INCOMPATIBLE_SCALE_AND_RANGE)* 43 A |Scale| type is incompatible with one or more ranges on the same plot 44 dimension (will result in blank plot). 45 46 1010 *(CDSVIEW_SOURCE_DOESNT_MATCH)* 47 A |GlyphRenderer| has a ``CDSView`` whose source doesn't match the ``GlyphRenderer``'s 48 data source. 49 50 1011 *(MALFORMED_GRAPH_SOURCE)* 51 The ``GraphSource`` is incorrectly configured. 52 53 1012 *(INCOMPATIBLE_MAP_RANGE_TYPE)* 54 Map plots can only support ``Range1d`` types, not data ranges. 55 56 1013 *(INCOMPATIBLE_POINT_DRAW_RENDERER)* 57 The ``PointDrawTool`` renderers may only reference ``XYGlyph`` models. 58 59 1014 *(INCOMPATIBLE_BOX_EDIT_RENDERER)* 60 The ``BoxEditTool`` renderers may only reference ``Rect`` glyph models. 61 62 1015 *(INCOMPATIBLE_POLY_DRAW_RENDERER)* 63 The ``PolyDrawTool`` renderers may only reference ``MultiLine`` and ``Patches`` glyph models. 64 65 1016 *(INCOMPATIBLE_POLY_EDIT_RENDERER)* 66 The ``PolyEditTool`` renderers may only reference ``MultiLine`` and ``Patches`` glyph models. 67 68 1017 *(INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER)* 69 The ``PolyEditTool`` vertex_renderer may only reference ``XYGlyph`` models. 70 71 1018 *(NO_RANGE_TOOL_RANGES)* 72 The ``RangeTool`` must have at least one of ``x_range`` or ``y_range`` configured 73 74 1019 *(DUPLICATE_FACTORS)* 75 ``FactorRange`` must specify a unique list of categorical factors for an axis. 76 77 1020 *(BAD_EXTRA_RANGE_NAME)* 78 An extra range name is configured with a name that does not correspond to any range. 79 80 1021 *(EQUAL_SLIDER_START_END)* 81 ``noUiSlider`` most have a nonequal start and end. 82 83 1022 *(MIN_PREFERRED_MAX_WIDTH)* 84 Expected min_width <= width <= max_width 85 86 1023 *(MIN_PREFERRED_MAX_HEIGHT)* 87 Expected min_height <= height <= max_height 88 89 1024 *(CDSVIEW_FILTERS_WITH_CONNECTED)* 90 ``CDSView`` filters are not compatible with glyphs with connected topology suchs as Line and Patch. 91 92 9999 *(EXT)* 93 Indicates that a custom error check has failed. 
94 95 ''' 96 97 #----------------------------------------------------------------------------- 98 # Boilerplate 99 #----------------------------------------------------------------------------- 100 import logging # isort:skip 101 log = logging.getLogger(__name__) 102 103 #----------------------------------------------------------------------------- 104 # Imports 105 #----------------------------------------------------------------------------- 106 107 #----------------------------------------------------------------------------- 108 # Globals and constants 109 #----------------------------------------------------------------------------- 110 111 codes = { 112 1001: ("BAD_COLUMN_NAME", "Glyph refers to nonexistent column name. This could either be due to a misspelling or typo, or due to an expected column being missing. "), # NOQA 113 1002: ("MISSING_GLYPH", "Glyph renderer has no glyph set"), 114 1003: ("NO_SOURCE_FOR_GLYPH", "Glyph renderer has no data source"), 115 1004: ("REQUIRED_RANGE", "A required Range object is missing"), 116 1005: ("MISSING_GOOGLE_API_KEY", "Google now requires API keys for all Google Maps usage"), 117 1006: ("NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS", "LegendItem.label is a field, but renderer data sources don't match"), 118 1007: ("MISSING_MERCATOR_DIMENSION", "Mercator Tickers and Formatters must have their dimension property set to 'lat' or 'lon'"), 119 1008: ("REQUIRED_SCALE", "A required Scale object is missing"), 120 1009: ("INCOMPATIBLE_SCALE_AND_RANGE", "A Scale is incompatible with one or more ranges on the same plot dimension"), 121 1010: ("CDSVIEW_SOURCE_DOESNT_MATCH", "CDSView used by Glyph renderer must have a source that matches the Glyph renderer's data source"), 122 1011: ("MALFORMED_GRAPH_SOURCE", "The GraphSource is incorrectly configured"), 123 1012: ("INCOMPATIBLE_MAP_RANGE_TYPE", "Map plots can only support Range1d types, not data ranges"), 124 1013: ("INCOMPATIBLE_POINT_DRAW_RENDERER", "PointDrawTool renderers may only reference XYGlyph models"), 125 1014: ("INCOMPATIBLE_BOX_EDIT_RENDERER", "BoxEditTool renderers may only reference Rect glyph models"), 126 1015: ("INCOMPATIBLE_POLY_DRAW_RENDERER", "PolyDrawTool renderers may only reference MultiLine and Patches glyph models"), 127 1016: ("INCOMPATIBLE_POLY_EDIT_RENDERER", "PolyEditTool renderers may only reference MultiLine and Patches glyph models"), 128 1017: ("INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER", "PolyEditTool vertex_renderer may only reference XYGlyph models"), 129 1018: ("NO_RANGE_TOOL_RANGES", "RangeTool must have at least one of x_range or y_range configured"), 130 1019: ("DUPLICATE_FACTORS", "FactorRange must specicy a unique list of categorical factors for an axis"), 131 1020: ("BAD_EXTRA_RANGE_NAME", "An extra range name is configued with a name that does not correspond to any range"), 132 1021: ("EQUAL_SLIDER_START_END", "Slider 'start' and 'end' cannot be equal"), 133 1022: ("MIN_PREFERRED_MAX_WIDTH", "Expected min_width <= width <= max_width"), 134 1023: ("MIN_PREFERRED_MAX_HEIGHT", "Expected min_height <= height <= max_height"), 135 1024: ("CDSVIEW_FILTERS_WITH_CONNECTED", "CDSView filters are not compatible with glyphs with connected topology suchs as Line and Patch"), 136 9999: ("EXT", "Custom extension reports error"), 137 } 138 139 __all__ = () 140 141 #----------------------------------------------------------------------------- 142 # General API 143 #----------------------------------------------------------------------------- 144 145 
#----------------------------------------------------------------------------- 146 # Dev API 147 #----------------------------------------------------------------------------- 148 149 #----------------------------------------------------------------------------- 150 # Private API 151 #----------------------------------------------------------------------------- 152 153 #----------------------------------------------------------------------------- 154 # Code 155 #----------------------------------------------------------------------------- 156 157 for code in codes: 158 exec("%s = %d" % (codes[code][0], code)) 159 [end of bokeh/core/validation/errors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/core/validation/errors.py b/bokeh/core/validation/errors.py --- a/bokeh/core/validation/errors.py +++ b/bokeh/core/validation/errors.py @@ -87,7 +87,7 @@ Expected min_height <= height <= max_height 1024 *(CDSVIEW_FILTERS_WITH_CONNECTED)* - ``CDSView`` filters are not compatible with glyphs with connected topology suchs as Line and Patch. + ``CDSView`` filters are not compatible with glyphs with connected topology such as Line or Patch. 9999 *(EXT)* Indicates that a custom error check has failed. @@ -127,12 +127,12 @@ 1016: ("INCOMPATIBLE_POLY_EDIT_RENDERER", "PolyEditTool renderers may only reference MultiLine and Patches glyph models"), 1017: ("INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER", "PolyEditTool vertex_renderer may only reference XYGlyph models"), 1018: ("NO_RANGE_TOOL_RANGES", "RangeTool must have at least one of x_range or y_range configured"), - 1019: ("DUPLICATE_FACTORS", "FactorRange must specicy a unique list of categorical factors for an axis"), + 1019: ("DUPLICATE_FACTORS", "FactorRange must specify a unique list of categorical factors for an axis"), 1020: ("BAD_EXTRA_RANGE_NAME", "An extra range name is configued with a name that does not correspond to any range"), 1021: ("EQUAL_SLIDER_START_END", "Slider 'start' and 'end' cannot be equal"), 1022: ("MIN_PREFERRED_MAX_WIDTH", "Expected min_width <= width <= max_width"), 1023: ("MIN_PREFERRED_MAX_HEIGHT", "Expected min_height <= height <= max_height"), - 1024: ("CDSVIEW_FILTERS_WITH_CONNECTED", "CDSView filters are not compatible with glyphs with connected topology suchs as Line and Patch"), + 1024: ("CDSVIEW_FILTERS_WITH_CONNECTED", "CDSView filters are not compatible with glyphs with connected topology such as Line or Patch"), 9999: ("EXT", "Custom extension reports error"), }
{"golden_diff": "diff --git a/bokeh/core/validation/errors.py b/bokeh/core/validation/errors.py\n--- a/bokeh/core/validation/errors.py\n+++ b/bokeh/core/validation/errors.py\n@@ -87,7 +87,7 @@\n Expected min_height <= height <= max_height\n \n 1024 *(CDSVIEW_FILTERS_WITH_CONNECTED)*\n- ``CDSView`` filters are not compatible with glyphs with connected topology suchs as Line and Patch.\n+ ``CDSView`` filters are not compatible with glyphs with connected topology such as Line or Patch.\n \n 9999 *(EXT)*\n Indicates that a custom error check has failed.\n@@ -127,12 +127,12 @@\n 1016: (\"INCOMPATIBLE_POLY_EDIT_RENDERER\", \"PolyEditTool renderers may only reference MultiLine and Patches glyph models\"),\n 1017: (\"INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER\", \"PolyEditTool vertex_renderer may only reference XYGlyph models\"),\n 1018: (\"NO_RANGE_TOOL_RANGES\", \"RangeTool must have at least one of x_range or y_range configured\"),\n- 1019: (\"DUPLICATE_FACTORS\", \"FactorRange must specicy a unique list of categorical factors for an axis\"),\n+ 1019: (\"DUPLICATE_FACTORS\", \"FactorRange must specify a unique list of categorical factors for an axis\"),\n 1020: (\"BAD_EXTRA_RANGE_NAME\", \"An extra range name is configued with a name that does not correspond to any range\"),\n 1021: (\"EQUAL_SLIDER_START_END\", \"Slider 'start' and 'end' cannot be equal\"),\n 1022: (\"MIN_PREFERRED_MAX_WIDTH\", \"Expected min_width <= width <= max_width\"),\n 1023: (\"MIN_PREFERRED_MAX_HEIGHT\", \"Expected min_height <= height <= max_height\"),\n- 1024: (\"CDSVIEW_FILTERS_WITH_CONNECTED\", \"CDSView filters are not compatible with glyphs with connected topology suchs as Line and Patch\"),\n+ 1024: (\"CDSVIEW_FILTERS_WITH_CONNECTED\", \"CDSView filters are not compatible with glyphs with connected topology such as Line or Patch\"),\n 9999: (\"EXT\", \"Custom extension reports error\"),\n }\n", "issue": "[BUG] Minor typos in validation error output for FactorRange and CDSView\n**Software version info**\r\nbokeh 2.0.0\r\nMozilla Firefox 74.0\r\nmacOS Catalina 10.15.3\r\n\r\n**Expected behavior:**\r\nValidation errors will have an output with correct spelling.\r\n\r\nExample one:\r\n\"FactorRange must specify a unique list of categorical factors for an axis\"\r\nExample two:\r\n\"CDSView filters are not compatible with glyphs with connected topology such as Line or Patch\"\r\n\r\n**Observed behavior:**\r\nValidation errors have an output with typos (see attached screenshot).\r\n\r\nExample one:\r\n\"FactorRange must **_specicy_** a unique list of categorical factors for an axis\"\r\nExample two:\r\n\"CDSView filters are not compatible with glyphs with connected topology **_suchs_** as Line **_and_** Patch\"\r\n\r\n**Complete, minimal, self-contained example code that reproduces the issue**\r\n\r\n**Example one:**\r\n```py\r\nfrom bokeh.io import output_notebook, show\r\nfrom bokeh.models import ColumnDataSource, FactorRange\r\nfrom bokeh.plotting import figure\r\n\r\noutput_notebook()\r\n\r\nfruits = ['Apples', 'Apples']\r\nyears = ['2015', '2016']\r\n\r\ndata = {'fruits' : fruits,\r\n '2015' : [2, 1],\r\n '2016' : [5, 3]}\r\n\r\nx = [ (fruit, year) for fruit in fruits for year in years ]\r\ncounts = sum(zip(data['2015'], data['2016']), ()) # like an hstack\r\n\r\nsource = ColumnDataSource(data=dict(x=x, counts=counts))\r\n\r\np = figure(x_range=FactorRange(*x), plot_height=250, title=\"Fruit Counts by Year\",\r\n toolbar_location=None, tools=\"\")\r\n\r\np.vbar(x='x', top='counts', width=0.9, 
source=source)\r\n\r\nshow(p)\r\n\r\n```\r\n\r\n**Example two**\r\n```py\r\nfrom bokeh.layouts import row\r\nfrom bokeh.plotting import output_notebook, figure, show\r\nfrom bokeh.sampledata.autompg import autompg\r\nfrom bokeh.models import CDSView, GroupFilter\r\nfrom bokeh.models.sources import ColumnDataSource\r\noutput_notebook()\r\n\r\nautompg = autompg.assign(efficient=(autompg.mpg > 20).astype(str))\r\nautompg = autompg.groupby(['yr', 'efficient']).mpg.mean().reset_index()\r\nautompg = autompg.sort_values(['efficient', 'yr'])\r\nlist_p = []\r\nsource=ColumnDataSource(autompg)\r\nfor eff in ['True', 'False']:\r\n filter_ = GroupFilter(column_name='efficient', group=eff)\r\n view = CDSView(source=source, filters=[filter_])\r\n\r\n list_p.append(figure(title=eff))\r\n list_p[-1].line(x='yr', y='mpg', source=source, view=view)\r\n\r\nshow(row(list_p))\r\n```\r\n\r\n**Screenshots or screencasts of the bug in action**\r\nExample one:\r\n<img width=\"1052\" alt=\"Screen Shot 2020-04-01 at 11 12 20 PM\" src=\"https://user-images.githubusercontent.com/18173173/78207887-f42d7380-7470-11ea-853a-f7aa905f91d2.png\">\r\n\r\nExample two:\r\n<img width=\"1036\" alt=\"Screen Shot 2020-04-01 at 11 24 33 PM\" src=\"https://user-images.githubusercontent.com/18173173/78207888-f68fcd80-7470-11ea-9370-c048914aa6d8.png\">\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' These define the standard error codes and messages for Bokeh\nvalidation checks.\n\n1001 *(BAD_COLUMN_NAME)*\n A glyph has a property set to a field name that does not correspond to any\n column in the |GlyphRenderer|'s data source.\n\n1002 *(MISSING_GLYPH)*\n A |GlyphRenderer| has no glyph configured.\n\n1003 *(NO_SOURCE_FOR_GLYPH)*\n A |GlyphRenderer| has no data source configured.\n\n1004 *(REQUIRED_RANGE)*\n A |Plot| is missing one or more required default ranges (will result in\n blank plot).\n\n1005 *(MISSING_GOOGLE_API_KEY)*\n Google Maps API now requires an API key for all use. 
See\n https://developers.google.com/maps/documentation/javascript/get-api-key\n for more information on how to obtain your own, to use for the\n ``api_key`` property of your Google Map plot .\n\n1006 *(NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS)*\n All data_sources on ``LegendItem.renderers`` must match when LegendItem.label\n is type field.\n\n1007 *(MISSING_MERCATOR_DIMENSION)*\n ``MercatorTicker`` and ``MercatorTickFormatter``models must have their\n ``dimension`` property set to ``'lat'`` or ``'lon'``.\n\n1008 *(REQUIRED_SCALE)*\n A |Scale| on is missing one or more required default scales (will result in\n blank plot).\n\n1009 *(INCOMPATIBLE_SCALE_AND_RANGE)*\n A |Scale| type is incompatible with one or more ranges on the same plot\n dimension (will result in blank plot).\n\n1010 *(CDSVIEW_SOURCE_DOESNT_MATCH)*\n A |GlyphRenderer| has a ``CDSView`` whose source doesn't match the ``GlyphRenderer``'s\n data source.\n\n1011 *(MALFORMED_GRAPH_SOURCE)*\n The ``GraphSource`` is incorrectly configured.\n\n1012 *(INCOMPATIBLE_MAP_RANGE_TYPE)*\n Map plots can only support ``Range1d`` types, not data ranges.\n\n1013 *(INCOMPATIBLE_POINT_DRAW_RENDERER)*\n The ``PointDrawTool`` renderers may only reference ``XYGlyph`` models.\n\n1014 *(INCOMPATIBLE_BOX_EDIT_RENDERER)*\n The ``BoxEditTool`` renderers may only reference ``Rect`` glyph models.\n\n1015 *(INCOMPATIBLE_POLY_DRAW_RENDERER)*\n The ``PolyDrawTool`` renderers may only reference ``MultiLine`` and ``Patches`` glyph models.\n\n1016 *(INCOMPATIBLE_POLY_EDIT_RENDERER)*\n The ``PolyEditTool`` renderers may only reference ``MultiLine`` and ``Patches`` glyph models.\n\n1017 *(INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER)*\n The ``PolyEditTool`` vertex_renderer may only reference ``XYGlyph`` models.\n\n1018 *(NO_RANGE_TOOL_RANGES)*\n The ``RangeTool`` must have at least one of ``x_range`` or ``y_range`` configured\n\n1019 *(DUPLICATE_FACTORS)*\n ``FactorRange`` must specify a unique list of categorical factors for an axis.\n\n1020 *(BAD_EXTRA_RANGE_NAME)*\n An extra range name is configured with a name that does not correspond to any range.\n\n1021 *(EQUAL_SLIDER_START_END)*\n ``noUiSlider`` most have a nonequal start and end.\n\n1022 *(MIN_PREFERRED_MAX_WIDTH)*\n Expected min_width <= width <= max_width\n\n1023 *(MIN_PREFERRED_MAX_HEIGHT)*\n Expected min_height <= height <= max_height\n\n1024 *(CDSVIEW_FILTERS_WITH_CONNECTED)*\n ``CDSView`` filters are not compatible with glyphs with connected topology suchs as Line and Patch.\n\n9999 *(EXT)*\n Indicates that a custom error check has failed.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\ncodes = {\n 1001: (\"BAD_COLUMN_NAME\", \"Glyph refers to nonexistent column name. This could either be due to a misspelling or typo, or due to an expected column being missing. 
\"), # NOQA\n 1002: (\"MISSING_GLYPH\", \"Glyph renderer has no glyph set\"),\n 1003: (\"NO_SOURCE_FOR_GLYPH\", \"Glyph renderer has no data source\"),\n 1004: (\"REQUIRED_RANGE\", \"A required Range object is missing\"),\n 1005: (\"MISSING_GOOGLE_API_KEY\", \"Google now requires API keys for all Google Maps usage\"),\n 1006: (\"NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS\", \"LegendItem.label is a field, but renderer data sources don't match\"),\n 1007: (\"MISSING_MERCATOR_DIMENSION\", \"Mercator Tickers and Formatters must have their dimension property set to 'lat' or 'lon'\"),\n 1008: (\"REQUIRED_SCALE\", \"A required Scale object is missing\"),\n 1009: (\"INCOMPATIBLE_SCALE_AND_RANGE\", \"A Scale is incompatible with one or more ranges on the same plot dimension\"),\n 1010: (\"CDSVIEW_SOURCE_DOESNT_MATCH\", \"CDSView used by Glyph renderer must have a source that matches the Glyph renderer's data source\"),\n 1011: (\"MALFORMED_GRAPH_SOURCE\", \"The GraphSource is incorrectly configured\"),\n 1012: (\"INCOMPATIBLE_MAP_RANGE_TYPE\", \"Map plots can only support Range1d types, not data ranges\"),\n 1013: (\"INCOMPATIBLE_POINT_DRAW_RENDERER\", \"PointDrawTool renderers may only reference XYGlyph models\"),\n 1014: (\"INCOMPATIBLE_BOX_EDIT_RENDERER\", \"BoxEditTool renderers may only reference Rect glyph models\"),\n 1015: (\"INCOMPATIBLE_POLY_DRAW_RENDERER\", \"PolyDrawTool renderers may only reference MultiLine and Patches glyph models\"),\n 1016: (\"INCOMPATIBLE_POLY_EDIT_RENDERER\", \"PolyEditTool renderers may only reference MultiLine and Patches glyph models\"),\n 1017: (\"INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER\", \"PolyEditTool vertex_renderer may only reference XYGlyph models\"),\n 1018: (\"NO_RANGE_TOOL_RANGES\", \"RangeTool must have at least one of x_range or y_range configured\"),\n 1019: (\"DUPLICATE_FACTORS\", \"FactorRange must specicy a unique list of categorical factors for an axis\"),\n 1020: (\"BAD_EXTRA_RANGE_NAME\", \"An extra range name is configued with a name that does not correspond to any range\"),\n 1021: (\"EQUAL_SLIDER_START_END\", \"Slider 'start' and 'end' cannot be equal\"),\n 1022: (\"MIN_PREFERRED_MAX_WIDTH\", \"Expected min_width <= width <= max_width\"),\n 1023: (\"MIN_PREFERRED_MAX_HEIGHT\", \"Expected min_height <= height <= max_height\"),\n 1024: (\"CDSVIEW_FILTERS_WITH_CONNECTED\", \"CDSView filters are not compatible with glyphs with connected topology suchs as Line and Patch\"),\n 9999: (\"EXT\", \"Custom extension reports error\"),\n}\n\n__all__ = ()\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nfor code in codes:\n exec(\"%s = %d\" % (codes[code][0], code))\n", "path": "bokeh/core/validation/errors.py"}]}
3,596
522
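The pattern at the heart of the record above is the `codes` table in `bokeh/core/validation/errors.py`: a dict mapping integer error codes to `(NAME, message)` pairs, with each `NAME` then bound as a module-level constant (the module does this via `exec("%s = %d" % ...)` in a loop). Below is a minimal sketch of that same pattern, assuming only the standard library and using `globals().update` as an equivalent, loop-free binding; the table contents are a trimmed illustration, not bokeh's full list:

```python
# Codes table: integer code -> (NAME, human-readable message).
codes = {
    1019: ("DUPLICATE_FACTORS",
           "FactorRange must specify a unique list of categorical factors for an axis"),
    1024: ("CDSVIEW_FILTERS_WITH_CONNECTED",
           "CDSView filters are not compatible with glyphs with connected topology such as Line or Patch"),
    9999: ("EXT", "Custom extension reports error"),
}

# Bind each NAME as a module-level integer constant (equivalent to the
# record's `exec("%s = %d" % ...)` loop, without string evaluation).
globals().update({name: code for code, (name, _msg) in codes.items()})

if __name__ == "__main__":
    print(DUPLICATE_FACTORS)                         # -> 1019
    print(codes[CDSVIEW_FILTERS_WITH_CONNECTED][1])  # -> the message string
```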
gh_patches_debug_25356
rasdani/github-patches
git_diff
python-discord__bot-448
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The off-topic channel name updating task fails on non-success API response The background task we create to update off-topic names at midnight UTC fails if it receives a non-success API response. The reason is that our `bot.api_client` will raise the `bot.api.ResponseCodeError` exception on non-success response status codes. This means that the off-topic channel names won't be updated again until either the bot is restarted or the task is started manually again by an admin. The relevant lines of code: https://github.com/python-discord/bot/blob/e70c96248bd7b548412811a4f1ffe88bed41f815/bot/cogs/off_topic_names.py#L59-L61 To handle it, we could simply include a `try-except` block and log the exception in the `except` block. I'm not sure if we want to log the entire exception, since the exception text could be a [massive HTML-response generated by cloudflare](https://paste.pythondiscord.com/ohibicedif). Logging the failure with the response code should generally give us enough to determine the cause of the failure. </issue> <code> [start of bot/cogs/off_topic_names.py] 1 import asyncio 2 import difflib 3 import logging 4 from datetime import datetime, timedelta 5 6 from discord import Colour, Embed 7 from discord.ext.commands import BadArgument, Bot, Cog, Context, Converter, group 8 9 from bot.constants import Channels, MODERATION_ROLES 10 from bot.decorators import with_role 11 from bot.pagination import LinePaginator 12 13 14 CHANNELS = (Channels.off_topic_0, Channels.off_topic_1, Channels.off_topic_2) 15 log = logging.getLogger(__name__) 16 17 18 class OffTopicName(Converter): 19 """A converter that ensures an added off-topic name is valid.""" 20 21 @staticmethod 22 async def convert(ctx: Context, argument: str) -> str: 23 """Attempt to replace any invalid characters with their approximate Unicode equivalent.""" 24 allowed_characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ!?'`-" 25 26 if not (2 <= len(argument) <= 96): 27 raise BadArgument("Channel name must be between 2 and 96 chars long") 28 29 elif not all(c.isalnum() or c in allowed_characters for c in argument): 30 raise BadArgument( 31 "Channel name must only consist of " 32 "alphanumeric characters, minus signs or apostrophes." 33 ) 34 35 # Replace invalid characters with unicode alternatives. 36 table = str.maketrans( 37 allowed_characters, '𝖠𝖡𝖢𝖣𝖤𝖥𝖦𝖧𝖨𝖩𝖪𝖫𝖬𝖭𝖮𝖯𝖰𝖱𝖲𝖳𝖴𝖵𝖶𝖷𝖸𝖹ǃ?’’-' 38 ) 39 return argument.translate(table) 40 41 42 async def update_names(bot: Bot) -> None: 43 """Background updater task that performs the daily channel name update.""" 44 while True: 45 # Since we truncate the compute timedelta to seconds, we add one second to ensure 46 # we go past midnight in the `seconds_to_sleep` set below. 
47 today_at_midnight = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=0) 48 next_midnight = today_at_midnight + timedelta(days=1) 49 seconds_to_sleep = (next_midnight - datetime.utcnow()).seconds + 1 50 await asyncio.sleep(seconds_to_sleep) 51 52 channel_0_name, channel_1_name, channel_2_name = await bot.api_client.get( 53 'bot/off-topic-channel-names', params={'random_items': 3} 54 ) 55 channel_0, channel_1, channel_2 = (bot.get_channel(channel_id) for channel_id in CHANNELS) 56 57 await channel_0.edit(name=f'ot0-{channel_0_name}') 58 await channel_1.edit(name=f'ot1-{channel_1_name}') 59 await channel_2.edit(name=f'ot2-{channel_2_name}') 60 log.debug( 61 "Updated off-topic channel names to" 62 f" {channel_0_name}, {channel_1_name} and {channel_2_name}" 63 ) 64 65 66 class OffTopicNames(Cog): 67 """Commands related to managing the off-topic category channel names.""" 68 69 def __init__(self, bot: Bot): 70 self.bot = bot 71 self.updater_task = None 72 73 def cog_unload(self) -> None: 74 """Cancel any running updater tasks on cog unload.""" 75 if self.updater_task is not None: 76 self.updater_task.cancel() 77 78 @Cog.listener() 79 async def on_ready(self) -> None: 80 """Start off-topic channel updating event loop if it hasn't already started.""" 81 if self.updater_task is None: 82 coro = update_names(self.bot) 83 self.updater_task = self.bot.loop.create_task(coro) 84 85 @group(name='otname', aliases=('otnames', 'otn'), invoke_without_command=True) 86 @with_role(*MODERATION_ROLES) 87 async def otname_group(self, ctx: Context) -> None: 88 """Add or list items from the off-topic channel name rotation.""" 89 await ctx.invoke(self.bot.get_command("help"), "otname") 90 91 @otname_group.command(name='add', aliases=('a',)) 92 @with_role(*MODERATION_ROLES) 93 async def add_command(self, ctx: Context, *names: OffTopicName) -> None: 94 """Adds a new off-topic name to the rotation.""" 95 # Chain multiple words to a single one 96 name = "-".join(names) 97 98 await self.bot.api_client.post(f'bot/off-topic-channel-names', params={'name': name}) 99 log.info( 100 f"{ctx.author.name}#{ctx.author.discriminator}" 101 f" added the off-topic channel name '{name}" 102 ) 103 await ctx.send(f":ok_hand: Added `{name}` to the names list.") 104 105 @otname_group.command(name='delete', aliases=('remove', 'rm', 'del', 'd')) 106 @with_role(*MODERATION_ROLES) 107 async def delete_command(self, ctx: Context, *names: OffTopicName) -> None: 108 """Removes a off-topic name from the rotation.""" 109 # Chain multiple words to a single one 110 name = "-".join(names) 111 112 await self.bot.api_client.delete(f'bot/off-topic-channel-names/{name}') 113 log.info( 114 f"{ctx.author.name}#{ctx.author.discriminator}" 115 f" deleted the off-topic channel name '{name}" 116 ) 117 await ctx.send(f":ok_hand: Removed `{name}` from the names list.") 118 119 @otname_group.command(name='list', aliases=('l',)) 120 @with_role(*MODERATION_ROLES) 121 async def list_command(self, ctx: Context) -> None: 122 """ 123 Lists all currently known off-topic channel names in a paginator. 124 125 Restricted to Moderator and above to not spoil the surprise. 
126 """ 127 result = await self.bot.api_client.get('bot/off-topic-channel-names') 128 lines = sorted(f"• {name}" for name in result) 129 embed = Embed( 130 title=f"Known off-topic names (`{len(result)}` total)", 131 colour=Colour.blue() 132 ) 133 if result: 134 await LinePaginator.paginate(lines, ctx, embed, max_size=400, empty=False) 135 else: 136 embed.description = "Hmmm, seems like there's nothing here yet." 137 await ctx.send(embed=embed) 138 139 @otname_group.command(name='search', aliases=('s',)) 140 @with_role(*MODERATION_ROLES) 141 async def search_command(self, ctx: Context, *, query: OffTopicName) -> None: 142 """Search for an off-topic name.""" 143 result = await self.bot.api_client.get('bot/off-topic-channel-names') 144 in_matches = {name for name in result if query in name} 145 close_matches = difflib.get_close_matches(query, result, n=10, cutoff=0.70) 146 lines = sorted(f"• {name}" for name in in_matches.union(close_matches)) 147 embed = Embed( 148 title=f"Query results", 149 colour=Colour.blue() 150 ) 151 152 if lines: 153 await LinePaginator.paginate(lines, ctx, embed, max_size=400, empty=False) 154 else: 155 embed.description = "Nothing found." 156 await ctx.send(embed=embed) 157 158 159 def setup(bot: Bot) -> None: 160 """Off topic names cog load.""" 161 bot.add_cog(OffTopicNames(bot)) 162 log.info("Cog loaded: OffTopicNames") 163 [end of bot/cogs/off_topic_names.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bot/cogs/off_topic_names.py b/bot/cogs/off_topic_names.py --- a/bot/cogs/off_topic_names.py +++ b/bot/cogs/off_topic_names.py @@ -6,6 +6,7 @@ from discord import Colour, Embed from discord.ext.commands import BadArgument, Bot, Cog, Context, Converter, group +from bot.api import ResponseCodeError from bot.constants import Channels, MODERATION_ROLES from bot.decorators import with_role from bot.pagination import LinePaginator @@ -49,9 +50,13 @@ seconds_to_sleep = (next_midnight - datetime.utcnow()).seconds + 1 await asyncio.sleep(seconds_to_sleep) - channel_0_name, channel_1_name, channel_2_name = await bot.api_client.get( - 'bot/off-topic-channel-names', params={'random_items': 3} - ) + try: + channel_0_name, channel_1_name, channel_2_name = await bot.api_client.get( + 'bot/off-topic-channel-names', params={'random_items': 3} + ) + except ResponseCodeError as e: + log.error(f"Failed to get new off topic channel names: code {e.response.status}") + continue channel_0, channel_1, channel_2 = (bot.get_channel(channel_id) for channel_id in CHANNELS) await channel_0.edit(name=f'ot0-{channel_0_name}')
{"golden_diff": "diff --git a/bot/cogs/off_topic_names.py b/bot/cogs/off_topic_names.py\n--- a/bot/cogs/off_topic_names.py\n+++ b/bot/cogs/off_topic_names.py\n@@ -6,6 +6,7 @@\n from discord import Colour, Embed\n from discord.ext.commands import BadArgument, Bot, Cog, Context, Converter, group\n \n+from bot.api import ResponseCodeError\n from bot.constants import Channels, MODERATION_ROLES\n from bot.decorators import with_role\n from bot.pagination import LinePaginator\n@@ -49,9 +50,13 @@\n seconds_to_sleep = (next_midnight - datetime.utcnow()).seconds + 1\n await asyncio.sleep(seconds_to_sleep)\n \n- channel_0_name, channel_1_name, channel_2_name = await bot.api_client.get(\n- 'bot/off-topic-channel-names', params={'random_items': 3}\n- )\n+ try:\n+ channel_0_name, channel_1_name, channel_2_name = await bot.api_client.get(\n+ 'bot/off-topic-channel-names', params={'random_items': 3}\n+ )\n+ except ResponseCodeError as e:\n+ log.error(f\"Failed to get new off topic channel names: code {e.response.status}\")\n+ continue\n channel_0, channel_1, channel_2 = (bot.get_channel(channel_id) for channel_id in CHANNELS)\n \n await channel_0.edit(name=f'ot0-{channel_0_name}')\n", "issue": "The off-topic channel name updating task fails on non-success API response\nThe background task we create to update off-topic names at midnight UTC fails if it receives a non-success API response. The reason is that our `bot.api_client` will raise the `bot.api.ResponseCodeError` exception on non-success response status codes. This means that the off-topic channel names won't be updated again until either the bot is restarted or the task is started manually again by an admin.\r\n\r\nThe relevant lines of code:\r\nhttps://github.com/python-discord/bot/blob/e70c96248bd7b548412811a4f1ffe88bed41f815/bot/cogs/off_topic_names.py#L59-L61\r\n\r\nTo handle it, we could simply include a `try-except` block and log the exception in the `except` block. I'm not sure if we want to log the entire exception, since the exception text could be a [massive HTML-response generated by cloudflare](https://paste.pythondiscord.com/ohibicedif). 
Logging the failure with the response code should generally give us enough to determine the cause of the failure.\n", "before_files": [{"content": "import asyncio\nimport difflib\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import BadArgument, Bot, Cog, Context, Converter, group\n\nfrom bot.constants import Channels, MODERATION_ROLES\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nCHANNELS = (Channels.off_topic_0, Channels.off_topic_1, Channels.off_topic_2)\nlog = logging.getLogger(__name__)\n\n\nclass OffTopicName(Converter):\n \"\"\"A converter that ensures an added off-topic name is valid.\"\"\"\n\n @staticmethod\n async def convert(ctx: Context, argument: str) -> str:\n \"\"\"Attempt to replace any invalid characters with their approximate Unicode equivalent.\"\"\"\n allowed_characters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ!?'`-\"\n\n if not (2 <= len(argument) <= 96):\n raise BadArgument(\"Channel name must be between 2 and 96 chars long\")\n\n elif not all(c.isalnum() or c in allowed_characters for c in argument):\n raise BadArgument(\n \"Channel name must only consist of \"\n \"alphanumeric characters, minus signs or apostrophes.\"\n )\n\n # Replace invalid characters with unicode alternatives.\n table = str.maketrans(\n allowed_characters, '\ud835\udda0\ud835\udda1\ud835\udda2\ud835\udda3\ud835\udda4\ud835\udda5\ud835\udda6\ud835\udda7\ud835\udda8\ud835\udda9\ud835\uddaa\ud835\uddab\ud835\uddac\ud835\uddad\ud835\uddae\ud835\uddaf\ud835\uddb0\ud835\uddb1\ud835\uddb2\ud835\uddb3\ud835\uddb4\ud835\uddb5\ud835\uddb6\ud835\uddb7\ud835\uddb8\ud835\uddb9\u01c3\uff1f\u2019\u2019-'\n )\n return argument.translate(table)\n\n\nasync def update_names(bot: Bot) -> None:\n \"\"\"Background updater task that performs the daily channel name update.\"\"\"\n while True:\n # Since we truncate the compute timedelta to seconds, we add one second to ensure\n # we go past midnight in the `seconds_to_sleep` set below.\n today_at_midnight = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=0)\n next_midnight = today_at_midnight + timedelta(days=1)\n seconds_to_sleep = (next_midnight - datetime.utcnow()).seconds + 1\n await asyncio.sleep(seconds_to_sleep)\n\n channel_0_name, channel_1_name, channel_2_name = await bot.api_client.get(\n 'bot/off-topic-channel-names', params={'random_items': 3}\n )\n channel_0, channel_1, channel_2 = (bot.get_channel(channel_id) for channel_id in CHANNELS)\n\n await channel_0.edit(name=f'ot0-{channel_0_name}')\n await channel_1.edit(name=f'ot1-{channel_1_name}')\n await channel_2.edit(name=f'ot2-{channel_2_name}')\n log.debug(\n \"Updated off-topic channel names to\"\n f\" {channel_0_name}, {channel_1_name} and {channel_2_name}\"\n )\n\n\nclass OffTopicNames(Cog):\n \"\"\"Commands related to managing the off-topic category channel names.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.updater_task = None\n\n def cog_unload(self) -> None:\n \"\"\"Cancel any running updater tasks on cog unload.\"\"\"\n if self.updater_task is not None:\n self.updater_task.cancel()\n\n @Cog.listener()\n async def on_ready(self) -> None:\n \"\"\"Start off-topic channel updating event loop if it hasn't already started.\"\"\"\n if self.updater_task is None:\n coro = update_names(self.bot)\n self.updater_task = self.bot.loop.create_task(coro)\n\n @group(name='otname', aliases=('otnames', 'otn'), invoke_without_command=True)\n @with_role(*MODERATION_ROLES)\n async 
def otname_group(self, ctx: Context) -> None:\n \"\"\"Add or list items from the off-topic channel name rotation.\"\"\"\n await ctx.invoke(self.bot.get_command(\"help\"), \"otname\")\n\n @otname_group.command(name='add', aliases=('a',))\n @with_role(*MODERATION_ROLES)\n async def add_command(self, ctx: Context, *names: OffTopicName) -> None:\n \"\"\"Adds a new off-topic name to the rotation.\"\"\"\n # Chain multiple words to a single one\n name = \"-\".join(names)\n\n await self.bot.api_client.post(f'bot/off-topic-channel-names', params={'name': name})\n log.info(\n f\"{ctx.author.name}#{ctx.author.discriminator}\"\n f\" added the off-topic channel name '{name}\"\n )\n await ctx.send(f\":ok_hand: Added `{name}` to the names list.\")\n\n @otname_group.command(name='delete', aliases=('remove', 'rm', 'del', 'd'))\n @with_role(*MODERATION_ROLES)\n async def delete_command(self, ctx: Context, *names: OffTopicName) -> None:\n \"\"\"Removes a off-topic name from the rotation.\"\"\"\n # Chain multiple words to a single one\n name = \"-\".join(names)\n\n await self.bot.api_client.delete(f'bot/off-topic-channel-names/{name}')\n log.info(\n f\"{ctx.author.name}#{ctx.author.discriminator}\"\n f\" deleted the off-topic channel name '{name}\"\n )\n await ctx.send(f\":ok_hand: Removed `{name}` from the names list.\")\n\n @otname_group.command(name='list', aliases=('l',))\n @with_role(*MODERATION_ROLES)\n async def list_command(self, ctx: Context) -> None:\n \"\"\"\n Lists all currently known off-topic channel names in a paginator.\n\n Restricted to Moderator and above to not spoil the surprise.\n \"\"\"\n result = await self.bot.api_client.get('bot/off-topic-channel-names')\n lines = sorted(f\"\u2022 {name}\" for name in result)\n embed = Embed(\n title=f\"Known off-topic names (`{len(result)}` total)\",\n colour=Colour.blue()\n )\n if result:\n await LinePaginator.paginate(lines, ctx, embed, max_size=400, empty=False)\n else:\n embed.description = \"Hmmm, seems like there's nothing here yet.\"\n await ctx.send(embed=embed)\n\n @otname_group.command(name='search', aliases=('s',))\n @with_role(*MODERATION_ROLES)\n async def search_command(self, ctx: Context, *, query: OffTopicName) -> None:\n \"\"\"Search for an off-topic name.\"\"\"\n result = await self.bot.api_client.get('bot/off-topic-channel-names')\n in_matches = {name for name in result if query in name}\n close_matches = difflib.get_close_matches(query, result, n=10, cutoff=0.70)\n lines = sorted(f\"\u2022 {name}\" for name in in_matches.union(close_matches))\n embed = Embed(\n title=f\"Query results\",\n colour=Colour.blue()\n )\n\n if lines:\n await LinePaginator.paginate(lines, ctx, embed, max_size=400, empty=False)\n else:\n embed.description = \"Nothing found.\"\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Off topic names cog load.\"\"\"\n bot.add_cog(OffTopicNames(bot))\n log.info(\"Cog loaded: OffTopicNames\")\n", "path": "bot/cogs/off_topic_names.py"}]}
2,763
324
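The fix captured in the record above wraps the API fetch inside the midnight loop in `try/except ResponseCodeError`, logs only the response status (not the potentially huge body), and `continue`s, so one bad response no longer kills the background task. Below is a self-contained sketch of that resilient-loop pattern; `ResponseCodeError` and the client are stand-ins mirroring the record's names, and the short sleep replaces the real sleep-until-midnight-UTC computation:

```python
import asyncio
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)


class ResponseCodeError(Exception):
    """Stand-in for bot.api.ResponseCodeError, which carries the failed response."""
    def __init__(self, response):
        self.response = response


async def update_names(api_client, cycles: int = 3) -> None:
    """Background updater: a failed fetch is logged and skipped, never fatal."""
    for _ in range(cycles):  # the real task loops forever, sleeping until midnight UTC
        await asyncio.sleep(0.1)
        try:
            names = await api_client.get(
                "bot/off-topic-channel-names", params={"random_items": 3}
            )
        except ResponseCodeError as e:
            # Log only the status code; the body could be a huge HTML error page.
            log.error("Failed to get new off-topic channel names: code %s", e.response.status)
            continue  # skip this cycle; the task stays alive for the next one
        log.info("Would rename channels to: %s", names)


class FlakyClient:
    """Hypothetical client that fails once with a 502, then succeeds."""
    def __init__(self):
        self.calls = 0

    async def get(self, path, params=None):
        self.calls += 1
        if self.calls == 1:
            raise ResponseCodeError(type("Resp", (), {"status": 502})())
        return ["snakes", "pythons", "asps"]


asyncio.run(update_names(FlakyClient()))
```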
gh_patches_debug_15891
rasdani/github-patches
git_diff
openshift__openshift-ansible-7849
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Any chance of getting this playbook to work without cinder ? #### Description Provide a brief description of your issue here. For example: We have access to an openstack without cinder. Any chance of getting this playbook to work without cinder ? ##### Version Please put the following version information in the code block indicated below. * Your ansible version per `ansible --version` If you're operating from a **git clone**: * The output of `git describe` If you're running from playbooks installed via RPM or `atomic-openshift-utils` * The output of `rpm -q atomic-openshift-utils openshift-ansible` Place the output between the code block below: ``` $ ansible --version ansible 2.5.0 config file = /home/arthur/local/openshift/ansible.cfg configured module search path = [u'/home/arthur/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /home/arthur/.virtualenvs/ansible/local/lib/python2.7/site-packages/ansible executable location = /home/arthur/.virtualenvs/ansible/bin/ansible python version = 2.7.14 (default, Sep 23 2017, 22:06:14) [GCC 7.2.0] $ git describe openshift-ansible-3.10.0-0.15.0-9-gf28aba492 ``` ##### Steps To Reproduce When I saw the following comment in all.yml ``` # If you want to use the VM storage instead of Cinder volumes, set this to `true`. ``` I thought this might be possible but I get the following error when executing the ansible playbook : ``` $ ansible-playbook [snip] [snip] in endpoint_data_for raise exceptions.EndpointNotFound(msg) keystoneauth1.exceptions.catalog.EndpointNotFound: public endpoint for volumev2 service in RegionOne region not found [snip] $ openstack volume list public endpoint for volumev2 service in RegionOne region not found ``` </issue> <code> [start of playbooks/openstack/inventory.py] 1 #!/usr/bin/env python 2 """ 3 This is an Ansible dynamic inventory for OpenStack. 4 5 It requires your OpenStack credentials to be set in clouds.yaml or your shell 6 environment. 
7 8 """ 9 10 from __future__ import print_function 11 12 from collections import Mapping 13 import json 14 import os 15 16 import shade 17 18 19 def base_openshift_inventory(cluster_hosts): 20 '''Set the base openshift inventory.''' 21 inventory = {} 22 23 masters = [server.name for server in cluster_hosts 24 if server.metadata['host-type'] == 'master'] 25 26 etcd = [server.name for server in cluster_hosts 27 if server.metadata['host-type'] == 'etcd'] 28 if not etcd: 29 etcd = masters 30 31 infra_hosts = [server.name for server in cluster_hosts 32 if server.metadata['host-type'] == 'node' and 33 server.metadata['sub-host-type'] == 'infra'] 34 35 app = [server.name for server in cluster_hosts 36 if server.metadata['host-type'] == 'node' and 37 server.metadata['sub-host-type'] == 'app'] 38 39 cns = [server.name for server in cluster_hosts 40 if server.metadata['host-type'] == 'cns'] 41 42 nodes = list(set(masters + infra_hosts + app + cns)) 43 44 dns = [server.name for server in cluster_hosts 45 if server.metadata['host-type'] == 'dns'] 46 47 load_balancers = [server.name for server in cluster_hosts 48 if server.metadata['host-type'] == 'lb'] 49 50 osev3 = list(set(nodes + etcd + load_balancers)) 51 52 inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]} 53 inventory['OSEv3'] = {'hosts': osev3} 54 inventory['masters'] = {'hosts': masters} 55 inventory['etcd'] = {'hosts': etcd} 56 inventory['nodes'] = {'hosts': nodes} 57 inventory['infra_hosts'] = {'hosts': infra_hosts} 58 inventory['app'] = {'hosts': app} 59 inventory['glusterfs'] = {'hosts': cns} 60 inventory['dns'] = {'hosts': dns} 61 inventory['lb'] = {'hosts': load_balancers} 62 inventory['localhost'] = {'ansible_connection': 'local'} 63 64 return inventory 65 66 67 def get_docker_storage_mountpoints(volumes): 68 '''Check volumes to see if they're being used for docker storage''' 69 docker_storage_mountpoints = {} 70 for volume in volumes: 71 if volume.metadata.get('purpose') == "openshift_docker_storage": 72 for attachment in volume.attachments: 73 if attachment.server_id in docker_storage_mountpoints: 74 docker_storage_mountpoints[attachment.server_id].append(attachment.device) 75 else: 76 docker_storage_mountpoints[attachment.server_id] = [attachment.device] 77 return docker_storage_mountpoints 78 79 80 def _get_hostvars(server, docker_storage_mountpoints): 81 ssh_ip_address = server.public_v4 or server.private_v4 82 hostvars = { 83 'ansible_host': ssh_ip_address 84 } 85 86 public_v4 = server.public_v4 or server.private_v4 87 if public_v4: 88 hostvars['public_v4'] = server.public_v4 89 hostvars['openshift_public_ip'] = server.public_v4 90 # TODO(shadower): what about multiple networks? 91 if server.private_v4: 92 hostvars['private_v4'] = server.private_v4 93 hostvars['openshift_ip'] = server.private_v4 94 95 # NOTE(shadower): Yes, we set both hostname and IP to the private 96 # IP address for each node. OpenStack doesn't resolve nodes by 97 # name at all, so using a hostname here would require an internal 98 # DNS which would complicate the setup and potentially introduce 99 # performance issues. 
100 hostvars['openshift_hostname'] = server.metadata.get( 101 'openshift_hostname', server.private_v4) 102 hostvars['openshift_public_hostname'] = server.name 103 104 if server.metadata['host-type'] == 'cns': 105 hostvars['glusterfs_devices'] = ['/dev/nvme0n1'] 106 107 node_labels = server.metadata.get('node_labels') 108 # NOTE(shadower): the node_labels value must be a dict not string 109 if not isinstance(node_labels, Mapping): 110 node_labels = json.loads(node_labels) 111 112 if node_labels: 113 hostvars['openshift_node_labels'] = node_labels 114 115 # check for attached docker storage volumes 116 if 'os-extended-volumes:volumes_attached' in server: 117 if server.id in docker_storage_mountpoints: 118 hostvars['docker_storage_mountpoints'] = ' '.join( 119 docker_storage_mountpoints[server.id]) 120 return hostvars 121 122 123 def build_inventory(): 124 '''Build the dynamic inventory.''' 125 cloud = shade.openstack_cloud() 126 127 # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER` 128 # environment variable. 129 cluster_hosts = [ 130 server for server in cloud.list_servers() 131 if 'metadata' in server and 'clusterid' in server.metadata] 132 133 inventory = base_openshift_inventory(cluster_hosts) 134 135 for server in cluster_hosts: 136 if 'group' in server.metadata: 137 group = server.metadata.get('group') 138 if group not in inventory: 139 inventory[group] = {'hosts': []} 140 inventory[group]['hosts'].append(server.name) 141 142 inventory['_meta'] = {'hostvars': {}} 143 144 # cinder volumes used for docker storage 145 docker_storage_mountpoints = get_docker_storage_mountpoints( 146 cloud.list_volumes()) 147 for server in cluster_hosts: 148 inventory['_meta']['hostvars'][server.name] = _get_hostvars( 149 server, 150 docker_storage_mountpoints) 151 152 stout = _get_stack_outputs(cloud) 153 if stout is not None: 154 try: 155 inventory['localhost'].update({ 156 'openshift_openstack_api_lb_provider': 157 stout['api_lb_provider'], 158 'openshift_openstack_api_lb_port_id': 159 stout['api_lb_vip_port_id'], 160 'openshift_openstack_api_lb_sg_id': 161 stout['api_lb_sg_id']}) 162 except KeyError: 163 pass # Not an API load balanced deployment 164 165 try: 166 inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout) 167 except KeyError: 168 pass # Not a kuryr deployment 169 return inventory 170 171 172 def _get_stack_outputs(cloud_client): 173 """Returns a dictionary with the stack outputs""" 174 cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster') 175 176 stack = cloud_client.get_stack(cluster_name) 177 if stack is None or stack['stack_status'] not in ( 178 'CREATE_COMPLETE', 'UPDATE_COMPLETE'): 179 return None 180 181 data = {} 182 for output in stack['outputs']: 183 data[output['output_key']] = output['output_value'] 184 return data 185 186 187 def _get_kuryr_vars(cloud_client, data): 188 """Returns a dictionary of Kuryr variables resulting of heat stacking""" 189 settings = {} 190 settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet'] 191 settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet'] 192 settings['kuryr_openstack_service_subnet_id'] = data['service_subnet'] 193 settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id'] 194 settings['kuryr_openstack_pod_project_id'] = ( 195 cloud_client.current_project_id) 196 197 settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url'] 198 settings['kuryr_openstack_username'] = cloud_client.auth['username'] 199 settings['kuryr_openstack_password'] = 
cloud_client.auth['password'] 200 if 'user_domain_id' in cloud_client.auth: 201 settings['kuryr_openstack_user_domain_name'] = ( 202 cloud_client.auth['user_domain_id']) 203 else: 204 settings['kuryr_openstack_user_domain_name'] = ( 205 cloud_client.auth['user_domain_name']) 206 # FIXME(apuimedo): consolidate kuryr controller credentials into the same 207 # vars the openstack playbook uses. 208 settings['kuryr_openstack_project_id'] = cloud_client.current_project_id 209 if 'project_domain_id' in cloud_client.auth: 210 settings['kuryr_openstack_project_domain_name'] = ( 211 cloud_client.auth['project_domain_id']) 212 else: 213 settings['kuryr_openstack_project_domain_name'] = ( 214 cloud_client.auth['project_domain_name']) 215 return settings 216 217 218 if __name__ == '__main__': 219 print(json.dumps(build_inventory(), indent=4, sort_keys=True)) 220 [end of playbooks/openstack/inventory.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py --- a/playbooks/openstack/inventory.py +++ b/playbooks/openstack/inventory.py @@ -13,6 +13,7 @@ import json import os +from keystoneauth1.exceptions.catalog import EndpointNotFound import shade @@ -141,9 +142,14 @@ inventory['_meta'] = {'hostvars': {}} + # Some clouds don't have Cinder. That's okay: + try: + volumes = cloud.list_volumes() + except EndpointNotFound: + volumes = [] + # cinder volumes used for docker storage - docker_storage_mountpoints = get_docker_storage_mountpoints( - cloud.list_volumes()) + docker_storage_mountpoints = get_docker_storage_mountpoints(volumes) for server in cluster_hosts: inventory['_meta']['hostvars'][server.name] = _get_hostvars( server,
{"golden_diff": "diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py\n--- a/playbooks/openstack/inventory.py\n+++ b/playbooks/openstack/inventory.py\n@@ -13,6 +13,7 @@\n import json\n import os\n \n+from keystoneauth1.exceptions.catalog import EndpointNotFound\n import shade\n \n \n@@ -141,9 +142,14 @@\n \n inventory['_meta'] = {'hostvars': {}}\n \n+ # Some clouds don't have Cinder. That's okay:\n+ try:\n+ volumes = cloud.list_volumes()\n+ except EndpointNotFound:\n+ volumes = []\n+\n # cinder volumes used for docker storage\n- docker_storage_mountpoints = get_docker_storage_mountpoints(\n- cloud.list_volumes())\n+ docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)\n for server in cluster_hosts:\n inventory['_meta']['hostvars'][server.name] = _get_hostvars(\n server,\n", "issue": "Any chance of getting this playbook to work without cinder ? \n#### Description\r\n\r\nProvide a brief description of your issue here. For example:\r\n\r\nWe have access to an openstack without cinder. Any chance of getting this playbook to work without cinder ? \r\n\r\n\r\n\r\n##### Version\r\n\r\nPlease put the following version information in the code block\r\nindicated below.\r\n\r\n* Your ansible version per `ansible --version`\r\n\r\nIf you're operating from a **git clone**:\r\n\r\n* The output of `git describe`\r\n\r\n\r\nIf you're running from playbooks installed via RPM or\r\n`atomic-openshift-utils`\r\n\r\n* The output of `rpm -q atomic-openshift-utils openshift-ansible`\r\n\r\nPlace the output between the code block below:\r\n\r\n```\r\n$ ansible --version\r\nansible 2.5.0\r\n config file = /home/arthur/local/openshift/ansible.cfg\r\n configured module search path = [u'/home/arthur/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/arthur/.virtualenvs/ansible/local/lib/python2.7/site-packages/ansible\r\n executable location = /home/arthur/.virtualenvs/ansible/bin/ansible\r\n python version = 2.7.14 (default, Sep 23 2017, 22:06:14) [GCC 7.2.0]\r\n\r\n$ git describe\r\nopenshift-ansible-3.10.0-0.15.0-9-gf28aba492\r\n```\r\n\r\n##### Steps To Reproduce\r\n\r\nWhen I saw the following comment in all.yml \r\n```\r\n# If you want to use the VM storage instead of Cinder volumes, set this to `true`.\r\n```\r\nI thought this might be possible but I get the following error when executing the ansible playbook : \r\n\r\n```\r\n$ ansible-playbook [snip]\r\n[snip]\r\n in endpoint_data_for raise\r\nexceptions.EndpointNotFound(msg) keystoneauth1.exceptions.catalog.EndpointNotFound: public\r\nendpoint for volumev2 service in RegionOne region not found\r\n[snip]\r\n$ openstack volume list\r\npublic endpoint for volumev2 service in RegionOne region not found\r\n\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis is an Ansible dynamic inventory for OpenStack.\n\nIt requires your OpenStack credentials to be set in clouds.yaml or your shell\nenvironment.\n\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom collections import Mapping\nimport json\nimport os\n\nimport shade\n\n\ndef base_openshift_inventory(cluster_hosts):\n '''Set the base openshift inventory.'''\n inventory = {}\n\n masters = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'master']\n\n etcd = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'etcd']\n if not etcd:\n etcd = masters\n\n infra_hosts = [server.name for server in cluster_hosts\n if 
server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'infra']\n\n app = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'app']\n\n cns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'cns']\n\n nodes = list(set(masters + infra_hosts + app + cns))\n\n dns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'dns']\n\n load_balancers = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'lb']\n\n osev3 = list(set(nodes + etcd + load_balancers))\n\n inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}\n inventory['OSEv3'] = {'hosts': osev3}\n inventory['masters'] = {'hosts': masters}\n inventory['etcd'] = {'hosts': etcd}\n inventory['nodes'] = {'hosts': nodes}\n inventory['infra_hosts'] = {'hosts': infra_hosts}\n inventory['app'] = {'hosts': app}\n inventory['glusterfs'] = {'hosts': cns}\n inventory['dns'] = {'hosts': dns}\n inventory['lb'] = {'hosts': load_balancers}\n inventory['localhost'] = {'ansible_connection': 'local'}\n\n return inventory\n\n\ndef get_docker_storage_mountpoints(volumes):\n '''Check volumes to see if they're being used for docker storage'''\n docker_storage_mountpoints = {}\n for volume in volumes:\n if volume.metadata.get('purpose') == \"openshift_docker_storage\":\n for attachment in volume.attachments:\n if attachment.server_id in docker_storage_mountpoints:\n docker_storage_mountpoints[attachment.server_id].append(attachment.device)\n else:\n docker_storage_mountpoints[attachment.server_id] = [attachment.device]\n return docker_storage_mountpoints\n\n\ndef _get_hostvars(server, docker_storage_mountpoints):\n ssh_ip_address = server.public_v4 or server.private_v4\n hostvars = {\n 'ansible_host': ssh_ip_address\n }\n\n public_v4 = server.public_v4 or server.private_v4\n if public_v4:\n hostvars['public_v4'] = server.public_v4\n hostvars['openshift_public_ip'] = server.public_v4\n # TODO(shadower): what about multiple networks?\n if server.private_v4:\n hostvars['private_v4'] = server.private_v4\n hostvars['openshift_ip'] = server.private_v4\n\n # NOTE(shadower): Yes, we set both hostname and IP to the private\n # IP address for each node. 
OpenStack doesn't resolve nodes by\n # name at all, so using a hostname here would require an internal\n # DNS which would complicate the setup and potentially introduce\n # performance issues.\n hostvars['openshift_hostname'] = server.metadata.get(\n 'openshift_hostname', server.private_v4)\n hostvars['openshift_public_hostname'] = server.name\n\n if server.metadata['host-type'] == 'cns':\n hostvars['glusterfs_devices'] = ['/dev/nvme0n1']\n\n node_labels = server.metadata.get('node_labels')\n # NOTE(shadower): the node_labels value must be a dict not string\n if not isinstance(node_labels, Mapping):\n node_labels = json.loads(node_labels)\n\n if node_labels:\n hostvars['openshift_node_labels'] = node_labels\n\n # check for attached docker storage volumes\n if 'os-extended-volumes:volumes_attached' in server:\n if server.id in docker_storage_mountpoints:\n hostvars['docker_storage_mountpoints'] = ' '.join(\n docker_storage_mountpoints[server.id])\n return hostvars\n\n\ndef build_inventory():\n '''Build the dynamic inventory.'''\n cloud = shade.openstack_cloud()\n\n # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`\n # environment variable.\n cluster_hosts = [\n server for server in cloud.list_servers()\n if 'metadata' in server and 'clusterid' in server.metadata]\n\n inventory = base_openshift_inventory(cluster_hosts)\n\n for server in cluster_hosts:\n if 'group' in server.metadata:\n group = server.metadata.get('group')\n if group not in inventory:\n inventory[group] = {'hosts': []}\n inventory[group]['hosts'].append(server.name)\n\n inventory['_meta'] = {'hostvars': {}}\n\n # cinder volumes used for docker storage\n docker_storage_mountpoints = get_docker_storage_mountpoints(\n cloud.list_volumes())\n for server in cluster_hosts:\n inventory['_meta']['hostvars'][server.name] = _get_hostvars(\n server,\n docker_storage_mountpoints)\n\n stout = _get_stack_outputs(cloud)\n if stout is not None:\n try:\n inventory['localhost'].update({\n 'openshift_openstack_api_lb_provider':\n stout['api_lb_provider'],\n 'openshift_openstack_api_lb_port_id':\n stout['api_lb_vip_port_id'],\n 'openshift_openstack_api_lb_sg_id':\n stout['api_lb_sg_id']})\n except KeyError:\n pass # Not an API load balanced deployment\n\n try:\n inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)\n except KeyError:\n pass # Not a kuryr deployment\n return inventory\n\n\ndef _get_stack_outputs(cloud_client):\n \"\"\"Returns a dictionary with the stack outputs\"\"\"\n cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')\n\n stack = cloud_client.get_stack(cluster_name)\n if stack is None or stack['stack_status'] not in (\n 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):\n return None\n\n data = {}\n for output in stack['outputs']:\n data[output['output_key']] = output['output_value']\n return data\n\n\ndef _get_kuryr_vars(cloud_client, data):\n \"\"\"Returns a dictionary of Kuryr variables resulting of heat stacking\"\"\"\n settings = {}\n settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']\n settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']\n settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']\n settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']\n settings['kuryr_openstack_pod_project_id'] = (\n cloud_client.current_project_id)\n\n settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']\n settings['kuryr_openstack_username'] = cloud_client.auth['username']\n settings['kuryr_openstack_password'] = 
cloud_client.auth['password']\n if 'user_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_id'])\n else:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_name'])\n # FIXME(apuimedo): consolidate kuryr controller credentials into the same\n # vars the openstack playbook uses.\n settings['kuryr_openstack_project_id'] = cloud_client.current_project_id\n if 'project_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_id'])\n else:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_name'])\n return settings\n\n\nif __name__ == '__main__':\n print(json.dumps(build_inventory(), indent=4, sort_keys=True))\n", "path": "playbooks/openstack/inventory.py"}]}
3,432
216
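The fix in the record above makes the inventory tolerate clouds without a Cinder (block storage) endpoint by catching `keystoneauth1.exceptions.catalog.EndpointNotFound` around `cloud.list_volumes()` and substituting an empty list, so docker-storage mountpoint detection simply finds nothing. A minimal sketch of that degrade-gracefully pattern follows; both imports appear in the patched file itself, and actually running it requires `shade` installed plus credentials in `clouds.yaml` or `OS_*` environment variables:

```python
from keystoneauth1.exceptions.catalog import EndpointNotFound
import shade


def safe_list_volumes(cloud):
    """Return Cinder volumes, or [] when the cloud exposes no volume endpoint."""
    try:
        return cloud.list_volumes()
    except EndpointNotFound:
        # Some clouds don't run Cinder; an empty list lets the rest of the
        # inventory build (docker-storage detection, hostvars) proceed.
        return []


if __name__ == "__main__":
    cloud = shade.openstack_cloud()  # reads clouds.yaml / OS_* env vars
    print(f"{len(safe_list_volumes(cloud))} volume(s) visible")
```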
gh_patches_debug_12407
rasdani/github-patches
git_diff
zestedesavoir__zds-site-6079
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Il est possible de choisir un pseudo invalide **Description du bug** Il est possible de choisir un pseudo un peu farfelu comme par exemple `https://viki53.eu` qui est dans certains cas invalide : la fonction `reverse_lazy('tutorial:find-tutorial', args=(profile.user.username,))` qui permet de retrouver l'URL `'tutoriels/voir/(?P<username>[^/]+)/$'` retourne une erreur `NoReverseMatch`. **Comment reproduire ?** La liste des étapes qui permet de reproduire le bug : 1. Se renommer en `https://viki53.eu` 2. Aller sur son profil et constater l'erreur interne **Comportement attendu** Aucune erreur interne. **Solution possible** Il serait possible d'ajouter une petite vérification lors du changement de pseudo pour refuser les pseudos invalides : ```py try: reverse_lazy('tutorial:find-tutorial', args=(profile.user.username,)) except NoReverseMatch: # Refuser le pseudo ``` </issue> <code> [start of zds/member/validators.py] 1 from django.contrib.auth.models import User 2 from django.core.exceptions import ValidationError 3 from django.core.validators import EmailValidator 4 from django.utils.encoding import force_str 5 from django.utils.translation import gettext_lazy as _ 6 7 from zds.utils.misc import contains_utf8mb4 8 from zds.member.models import BannedEmailProvider, Profile 9 10 11 def validate_not_empty(value): 12 """ 13 Fields cannot be empty or only contain spaces. 14 15 :param value: value to validate (str or None) 16 :return: 17 """ 18 if value is None or not value.strip(): 19 raise ValidationError(_("Le champs ne peut être vide")) 20 21 22 class ZdSEmailValidator(EmailValidator): 23 """ 24 Based on https://docs.djangoproject.com/en/1.8/_modules/django/core/validators/#EmailValidator 25 Changed : 26 - check if provider is not if blacklisted 27 - check if email is not used by another user 28 - remove whitelist check 29 - add custom errors and translate them into French 30 """ 31 32 message = _("Utilisez une adresse de courriel valide.") 33 34 def __call__(self, value, check_username_available=True): 35 value = force_str(value) 36 37 if not value or "@" not in value: 38 raise ValidationError(self.message, code=self.code) 39 40 user_part, domain_part = value.rsplit("@", 1) 41 42 if not self.user_regex.match(user_part) or contains_utf8mb4(user_part): 43 raise ValidationError(self.message, code=self.code) 44 45 # check if provider is blacklisted 46 blacklist = BannedEmailProvider.objects.values_list("provider", flat=True) 47 for provider in blacklist: 48 if f"@{provider}" in value.lower(): 49 raise ValidationError(_("Ce fournisseur ne peut pas être utilisé."), code=self.code) 50 51 # check if email is used by another user 52 user_count = User.objects.filter(email=value).count() 53 if check_username_available and user_count > 0: 54 raise ValidationError(_("Cette adresse courriel est déjà utilisée"), code=self.code) 55 # check if email exists in database 56 elif not check_username_available and user_count == 0: 57 raise ValidationError(_("Cette adresse courriel n'existe pas"), code=self.code) 58 59 if domain_part and not self.validate_domain_part(domain_part): 60 # Try for possible IDN domain-part 61 try: 62 domain_part = domain_part.encode("idna").decode("ascii") 63 if self.validate_domain_part(domain_part): 64 return 65 except UnicodeError: 66 pass 67 raise ValidationError(self.message, code=self.code) 68 69 70 validate_zds_email = ZdSEmailValidator() 71 72 73 def 
validate_zds_username(value, check_username_available=True): 74 """ 75 Check if username is used by another user 76 77 :param value: value to validate (str or None) 78 :return: 79 """ 80 msg = None 81 user_count = User.objects.filter(username=value).count() 82 skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count() 83 if "," in value: 84 msg = _("Le nom d'utilisateur ne peut contenir de virgules") 85 elif contains_utf8mb4(value): 86 msg = _("Le nom d'utilisateur ne peut pas contenir des caractères utf8mb4") 87 elif check_username_available and user_count > 0: 88 msg = _("Ce nom d'utilisateur est déjà utilisé") 89 elif check_username_available and skeleton_user_count > 0: 90 msg = _("Un nom d'utilisateur visuellement proche du votre existe déjà") 91 elif not check_username_available and user_count == 0: 92 msg = _("Ce nom d'utilisateur n'existe pas") 93 if msg is not None: 94 raise ValidationError(msg) 95 96 97 def validate_raw_zds_username(data): 98 """ 99 Check if raw username hasn't space on left or right 100 """ 101 msg = None 102 username = data.get("username", None) 103 if username is None: 104 msg = _("Le nom d'utilisateur n'est pas fourni") 105 elif username != username.strip(): 106 msg = _("Le nom d'utilisateur ne peut commencer ou finir par des espaces") 107 108 if msg is not None: 109 raise ValidationError(msg) 110 111 112 def validate_zds_password(value): 113 """ 114 115 :param value: 116 :return: 117 """ 118 if contains_utf8mb4(value): 119 raise ValidationError(_("Le mot de passe ne peut pas contenir des caractères utf8mb4")) 120 121 122 def validate_passwords( 123 cleaned_data, password_label="password", password_confirm_label="password_confirm", username=None 124 ): 125 """ 126 Chek if cleaned_data['password'] == cleaned_data['password_confirm'] and password is not username. 127 :param cleaned_data: 128 :param password_label: 129 :param password_confirm_label: 130 :return: 131 """ 132 133 password = cleaned_data.get(password_label) 134 password_confirm = cleaned_data.get(password_confirm_label) 135 msg = None 136 137 if username is None: 138 username = cleaned_data.get("username") 139 140 if not password_confirm == password: 141 msg = _("Les mots de passe sont différents") 142 143 if password_label in cleaned_data: 144 del cleaned_data[password_label] 145 146 if password_confirm_label in cleaned_data: 147 del cleaned_data[password_confirm_label] 148 149 if username is not None: 150 # Check that password != username 151 if password == username: 152 msg = _("Le mot de passe doit être différent du pseudo") 153 if password_label in cleaned_data: 154 del cleaned_data[password_label] 155 if password_confirm_label in cleaned_data: 156 del cleaned_data[password_confirm_label] 157 158 if msg is not None: 159 raise ValidationError(msg) 160 161 return cleaned_data 162 [end of zds/member/validators.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/member/validators.py b/zds/member/validators.py --- a/zds/member/validators.py +++ b/zds/member/validators.py @@ -82,6 +82,8 @@ skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count() if "," in value: msg = _("Le nom d'utilisateur ne peut contenir de virgules") + if "/" in value: + msg = _("Le nom d'utilisateur ne peut contenir de barres obliques") elif contains_utf8mb4(value): msg = _("Le nom d'utilisateur ne peut pas contenir des caractères utf8mb4") elif check_username_available and user_count > 0:
{"golden_diff": "diff --git a/zds/member/validators.py b/zds/member/validators.py\n--- a/zds/member/validators.py\n+++ b/zds/member/validators.py\n@@ -82,6 +82,8 @@\n skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count()\n if \",\" in value:\n msg = _(\"Le nom d'utilisateur ne peut contenir de virgules\")\n+ if \"/\" in value:\n+ msg = _(\"Le nom d'utilisateur ne peut contenir de barres obliques\")\n elif contains_utf8mb4(value):\n msg = _(\"Le nom d'utilisateur ne peut pas contenir des caract\u00e8res utf8mb4\")\n elif check_username_available and user_count > 0:\n", "issue": "Il est possible de choisir un pseudo invalide\n**Description du bug**\r\n\r\nIl est possible de choisir un pseudo un peu farfelu comme par exemple `https://viki53.eu` qui est dans certains cas invalide : la fonction `reverse_lazy('tutorial:find-tutorial', args=(profile.user.username,))` qui permet de retrouver l'URL `'tutoriels/voir/(?P<username>[^/]+)/$'` retourne une erreur `NoReverseMatch`.\r\n\r\n**Comment reproduire ?**\r\n\r\nLa liste des \u00e9tapes qui permet de reproduire le bug :\r\n\r\n1. Se renommer en `https://viki53.eu`\r\n2. Aller sur son profil et constater l'erreur interne\r\n\r\n**Comportement attendu**\r\n\r\nAucune erreur interne.\r\n\r\n**Solution possible**\r\n\r\nIl serait possible d'ajouter une petite v\u00e9rification lors du changement de pseudo pour refuser les pseudos invalides : \r\n\r\n```py\r\ntry:\r\n reverse_lazy('tutorial:find-tutorial', args=(profile.user.username,))\r\nexcept NoReverseMatch:\r\n # Refuser le pseudo\r\n```\n", "before_files": [{"content": "from django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.utils.encoding import force_str\nfrom django.utils.translation import gettext_lazy as _\n\nfrom zds.utils.misc import contains_utf8mb4\nfrom zds.member.models import BannedEmailProvider, Profile\n\n\ndef validate_not_empty(value):\n \"\"\"\n Fields cannot be empty or only contain spaces.\n\n :param value: value to validate (str or None)\n :return:\n \"\"\"\n if value is None or not value.strip():\n raise ValidationError(_(\"Le champs ne peut \u00eatre vide\"))\n\n\nclass ZdSEmailValidator(EmailValidator):\n \"\"\"\n Based on https://docs.djangoproject.com/en/1.8/_modules/django/core/validators/#EmailValidator\n Changed :\n - check if provider is not if blacklisted\n - check if email is not used by another user\n - remove whitelist check\n - add custom errors and translate them into French\n \"\"\"\n\n message = _(\"Utilisez une adresse de courriel valide.\")\n\n def __call__(self, value, check_username_available=True):\n value = force_str(value)\n\n if not value or \"@\" not in value:\n raise ValidationError(self.message, code=self.code)\n\n user_part, domain_part = value.rsplit(\"@\", 1)\n\n if not self.user_regex.match(user_part) or contains_utf8mb4(user_part):\n raise ValidationError(self.message, code=self.code)\n\n # check if provider is blacklisted\n blacklist = BannedEmailProvider.objects.values_list(\"provider\", flat=True)\n for provider in blacklist:\n if f\"@{provider}\" in value.lower():\n raise ValidationError(_(\"Ce fournisseur ne peut pas \u00eatre utilis\u00e9.\"), code=self.code)\n\n # check if email is used by another user\n user_count = User.objects.filter(email=value).count()\n if check_username_available and user_count > 0:\n raise ValidationError(_(\"Cette adresse courriel est d\u00e9j\u00e0 
utilis\u00e9e\"), code=self.code)\n # check if email exists in database\n elif not check_username_available and user_count == 0:\n raise ValidationError(_(\"Cette adresse courriel n'existe pas\"), code=self.code)\n\n if domain_part and not self.validate_domain_part(domain_part):\n # Try for possible IDN domain-part\n try:\n domain_part = domain_part.encode(\"idna\").decode(\"ascii\")\n if self.validate_domain_part(domain_part):\n return\n except UnicodeError:\n pass\n raise ValidationError(self.message, code=self.code)\n\n\nvalidate_zds_email = ZdSEmailValidator()\n\n\ndef validate_zds_username(value, check_username_available=True):\n \"\"\"\n Check if username is used by another user\n\n :param value: value to validate (str or None)\n :return:\n \"\"\"\n msg = None\n user_count = User.objects.filter(username=value).count()\n skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count()\n if \",\" in value:\n msg = _(\"Le nom d'utilisateur ne peut contenir de virgules\")\n elif contains_utf8mb4(value):\n msg = _(\"Le nom d'utilisateur ne peut pas contenir des caract\u00e8res utf8mb4\")\n elif check_username_available and user_count > 0:\n msg = _(\"Ce nom d'utilisateur est d\u00e9j\u00e0 utilis\u00e9\")\n elif check_username_available and skeleton_user_count > 0:\n msg = _(\"Un nom d'utilisateur visuellement proche du votre existe d\u00e9j\u00e0\")\n elif not check_username_available and user_count == 0:\n msg = _(\"Ce nom d'utilisateur n'existe pas\")\n if msg is not None:\n raise ValidationError(msg)\n\n\ndef validate_raw_zds_username(data):\n \"\"\"\n Check if raw username hasn't space on left or right\n \"\"\"\n msg = None\n username = data.get(\"username\", None)\n if username is None:\n msg = _(\"Le nom d'utilisateur n'est pas fourni\")\n elif username != username.strip():\n msg = _(\"Le nom d'utilisateur ne peut commencer ou finir par des espaces\")\n\n if msg is not None:\n raise ValidationError(msg)\n\n\ndef validate_zds_password(value):\n \"\"\"\n\n :param value:\n :return:\n \"\"\"\n if contains_utf8mb4(value):\n raise ValidationError(_(\"Le mot de passe ne peut pas contenir des caract\u00e8res utf8mb4\"))\n\n\ndef validate_passwords(\n cleaned_data, password_label=\"password\", password_confirm_label=\"password_confirm\", username=None\n):\n \"\"\"\n Chek if cleaned_data['password'] == cleaned_data['password_confirm'] and password is not username.\n :param cleaned_data:\n :param password_label:\n :param password_confirm_label:\n :return:\n \"\"\"\n\n password = cleaned_data.get(password_label)\n password_confirm = cleaned_data.get(password_confirm_label)\n msg = None\n\n if username is None:\n username = cleaned_data.get(\"username\")\n\n if not password_confirm == password:\n msg = _(\"Les mots de passe sont diff\u00e9rents\")\n\n if password_label in cleaned_data:\n del cleaned_data[password_label]\n\n if password_confirm_label in cleaned_data:\n del cleaned_data[password_confirm_label]\n\n if username is not None:\n # Check that password != username\n if password == username:\n msg = _(\"Le mot de passe doit \u00eatre diff\u00e9rent du pseudo\")\n if password_label in cleaned_data:\n del cleaned_data[password_label]\n if password_confirm_label in cleaned_data:\n del cleaned_data[password_confirm_label]\n\n if msg is not None:\n raise ValidationError(msg)\n\n return cleaned_data\n", "path": "zds/member/validators.py"}]}
num_tokens_prompt: 2,353
num_tokens_diff: 161

problem_id: gh_patches_debug_30552
source: rasdani/github-patches
task_type: git_diff
in_source_id: CTFd__CTFd-1330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Email cannot be sent if CTF name has an accent <!-- If this is a bug report please fill out the template below. If this is a feature request please describe the behavior that you'd like to see. --> **Environment**: - CTFd Version/Commit: 2.3.2 - Operating System: Debian 10.3, Python 3.7.3 - Web Browser and Version: Firefox Nightly, 76.0a1 (2020-04-06) **What happened?** After a fresh install of CTFd we set the competition name to "Inté CTF". We configure the email server to use our Postfix relay, and test it with the "forgotten password" feature. The email never arrives and is bounced by relay servers because the `From` header is invalid. The actual issue is because the `From` header has the form "CTF name <email address>" (see [here](https://github.com/CTFd/CTFd/blob/master/CTFd/utils/email/smtp.py#L25)). Once encoded in the `msg.as_string()` ([there](https://github.com/CTFd/CTFd/blob/master/CTFd/utils/email/smtp.py#L55)) it is entirely encoded in UTF-8 looking like this `=?utf-8?q?Int=C3=A9_CTF_=3Cctf-noreply=40example=2Ecom=3E?=`, which is invalid against RFC 2407 section 5 ("An 'encoded-word' MUST NOT appear in any portion of an 'addr-spec'"). A correct form would be `=?utf-8?q?Int=C3=A9?= CTF <[email protected]>`. I have a patch ready but it uses `from email.message import EmailMessage` which is available in Python 3.6+. I'll be happy to open a PR if you confirm me that support for lower Python version can be dropped ^^ <details> <summary>Patch for `CTFd/utils/email/smtp.py`</summary> ```diff -from email.mime.text import MIMEText +from email.message import EmailMessage # around line 50 - msg = MIMEText(text) + msg = EmailMessage() + msg.set_content(text) msg["Subject"] = subject msg["From"] = mailfrom_addr msg["To"] = addr - smtp.sendmail(msg["From"], [msg["To"]], msg.as_string()) + smtp.send_message(msg) ``` </details> **How to reproduce your issue** - setup a new instance of CTFd - configure the competition name with non-ascii characters - configure the email server to use a "real" mail server (not Mailgun) - try to send an email (with the "forgotten password" for example) - see the mail server logs <details> <summary> You can see the difference between both methods (`MIMEText` and `EmailMessage`) with the following snippet </summary> ```python from email.mime.text import MIMEText msg = MIMEText("This is a message with accents éèçà") msg["From"] = "René Côti <[email protected]>" msg["To"] = "[email protected]" msg.as_string() from email.message import EmailMessage msg = EmailMessage() msg.set_content("This is a message with accents éèçà") msg["From"] = "René Côti <[email protected]>" msg["To"] = "[email protected]" msg.as_string() ``` </details> **Any associated stack traces or error logs** ``` # Sent to a gmail recipient opendkim[839]: 278839FE69: can't parse From: header value ' =?utf-8?q?Int=C3=A9_CTF_=3Cctf-noreply=40example=2Ecom=3E?=' postfix/smtp[8076]: 278839FE69: to=<[email protected]>, relay=gmail-smtp-in.l.google.com[74.125.206.27]:25, delay=0.75, delays=0.31/0/0.23/0.21, dsn=5.7.1, status=bounced (host gmail-smtp-in.l.google.com[74.125.206.27] said: 550-5.7.1 [185.132.74.134 14] Messages missing a valid address in From: 550 5.7.1 header, or having no From: header, are not accepted. 
p64si4801636wmp.124 - gsmtp (in reply to end of DATA command)) # Sent to another recipient, with another relay server postfix/smtpd[7858]: NOQUEUE: reject: RCPT from mailcube2.domain.fr[193.51.52.6]: 550 5.1.1 <[email protected]>: Recipient address rejected: User unknown in virtual mailbox table; from=<> to=<[email protected]> proto=ESMTP helo=<mailcube2.domain.fr> ``` </issue> <code> [start of CTFd/utils/email/smtp.py] 1 import smtplib 2 from email.mime.text import MIMEText 3 from socket import timeout 4 5 from CTFd.utils import get_app_config, get_config 6 7 8 def get_smtp(host, port, username=None, password=None, TLS=None, SSL=None, auth=None): 9 if SSL is None: 10 smtp = smtplib.SMTP(host, port, timeout=3) 11 else: 12 smtp = smtplib.SMTP_SSL(host, port, timeout=3) 13 14 if TLS: 15 smtp.starttls() 16 17 if auth: 18 smtp.login(username, password) 19 return smtp 20 21 22 def sendmail(addr, text, subject): 23 ctf_name = get_config("ctf_name") 24 mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR") 25 mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr) 26 27 data = { 28 "host": get_config("mail_server") or get_app_config("MAIL_SERVER"), 29 "port": int(get_config("mail_port") or get_app_config("MAIL_PORT")), 30 } 31 username = get_config("mail_username") or get_app_config("MAIL_USERNAME") 32 password = get_config("mail_password") or get_app_config("MAIL_PASSWORD") 33 TLS = get_config("mail_tls") or get_app_config("MAIL_TLS") 34 SSL = get_config("mail_ssl") or get_app_config("MAIL_SSL") 35 auth = get_config("mail_useauth") or get_app_config("MAIL_USEAUTH") 36 37 if username: 38 data["username"] = username 39 if password: 40 data["password"] = password 41 if TLS: 42 data["TLS"] = TLS 43 if SSL: 44 data["SSL"] = SSL 45 if auth: 46 data["auth"] = auth 47 48 try: 49 smtp = get_smtp(**data) 50 msg = MIMEText(text) 51 msg["Subject"] = subject 52 msg["From"] = mailfrom_addr 53 msg["To"] = addr 54 55 smtp.sendmail(msg["From"], [msg["To"]], msg.as_string()) 56 smtp.quit() 57 return True, "Email sent" 58 except smtplib.SMTPException as e: 59 return False, str(e) 60 except timeout: 61 return False, "SMTP server connection timed out" 62 except Exception as e: 63 return False, str(e) 64 [end of CTFd/utils/email/smtp.py] [start of CTFd/utils/config/__init__.py] 1 import os 2 import time 3 4 from flask import current_app as app 5 6 from CTFd.utils import get_config 7 from CTFd.utils.modes import TEAMS_MODE, USERS_MODE 8 9 10 def ctf_name(): 11 name = get_config("ctf_name") 12 return name if name else "CTFd" 13 14 15 def user_mode(): 16 return get_config("user_mode") 17 18 19 def is_users_mode(): 20 return user_mode() == USERS_MODE 21 22 23 def is_teams_mode(): 24 return user_mode() == TEAMS_MODE 25 26 27 def ctf_logo(): 28 return get_config("ctf_logo") 29 30 31 def ctf_theme(): 32 theme = get_config("ctf_theme") 33 return theme if theme else "" 34 35 36 def is_setup(): 37 return bool(get_config("setup")) is True 38 39 40 def is_scoreboard_frozen(): 41 freeze = get_config("freeze") 42 43 if freeze: 44 freeze = int(freeze) 45 if freeze < time.time(): 46 return True 47 48 return False 49 50 51 def can_send_mail(): 52 return mailserver() or mailgun() 53 54 55 def get_mail_provider(): 56 if app.config.get("MAIL_SERVER") and app.config.get("MAIL_PORT"): 57 return "smtp" 58 if get_config("mail_server") and get_config("mail_port"): 59 return "smtp" 60 if app.config.get("MAILGUN_API_KEY") and app.config.get("MAILGUN_BASE_URL"): 61 return "mailgun" 62 if get_config("mailgun_api_key") 
and get_config("mailgun_base_url"): 63 return "mailgun" 64 65 66 def mailgun(): 67 if app.config.get("MAILGUN_API_KEY") and app.config.get("MAILGUN_BASE_URL"): 68 return True 69 if get_config("mailgun_api_key") and get_config("mailgun_base_url"): 70 return True 71 return False 72 73 74 def mailserver(): 75 if app.config.get("MAIL_SERVER") and app.config.get("MAIL_PORT"): 76 return True 77 if get_config("mail_server") and get_config("mail_port"): 78 return True 79 return False 80 81 82 def get_themes(): 83 dir = os.path.join(app.root_path, "themes") 84 return [ 85 name 86 for name in os.listdir(dir) 87 if os.path.isdir(os.path.join(dir, name)) and name != "admin" 88 ] 89 [end of CTFd/utils/config/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/utils/config/__init__.py b/CTFd/utils/config/__init__.py --- a/CTFd/utils/config/__init__.py +++ b/CTFd/utils/config/__init__.py @@ -53,14 +53,14 @@ def get_mail_provider(): - if app.config.get("MAIL_SERVER") and app.config.get("MAIL_PORT"): - return "smtp" if get_config("mail_server") and get_config("mail_port"): return "smtp" - if app.config.get("MAILGUN_API_KEY") and app.config.get("MAILGUN_BASE_URL"): - return "mailgun" if get_config("mailgun_api_key") and get_config("mailgun_base_url"): return "mailgun" + if app.config.get("MAIL_SERVER") and app.config.get("MAIL_PORT"): + return "smtp" + if app.config.get("MAILGUN_API_KEY") and app.config.get("MAILGUN_BASE_URL"): + return "mailgun" def mailgun(): diff --git a/CTFd/utils/email/smtp.py b/CTFd/utils/email/smtp.py --- a/CTFd/utils/email/smtp.py +++ b/CTFd/utils/email/smtp.py @@ -1,5 +1,9 @@ +import six import smtplib from email.mime.text import MIMEText + +if six.PY3: + from email.message import EmailMessage from socket import timeout from CTFd.utils import get_app_config, get_config @@ -47,12 +51,22 @@ try: smtp = get_smtp(**data) - msg = MIMEText(text) + + if six.PY2: + msg = MIMEText(text) + else: + msg = EmailMessage() + msg.set_content(text) + msg["Subject"] = subject msg["From"] = mailfrom_addr msg["To"] = addr - smtp.sendmail(msg["From"], [msg["To"]], msg.as_string()) + if six.PY2: + smtp.sendmail(msg["From"], [msg["To"]], msg.as_string()) + else: + smtp.send_message(msg) + smtp.quit() return True, "Email sent" except smtplib.SMTPException as e:
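A side note on why the `EmailMessage` branch in the diff above fixes the header: the modern `email` API routes address headers through `email.headerregistry`, which RFC 2047-encodes only the display name and leaves the addr-spec untouched — precisely the rule the reporter quotes from RFC 2047 §5 (typed as "RFC 2407" in the issue). A minimal Python 3 sketch, with invented addresses and without the `six` Python 2 fallback the merged diff keeps:

```python
from email.headerregistry import Address
from email.message import EmailMessage

msg = EmailMessage()
msg.set_content("Votre lien de réinitialisation ...")
msg["Subject"] = "Mot de passe oublié"
# The display name may contain non-ASCII; the addr-spec must stay plain ASCII.
msg["From"] = Address(display_name="Inté CTF", addr_spec="ctf-noreply@example.com")
msg["To"] = "player@example.com"

# In the serialized message only the display name becomes an encoded-word,
# e.g.:  From: =?utf-8?q?Int=C3=A9?= CTF <ctf-noreply@example.com>
print(msg.as_string())
```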
{"golden_diff": "diff --git a/CTFd/utils/config/__init__.py b/CTFd/utils/config/__init__.py\n--- a/CTFd/utils/config/__init__.py\n+++ b/CTFd/utils/config/__init__.py\n@@ -53,14 +53,14 @@\n \n \n def get_mail_provider():\n- if app.config.get(\"MAIL_SERVER\") and app.config.get(\"MAIL_PORT\"):\n- return \"smtp\"\n if get_config(\"mail_server\") and get_config(\"mail_port\"):\n return \"smtp\"\n- if app.config.get(\"MAILGUN_API_KEY\") and app.config.get(\"MAILGUN_BASE_URL\"):\n- return \"mailgun\"\n if get_config(\"mailgun_api_key\") and get_config(\"mailgun_base_url\"):\n return \"mailgun\"\n+ if app.config.get(\"MAIL_SERVER\") and app.config.get(\"MAIL_PORT\"):\n+ return \"smtp\"\n+ if app.config.get(\"MAILGUN_API_KEY\") and app.config.get(\"MAILGUN_BASE_URL\"):\n+ return \"mailgun\"\n \n \n def mailgun():\ndiff --git a/CTFd/utils/email/smtp.py b/CTFd/utils/email/smtp.py\n--- a/CTFd/utils/email/smtp.py\n+++ b/CTFd/utils/email/smtp.py\n@@ -1,5 +1,9 @@\n+import six\n import smtplib\n from email.mime.text import MIMEText\n+\n+if six.PY3:\n+ from email.message import EmailMessage\n from socket import timeout\n \n from CTFd.utils import get_app_config, get_config\n@@ -47,12 +51,22 @@\n \n try:\n smtp = get_smtp(**data)\n- msg = MIMEText(text)\n+\n+ if six.PY2:\n+ msg = MIMEText(text)\n+ else:\n+ msg = EmailMessage()\n+ msg.set_content(text)\n+\n msg[\"Subject\"] = subject\n msg[\"From\"] = mailfrom_addr\n msg[\"To\"] = addr\n \n- smtp.sendmail(msg[\"From\"], [msg[\"To\"]], msg.as_string())\n+ if six.PY2:\n+ smtp.sendmail(msg[\"From\"], [msg[\"To\"]], msg.as_string())\n+ else:\n+ smtp.send_message(msg)\n+\n smtp.quit()\n return True, \"Email sent\"\n except smtplib.SMTPException as e:\n", "issue": "Email cannot be sent if CTF name has an accent\n<!--\r\nIf this is a bug report please fill out the template below.\r\n\r\nIf this is a feature request please describe the behavior that you'd like to see.\r\n-->\r\n\r\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.3.2\r\n - Operating System: Debian 10.3, Python 3.7.3\r\n - Web Browser and Version: Firefox Nightly, 76.0a1 (2020-04-06)\r\n\r\n**What happened?**\r\n\r\nAfter a fresh install of CTFd we set the competition name to \"Int\u00e9 CTF\". We configure the email server to use our Postfix relay, and test it with the \"forgotten password\" feature.\r\nThe email never arrives and is bounced by relay servers because the `From` header is invalid.\r\n\r\nThe actual issue is because the `From` header has the form \"CTF name <email address>\" (see [here](https://github.com/CTFd/CTFd/blob/master/CTFd/utils/email/smtp.py#L25)). Once encoded in the `msg.as_string()` ([there](https://github.com/CTFd/CTFd/blob/master/CTFd/utils/email/smtp.py#L55)) it is entirely encoded in UTF-8 looking like this `=?utf-8?q?Int=C3=A9_CTF_=3Cctf-noreply=40example=2Ecom=3E?=`, which is invalid against RFC 2407 section 5 (\"An 'encoded-word' MUST NOT appear in any portion of an 'addr-spec'\"). A correct form would be `=?utf-8?q?Int=C3=A9?= CTF <[email protected]>`.\r\n\r\nI have a patch ready but it uses `from email.message import EmailMessage` which is available in Python 3.6+. 
I'll be happy to open a PR if you confirm me that support for lower Python version can be dropped ^^\r\n\r\n<details>\r\n<summary>Patch for `CTFd/utils/email/smtp.py`</summary>\r\n\r\n```diff\r\n-from email.mime.text import MIMEText\r\n+from email.message import EmailMessage\r\n\r\n\r\n# around line 50\r\n- msg = MIMEText(text)\r\n+ msg = EmailMessage()\r\n+ msg.set_content(text)\r\n msg[\"Subject\"] = subject\r\n msg[\"From\"] = mailfrom_addr\r\n msg[\"To\"] = addr\r\n \r\n- smtp.sendmail(msg[\"From\"], [msg[\"To\"]], msg.as_string())\r\n+ smtp.send_message(msg)\r\n```\r\n\r\n</details>\r\n\r\n**How to reproduce your issue**\r\n\r\n- setup a new instance of CTFd\r\n- configure the competition name with non-ascii characters\r\n- configure the email server to use a \"real\" mail server (not Mailgun)\r\n- try to send an email (with the \"forgotten password\" for example)\r\n- see the mail server logs\r\n\r\n<details>\r\n<summary>\r\nYou can see the difference between both methods (`MIMEText` and `EmailMessage`) with the following snippet\r\n</summary>\r\n\r\n```python\r\nfrom email.mime.text import MIMEText\r\nmsg = MIMEText(\"This is a message with accents \u00e9\u00e8\u00e7\u00e0\")\r\nmsg[\"From\"] = \"Ren\u00e9 C\u00f4ti <[email protected]>\"\r\nmsg[\"To\"] = \"[email protected]\"\r\nmsg.as_string()\r\n\r\nfrom email.message import EmailMessage\r\nmsg = EmailMessage()\r\nmsg.set_content(\"This is a message with accents \u00e9\u00e8\u00e7\u00e0\")\r\nmsg[\"From\"] = \"Ren\u00e9 C\u00f4ti <[email protected]>\"\r\nmsg[\"To\"] = \"[email protected]\"\r\nmsg.as_string()\r\n```\r\n\r\n</details>\r\n\r\n**Any associated stack traces or error logs**\r\n```\r\n# Sent to a gmail recipient\r\nopendkim[839]: 278839FE69: can't parse From: header value ' =?utf-8?q?Int=C3=A9_CTF_=3Cctf-noreply=40example=2Ecom=3E?='\r\npostfix/smtp[8076]: 278839FE69: to=<[email protected]>, relay=gmail-smtp-in.l.google.com[74.125.206.27]:25, delay=0.75, delays=0.31/0/0.23/0.21, dsn=5.7.1, status=bounced (host gmail-smtp-in.l.google.com[74.125.206.27] said: 550-5.7.1 [185.132.74.134 14] Messages missing a valid address in From: 550 5.7.1 header, or having no From: header, are not accepted. 
p64si4801636wmp.124 - gsmtp (in reply to end of DATA command))\r\n\r\n# Sent to another recipient, with another relay server\r\npostfix/smtpd[7858]: NOQUEUE: reject: RCPT from mailcube2.domain.fr[193.51.52.6]: 550 5.1.1 <[email protected]>: Recipient address rejected: User unknown\r\n in virtual mailbox table; from=<> to=<[email protected]> proto=ESMTP helo=<mailcube2.domain.fr>\r\n```\n", "before_files": [{"content": "import smtplib\nfrom email.mime.text import MIMEText\nfrom socket import timeout\n\nfrom CTFd.utils import get_app_config, get_config\n\n\ndef get_smtp(host, port, username=None, password=None, TLS=None, SSL=None, auth=None):\n if SSL is None:\n smtp = smtplib.SMTP(host, port, timeout=3)\n else:\n smtp = smtplib.SMTP_SSL(host, port, timeout=3)\n\n if TLS:\n smtp.starttls()\n\n if auth:\n smtp.login(username, password)\n return smtp\n\n\ndef sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n\n data = {\n \"host\": get_config(\"mail_server\") or get_app_config(\"MAIL_SERVER\"),\n \"port\": int(get_config(\"mail_port\") or get_app_config(\"MAIL_PORT\")),\n }\n username = get_config(\"mail_username\") or get_app_config(\"MAIL_USERNAME\")\n password = get_config(\"mail_password\") or get_app_config(\"MAIL_PASSWORD\")\n TLS = get_config(\"mail_tls\") or get_app_config(\"MAIL_TLS\")\n SSL = get_config(\"mail_ssl\") or get_app_config(\"MAIL_SSL\")\n auth = get_config(\"mail_useauth\") or get_app_config(\"MAIL_USEAUTH\")\n\n if username:\n data[\"username\"] = username\n if password:\n data[\"password\"] = password\n if TLS:\n data[\"TLS\"] = TLS\n if SSL:\n data[\"SSL\"] = SSL\n if auth:\n data[\"auth\"] = auth\n\n try:\n smtp = get_smtp(**data)\n msg = MIMEText(text)\n msg[\"Subject\"] = subject\n msg[\"From\"] = mailfrom_addr\n msg[\"To\"] = addr\n\n smtp.sendmail(msg[\"From\"], [msg[\"To\"]], msg.as_string())\n smtp.quit()\n return True, \"Email sent\"\n except smtplib.SMTPException as e:\n return False, str(e)\n except timeout:\n return False, \"SMTP server connection timed out\"\n except Exception as e:\n return False, str(e)\n", "path": "CTFd/utils/email/smtp.py"}, {"content": "import os\nimport time\n\nfrom flask import current_app as app\n\nfrom CTFd.utils import get_config\nfrom CTFd.utils.modes import TEAMS_MODE, USERS_MODE\n\n\ndef ctf_name():\n name = get_config(\"ctf_name\")\n return name if name else \"CTFd\"\n\n\ndef user_mode():\n return get_config(\"user_mode\")\n\n\ndef is_users_mode():\n return user_mode() == USERS_MODE\n\n\ndef is_teams_mode():\n return user_mode() == TEAMS_MODE\n\n\ndef ctf_logo():\n return get_config(\"ctf_logo\")\n\n\ndef ctf_theme():\n theme = get_config(\"ctf_theme\")\n return theme if theme else \"\"\n\n\ndef is_setup():\n return bool(get_config(\"setup\")) is True\n\n\ndef is_scoreboard_frozen():\n freeze = get_config(\"freeze\")\n\n if freeze:\n freeze = int(freeze)\n if freeze < time.time():\n return True\n\n return False\n\n\ndef can_send_mail():\n return mailserver() or mailgun()\n\n\ndef get_mail_provider():\n if app.config.get(\"MAIL_SERVER\") and app.config.get(\"MAIL_PORT\"):\n return \"smtp\"\n if get_config(\"mail_server\") and get_config(\"mail_port\"):\n return \"smtp\"\n if app.config.get(\"MAILGUN_API_KEY\") and app.config.get(\"MAILGUN_BASE_URL\"):\n return \"mailgun\"\n if get_config(\"mailgun_api_key\") and get_config(\"mailgun_base_url\"):\n return 
\"mailgun\"\n\n\ndef mailgun():\n if app.config.get(\"MAILGUN_API_KEY\") and app.config.get(\"MAILGUN_BASE_URL\"):\n return True\n if get_config(\"mailgun_api_key\") and get_config(\"mailgun_base_url\"):\n return True\n return False\n\n\ndef mailserver():\n if app.config.get(\"MAIL_SERVER\") and app.config.get(\"MAIL_PORT\"):\n return True\n if get_config(\"mail_server\") and get_config(\"mail_port\"):\n return True\n return False\n\n\ndef get_themes():\n dir = os.path.join(app.root_path, \"themes\")\n return [\n name\n for name in os.listdir(dir)\n if os.path.isdir(os.path.join(dir, name)) and name != \"admin\"\n ]\n", "path": "CTFd/utils/config/__init__.py"}]}
num_tokens_prompt: 3,029
num_tokens_diff: 510

problem_id: gh_patches_debug_29165
source: rasdani/github-patches
task_type: git_diff
in_source_id: spack__spack-7545
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> gcc v5.4.0 build fails due to mpfr patching problem There seems to be a patch application issue in the mpfr-3.1.5 build procedure I was expecting something like my previous build: ``` ==> Installing mpfr ==> Fetching file://MIRROR_DIR/mirror/mpfr/mpfr-3.1.5.tar.bz2 ==> Staging archive: WORKING_DIR/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5/mpfr-3.1.5.tar.bz2 ==> Created stage in WORKING_DIR/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5 ==> Applied patch vasprintf.patch ==> Applied patch strtofr.patch ==> Building mpfr [AutotoolsPackage] ==> Executing phase: 'autoreconf' ==> Executing phase: 'configure' ==> Executing phase: 'build' ==> Executing phase: 'install' ==> Successfully installed mpfr Fetch: 0.04s. Build: 9.54s. Total: 9.58s. [+] WORKING_DIR/opt/spack/linux-centos7-x86_64/gcc-4.8.5/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5 ``` When I tried to build the gcc compiler yesterday (and again this morning) the results were strange: ``` ==> Installing mpfr 1 out of 1 hunk FAILED -- saving rejects to file VERSION.rej 1 out of 1 hunk FAILED -- saving rejects to file src/mpfr.h.rej 1 out of 1 hunk FAILED -- saving rejects to file src/version.c.rej ==> Fetching file://MIRROR_DIR/mirror/mpfr/mpfr-3.1.5.tar.bz2 ==> Staging archive: WORKING_DIR/sat/spack/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5/mpfr-3.1.5.tar.bz2 ==> Created stage in WORKING_DIR/sat/spack/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5 ==> Patch strtofr.patch failed. ==> Error: ProcessError: Command exited with status 1: '/usr/bin/patch' '-s' '-p' '1' '-i' 'WORKING_DIR/sat/spack/var/spack/repos/builtin/packages/mpfr/strtofr.patch' '-d' '.' ==> Error: [Errno 2] No such file or directory: 'WORKING_DIR/sat/spack/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5/mpfr-3.1.5/spack-build.out' ``` Not only the error, but the order of the messages seem strange. A clean clone of the spack repo made no difference ```console $ spack install [email protected] ``` Default environment: ```linux-centos7-x86_64/gcc-4.8.5``` </issue> <code> [start of var/spack/repos/builtin/packages/mpfr/package.py] 1 ############################################################################## 2 # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/spack/spack 10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 
20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 from spack import * 26 27 28 class Mpfr(AutotoolsPackage): 29 """The MPFR library is a C library for multiple-precision 30 floating-point computations with correct rounding.""" 31 32 homepage = "http://www.mpfr.org" 33 url = "https://ftp.gnu.org/gnu/mpfr/mpfr-3.1.5.tar.bz2" 34 35 version('3.1.5', 'b1d23a55588e3b2a13e3be66bc69fd8d') 36 version('3.1.4', 'b8a2f6b0e68bef46e53da2ac439e1cf4') 37 version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138') 38 version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19') 39 40 # mpir is a drop-in replacement for gmp 41 depends_on('[email protected]:') # 4.2.3 or higher is recommended 42 43 patch('vasprintf.patch', when='@3.1.5') 44 patch('strtofr.patch', when='@3.1.5') 45 46 def configure_args(self): 47 args = [ 48 '--with-gmp=' + self.spec['gmp'].prefix, 49 ] 50 return args 51 [end of var/spack/repos/builtin/packages/mpfr/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/mpfr/package.py b/var/spack/repos/builtin/packages/mpfr/package.py --- a/var/spack/repos/builtin/packages/mpfr/package.py +++ b/var/spack/repos/builtin/packages/mpfr/package.py @@ -30,18 +30,33 @@ floating-point computations with correct rounding.""" homepage = "http://www.mpfr.org" - url = "https://ftp.gnu.org/gnu/mpfr/mpfr-3.1.5.tar.bz2" + url = "https://ftp.gnu.org/gnu/mpfr/mpfr-4.0.1.tar.bz2" + version('4.0.1', '8c21d8ac7460493b2b9f3ef3cc610454') + version('4.0.0', 'ef619f3bb68039e35c4a219e06be72d0') + version('3.1.6', '320c28198def956aeacdb240b46b8969') version('3.1.5', 'b1d23a55588e3b2a13e3be66bc69fd8d') version('3.1.4', 'b8a2f6b0e68bef46e53da2ac439e1cf4') version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138') version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19') # mpir is a drop-in replacement for gmp - depends_on('[email protected]:') # 4.2.3 or higher is recommended + depends_on('[email protected]:') # 4.2.3 or higher is recommended + depends_on('[email protected]:', when='@4.0.0:') # http://www.mpfr.org/mpfr-4.0.0/ - patch('vasprintf.patch', when='@3.1.5') - patch('strtofr.patch', when='@3.1.5') + # Check the Bugs section of old release pages for patches. + # http://www.mpfr.org/mpfr-X.Y.Z/#bugs + patches = { + '3.1.6': '66a5d58364113a21405fc53f4a48f4e8', + '3.1.5': '1dc5fe65feb5607b89fe0f410d53b627', + '3.1.4': 'd124381573404fe83654c7d5a79aeabf', + '3.1.3': 'ebd1d835e0ae2fd8a9339210ccd1d0a8', + '3.1.2': '9f96a5c7cac1d6cd983ed9cf7d997074', + } + + for ver, checksum in patches.items(): + patch('http://www.mpfr.org/mpfr-{0}/allpatches'.format(ver), + when='@' + ver, sha256=checksum) def configure_args(self): args = [
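Two things make fetching `allpatches` over plain HTTP tolerable in the fix above: each bundle is applied only `when` the spec matches its exact version, and each is pinned by checksum via the `sha256=` argument, so a tampered or silently changed file is rejected before `/usr/bin/patch` ever runs. A simplified, Spack-free sketch of that verify-before-use step (not Spack's actual implementation):

```python
import hashlib
import urllib.request

def fetch_verified(url, expected_sha256):
    # Download the patch bundle, then refuse to use it unless its digest
    # matches the checksum recorded in the package recipe.
    with urllib.request.urlopen(url) as resp:
        payload = resp.read()
    digest = hashlib.sha256(payload).hexdigest()
    if digest != expected_sha256:
        raise ValueError(f"checksum mismatch for {url}: got {digest}")
    return payload

# Usage mirrors one entry of the `patches` dict in the diff (digest elided):
# fetch_verified('http://www.mpfr.org/mpfr-3.1.6/allpatches', '<expected digest>')
```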
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/mpfr/package.py b/var/spack/repos/builtin/packages/mpfr/package.py\n--- a/var/spack/repos/builtin/packages/mpfr/package.py\n+++ b/var/spack/repos/builtin/packages/mpfr/package.py\n@@ -30,18 +30,33 @@\n floating-point computations with correct rounding.\"\"\"\n \n homepage = \"http://www.mpfr.org\"\n- url = \"https://ftp.gnu.org/gnu/mpfr/mpfr-3.1.5.tar.bz2\"\n+ url = \"https://ftp.gnu.org/gnu/mpfr/mpfr-4.0.1.tar.bz2\"\n \n+ version('4.0.1', '8c21d8ac7460493b2b9f3ef3cc610454')\n+ version('4.0.0', 'ef619f3bb68039e35c4a219e06be72d0')\n+ version('3.1.6', '320c28198def956aeacdb240b46b8969')\n version('3.1.5', 'b1d23a55588e3b2a13e3be66bc69fd8d')\n version('3.1.4', 'b8a2f6b0e68bef46e53da2ac439e1cf4')\n version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138')\n version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19')\n \n # mpir is a drop-in replacement for gmp\n- depends_on('[email protected]:') # 4.2.3 or higher is recommended\n+ depends_on('[email protected]:') # 4.2.3 or higher is recommended\n+ depends_on('[email protected]:', when='@4.0.0:') # http://www.mpfr.org/mpfr-4.0.0/\n \n- patch('vasprintf.patch', when='@3.1.5')\n- patch('strtofr.patch', when='@3.1.5')\n+ # Check the Bugs section of old release pages for patches.\n+ # http://www.mpfr.org/mpfr-X.Y.Z/#bugs\n+ patches = {\n+ '3.1.6': '66a5d58364113a21405fc53f4a48f4e8',\n+ '3.1.5': '1dc5fe65feb5607b89fe0f410d53b627',\n+ '3.1.4': 'd124381573404fe83654c7d5a79aeabf',\n+ '3.1.3': 'ebd1d835e0ae2fd8a9339210ccd1d0a8',\n+ '3.1.2': '9f96a5c7cac1d6cd983ed9cf7d997074',\n+ }\n+\n+ for ver, checksum in patches.items():\n+ patch('http://www.mpfr.org/mpfr-{0}/allpatches'.format(ver),\n+ when='@' + ver, sha256=checksum)\n \n def configure_args(self):\n args = [\n", "issue": "gcc v5.4.0 build fails due to mpfr patching problem\nThere seems to be a patch application issue in the mpfr-3.1.5 build procedure\r\n\r\nI was expecting something like my previous build:\r\n```\r\n==> Installing mpfr\r\n==> Fetching file://MIRROR_DIR/mirror/mpfr/mpfr-3.1.5.tar.bz2\r\n==> Staging archive: WORKING_DIR/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5/mpfr-3.1.5.tar.bz2\r\n==> Created stage in WORKING_DIR/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5\r\n==> Applied patch vasprintf.patch\r\n==> Applied patch strtofr.patch\r\n==> Building mpfr [AutotoolsPackage]\r\n==> Executing phase: 'autoreconf'\r\n==> Executing phase: 'configure'\r\n==> Executing phase: 'build'\r\n==> Executing phase: 'install'\r\n==> Successfully installed mpfr\r\n Fetch: 0.04s. Build: 9.54s. 
Total: 9.58s.\r\n[+] WORKING_DIR/opt/spack/linux-centos7-x86_64/gcc-4.8.5/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5\r\n```\r\nWhen I tried to build the gcc compiler yesterday (and again this morning) the results were strange:\r\n```\r\n==> Installing mpfr\r\n1 out of 1 hunk FAILED -- saving rejects to file VERSION.rej\r\n1 out of 1 hunk FAILED -- saving rejects to file src/mpfr.h.rej\r\n1 out of 1 hunk FAILED -- saving rejects to file src/version.c.rej\r\n==> Fetching file://MIRROR_DIR/mirror/mpfr/mpfr-3.1.5.tar.bz2\r\n==> Staging archive: WORKING_DIR/sat/spack/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5/mpfr-3.1.5.tar.bz2\r\n==> Created stage in WORKING_DIR/sat/spack/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5\r\n==> Patch strtofr.patch failed.\r\n==> Error: ProcessError: Command exited with status 1:\r\n '/usr/bin/patch' '-s' '-p' '1' '-i' 'WORKING_DIR/sat/spack/var/spack/repos/builtin/packages/mpfr/strtofr.patch' '-d' '.'\r\n==> Error: [Errno 2] No such file or directory: 'WORKING_DIR/sat/spack/var/spack/stage/mpfr-3.1.5-rmi7bmi3oaqduvjown2v46snr6ps2zr5/mpfr-3.1.5/spack-build.out'\r\n```\r\nNot only the error, but the order of the messages seem strange.\r\n\r\nA clean clone of the spack repo made no difference\r\n```console\r\n$ spack install [email protected]\r\n```\r\n\r\nDefault environment:\r\n```linux-centos7-x86_64/gcc-4.8.5```\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/spack/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Mpfr(AutotoolsPackage):\n \"\"\"The MPFR library is a C library for multiple-precision\n floating-point computations with correct rounding.\"\"\"\n\n homepage = \"http://www.mpfr.org\"\n url = \"https://ftp.gnu.org/gnu/mpfr/mpfr-3.1.5.tar.bz2\"\n\n version('3.1.5', 'b1d23a55588e3b2a13e3be66bc69fd8d')\n version('3.1.4', 'b8a2f6b0e68bef46e53da2ac439e1cf4')\n version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138')\n version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19')\n\n # mpir is a drop-in replacement for gmp\n depends_on('[email protected]:') # 4.2.3 or higher is recommended\n\n patch('vasprintf.patch', when='@3.1.5')\n patch('strtofr.patch', when='@3.1.5')\n\n def configure_args(self):\n args = [\n '--with-gmp=' + self.spec['gmp'].prefix,\n ]\n return args\n", "path": "var/spack/repos/builtin/packages/mpfr/package.py"}]}
num_tokens_prompt: 2,011
num_tokens_diff: 853

problem_id: gh_patches_debug_4654
source: rasdani/github-patches
task_type: git_diff
in_source_id: NVIDIA__NeMo-2546
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> vad_infer.py script fails with `IndexError` if option `--dont_auto_split` is not set **Describe the bug** `NeMo/examples/asr/vad_infer.py` script fails with `IndexError` if option `--dont_auto_split` is not set and at least 1 `.wav` file is long enough so that `nemo.collections.asr.parts.utils.vad_utils.prepare_manifest()` function split the `.wav` file. **Steps/Code to reproduce bug** ``` mkdir -p ~/debug_data wget http://i13pc106.ira.uka.de/~jniehues/IWSLT-SLT/data/eval/en-de/IWSLT-SLT.tst2019.en-de.tgz -O ~/debug_data/IWSLT-SLT.tst2019.en-de.tgz tar xzf ~/debug_data/IWSLT-SLT.tst2019.en-de.tgz -C ~/debug_data/ cd ~/NeMo/examples/asr/ wget https://raw.githubusercontent.com/NVIDIA/NeMo/feat/asr/iwslt_audio_to_nemo_format/scripts/dataset_processing/prepare_iwslt_audio_data.py python prepare_iwslt_audio_data.py -a ~/debug_data/IWSLT.tst2019/wavs/ -t ~/debug_data/IWSLT.tst2019/IWSLT.TED.tst2019.en-de.en.xml -o ~/debug_data/IWSLT.tst2019/manifest.json python vad_infer.py --dataset ~/debug_data/IWSLT.tst2019/manifest.json --out_dir ~/debug_data/IWSLT.tst2019/vad --vad_model vad_marblenet ``` **Expected behavior** No errors **Environment overview (please complete the following information)** - Environment location: Bare-metal - Method of NeMo install: pip install nemo_toolkit[all] **Environment details** If NVIDIA docker image is used you don't need to specify these. Otherwise, please provide: - OS version: Ubuntu 20.04.2 LTS - PyTorch version: 1.8.1 - Python version: 3.8.10 </issue> <code> [start of examples/asr/vad_infer.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 During inference, we perform frame-level prediction by two approaches: 17 1) shift the window of length time_length (e.g. 0.63s) by shift_length (e.g. 10ms) to generate the frame and use the prediction of the window to represent the label for the frame; 18 [this script demonstrate how to do this approach] 19 2) generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments. 20 [get frame level prediction by this script and use vad_overlap_posterior.py in NeMo/scripts/voice_activity_detection 21 One can also find posterior about converting frame level prediction 22 to speech/no-speech segment in start and end times format in that script.] 23 24 Image https://raw.githubusercontent.com/NVIDIA/NeMo/main/tutorials/asr/images/vad_post_overlap_diagram.png 25 will help you understand this method. 
26 27 Usage: 28 python vad_infer.py --vad_model="vad_marblenet" --dataset=<FULL PATH OF MANIFEST TO BE PERFORMED INFERENCE ON> --out_dir='frame/demo' --time_length=0.63 29 30 """ 31 32 33 import json 34 import logging 35 import os 36 from argparse import ArgumentParser 37 38 import torch 39 40 from nemo.collections.asr.models import EncDecClassificationModel 41 from nemo.collections.asr.parts.utils.vad_utils import get_vad_stream_status, prepare_manifest 42 from nemo.utils import logging 43 44 try: 45 from torch.cuda.amp import autocast 46 except ImportError: 47 from contextlib import contextmanager 48 49 @contextmanager 50 def autocast(enabled=None): 51 yield 52 53 54 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 55 56 57 def main(): 58 parser = ArgumentParser() 59 parser.add_argument( 60 "--vad_model", type=str, default="MatchboxNet-VAD-3x2", required=False, help="Pass: 'MatchboxNet-VAD-3x2'" 61 ) 62 parser.add_argument( 63 "--dataset", 64 type=str, 65 required=True, 66 help="Path of json file of evaluation data. Audio files should have unique names.", 67 ) 68 parser.add_argument("--out_dir", type=str, default="vad_frame", help="Dir of your vad outputs") 69 parser.add_argument("--time_length", type=float, default=0.63) 70 parser.add_argument("--shift_length", type=float, default=0.01) 71 parser.add_argument("--normalize_audio", type=bool, default=False) 72 parser.add_argument("--num_workers", type=float, default=20) 73 parser.add_argument("--split_duration", type=float, default=400) 74 parser.add_argument( 75 "--dont_auto_split", 76 default=False, 77 action='store_true', 78 help="Whether to automatically split manifest entry by split_duration to avoid potential CUDA out of memory issue.", 79 ) 80 81 args = parser.parse_args() 82 83 torch.set_grad_enabled(False) 84 85 if args.vad_model.endswith('.nemo'): 86 logging.info(f"Using local VAD model from {args.vad_model}") 87 vad_model = EncDecClassificationModel.restore_from(restore_path=args.vad_model) 88 else: 89 logging.info(f"Using NGC cloud VAD model {args.vad_model}") 90 vad_model = EncDecClassificationModel.from_pretrained(model_name=args.vad_model) 91 92 if not os.path.exists(args.out_dir): 93 os.mkdir(args.out_dir) 94 95 # Prepare manifest for streaming VAD 96 manifest_vad_input = args.dataset 97 if not args.dont_auto_split: 98 logging.info("Split long audio file to avoid CUDA memory issue") 99 logging.debug("Try smaller split_duration if you still have CUDA memory issue") 100 config = { 101 'manifest_filepath': manifest_vad_input, 102 'time_length': args.time_length, 103 'split_duration': args.split_duration, 104 'num_workers': args.num_workers, 105 } 106 manifest_vad_input = prepare_manifest(config) 107 else: 108 logging.warning( 109 "If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it." 
110 ) 111 112 # setup_test_data 113 vad_model.setup_test_data( 114 test_data_config={ 115 'vad_stream': True, 116 'sample_rate': 16000, 117 'manifest_filepath': manifest_vad_input, 118 'labels': ['infer',], 119 'num_workers': args.num_workers, 120 'shuffle': False, 121 'time_length': args.time_length, 122 'shift_length': args.shift_length, 123 'trim_silence': False, 124 'normalize_audio': args.normalize_audio, 125 } 126 ) 127 128 vad_model = vad_model.to(device) 129 vad_model.eval() 130 131 time_unit = int(args.time_length / args.shift_length) 132 trunc = int(time_unit / 2) 133 trunc_l = time_unit - trunc 134 all_len = 0 135 136 data = [] 137 for line in open(args.dataset, 'r'): 138 file = json.loads(line)['audio_filepath'].split("/")[-1] 139 data.append(file.split(".wav")[0]) 140 logging.info(f"Inference on {len(data)} audio files/json lines!") 141 142 status = get_vad_stream_status(data) 143 for i, test_batch in enumerate(vad_model.test_dataloader()): 144 test_batch = [x.to(device) for x in test_batch] 145 with autocast(): 146 log_probs = vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1]) 147 probs = torch.softmax(log_probs, dim=-1) 148 pred = probs[:, 1] 149 150 if status[i] == 'start': 151 to_save = pred[:-trunc] 152 elif status[i] == 'next': 153 to_save = pred[trunc:-trunc_l] 154 elif status[i] == 'end': 155 to_save = pred[trunc_l:] 156 else: 157 to_save = pred 158 159 all_len += len(to_save) 160 outpath = os.path.join(args.out_dir, data[i] + ".frame") 161 with open(outpath, "a") as fout: 162 for f in range(len(to_save)): 163 fout.write('{0:0.4f}\n'.format(to_save[f])) 164 del test_batch 165 if status[i] == 'end' or status[i] == 'single': 166 logging.debug(f"Overall length of prediction of {data[i]} is {all_len}!") 167 all_len = 0 168 169 170 if __name__ == '__main__': 171 main() 172 [end of examples/asr/vad_infer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/asr/vad_infer.py b/examples/asr/vad_infer.py --- a/examples/asr/vad_infer.py +++ b/examples/asr/vad_infer.py @@ -134,7 +134,7 @@ all_len = 0 data = [] - for line in open(args.dataset, 'r'): + for line in open(manifest_vad_input, 'r'): file = json.loads(line)['audio_filepath'].split("/")[-1] data.append(file.split(".wav")[0]) logging.info(f"Inference on {len(data)} audio files/json lines!")
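The one-line fix above is easiest to see with the counts written out: `prepare_manifest()` may split one long recording into several manifest entries, so the dataloader yields more batches than `args.dataset` has lines, and indexing the original-manifest `data` list with the batch counter runs off the end. A NeMo-free reconstruction (file names invented):

```python
original_manifest = ["talk1.wav"]                        # one long recording
split_manifest = ["talk1_part1.wav", "talk1_part2.wav"]  # after auto-splitting

# The buggy code built `data` from the original manifest ...
data = [name.split(".wav")[0] for name in original_manifest]  # len == 1

# ... but enumerated batches coming from the split manifest.
try:
    for i, _batch in enumerate(split_manifest):
        print(data[i])
except IndexError as exc:
    # The exact failure mode; building `data` from manifest_vad_input
    # instead keeps the two lists the same length.
    print("IndexError:", exc)
```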
{"golden_diff": "diff --git a/examples/asr/vad_infer.py b/examples/asr/vad_infer.py\n--- a/examples/asr/vad_infer.py\n+++ b/examples/asr/vad_infer.py\n@@ -134,7 +134,7 @@\n all_len = 0\n \n data = []\n- for line in open(args.dataset, 'r'):\n+ for line in open(manifest_vad_input, 'r'):\n file = json.loads(line)['audio_filepath'].split(\"/\")[-1]\n data.append(file.split(\".wav\")[0])\n logging.info(f\"Inference on {len(data)} audio files/json lines!\")\n", "issue": "vad_infer.py script fails with `IndexError` if option `--dont_auto_split` is not set\n**Describe the bug**\r\n\r\n`NeMo/examples/asr/vad_infer.py` script fails with `IndexError` if option `--dont_auto_split` is not set and at least 1 `.wav` file is long enough so that `nemo.collections.asr.parts.utils.vad_utils.prepare_manifest()` function split the `.wav` file.\r\n\r\n**Steps/Code to reproduce bug**\r\n\r\n```\r\nmkdir -p ~/debug_data\r\nwget http://i13pc106.ira.uka.de/~jniehues/IWSLT-SLT/data/eval/en-de/IWSLT-SLT.tst2019.en-de.tgz -O ~/debug_data/IWSLT-SLT.tst2019.en-de.tgz\r\ntar xzf ~/debug_data/IWSLT-SLT.tst2019.en-de.tgz -C ~/debug_data/\r\ncd ~/NeMo/examples/asr/\r\nwget https://raw.githubusercontent.com/NVIDIA/NeMo/feat/asr/iwslt_audio_to_nemo_format/scripts/dataset_processing/prepare_iwslt_audio_data.py\r\npython prepare_iwslt_audio_data.py -a ~/debug_data/IWSLT.tst2019/wavs/ -t ~/debug_data/IWSLT.tst2019/IWSLT.TED.tst2019.en-de.en.xml -o ~/debug_data/IWSLT.tst2019/manifest.json\r\npython vad_infer.py --dataset ~/debug_data/IWSLT.tst2019/manifest.json --out_dir ~/debug_data/IWSLT.tst2019/vad --vad_model vad_marblenet\r\n```\r\n\r\n\r\n**Expected behavior**\r\n\r\nNo errors\r\n\r\n**Environment overview (please complete the following information)**\r\n\r\n - Environment location: Bare-metal\r\n - Method of NeMo install: pip install nemo_toolkit[all]\r\n\r\n**Environment details**\r\n\r\nIf NVIDIA docker image is used you don't need to specify these.\r\nOtherwise, please provide:\r\n- OS version: Ubuntu 20.04.2 LTS\r\n- PyTorch version: 1.8.1\r\n- Python version: 3.8.10\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDuring inference, we perform frame-level prediction by two approaches: \n 1) shift the window of length time_length (e.g. 0.63s) by shift_length (e.g. 10ms) to generate the frame and use the prediction of the window to represent the label for the frame;\n [this script demonstrate how to do this approach]\n 2) generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments. 
\n [get frame level prediction by this script and use vad_overlap_posterior.py in NeMo/scripts/voice_activity_detection\n One can also find posterior about converting frame level prediction \n to speech/no-speech segment in start and end times format in that script.]\n \n Image https://raw.githubusercontent.com/NVIDIA/NeMo/main/tutorials/asr/images/vad_post_overlap_diagram.png \n will help you understand this method.\n \nUsage:\npython vad_infer.py --vad_model=\"vad_marblenet\" --dataset=<FULL PATH OF MANIFEST TO BE PERFORMED INFERENCE ON> --out_dir='frame/demo' --time_length=0.63\n\n\"\"\"\n\n\nimport json\nimport logging\nimport os\nfrom argparse import ArgumentParser\n\nimport torch\n\nfrom nemo.collections.asr.models import EncDecClassificationModel\nfrom nemo.collections.asr.parts.utils.vad_utils import get_vad_stream_status, prepare_manifest\nfrom nemo.utils import logging\n\ntry:\n from torch.cuda.amp import autocast\nexcept ImportError:\n from contextlib import contextmanager\n\n @contextmanager\n def autocast(enabled=None):\n yield\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\n \"--vad_model\", type=str, default=\"MatchboxNet-VAD-3x2\", required=False, help=\"Pass: 'MatchboxNet-VAD-3x2'\"\n )\n parser.add_argument(\n \"--dataset\",\n type=str,\n required=True,\n help=\"Path of json file of evaluation data. Audio files should have unique names.\",\n )\n parser.add_argument(\"--out_dir\", type=str, default=\"vad_frame\", help=\"Dir of your vad outputs\")\n parser.add_argument(\"--time_length\", type=float, default=0.63)\n parser.add_argument(\"--shift_length\", type=float, default=0.01)\n parser.add_argument(\"--normalize_audio\", type=bool, default=False)\n parser.add_argument(\"--num_workers\", type=float, default=20)\n parser.add_argument(\"--split_duration\", type=float, default=400)\n parser.add_argument(\n \"--dont_auto_split\",\n default=False,\n action='store_true',\n help=\"Whether to automatically split manifest entry by split_duration to avoid potential CUDA out of memory issue.\",\n )\n\n args = parser.parse_args()\n\n torch.set_grad_enabled(False)\n\n if args.vad_model.endswith('.nemo'):\n logging.info(f\"Using local VAD model from {args.vad_model}\")\n vad_model = EncDecClassificationModel.restore_from(restore_path=args.vad_model)\n else:\n logging.info(f\"Using NGC cloud VAD model {args.vad_model}\")\n vad_model = EncDecClassificationModel.from_pretrained(model_name=args.vad_model)\n\n if not os.path.exists(args.out_dir):\n os.mkdir(args.out_dir)\n\n # Prepare manifest for streaming VAD\n manifest_vad_input = args.dataset\n if not args.dont_auto_split:\n logging.info(\"Split long audio file to avoid CUDA memory issue\")\n logging.debug(\"Try smaller split_duration if you still have CUDA memory issue\")\n config = {\n 'manifest_filepath': manifest_vad_input,\n 'time_length': args.time_length,\n 'split_duration': args.split_duration,\n 'num_workers': args.num_workers,\n }\n manifest_vad_input = prepare_manifest(config)\n else:\n logging.warning(\n \"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it.\"\n )\n\n # setup_test_data\n vad_model.setup_test_data(\n test_data_config={\n 'vad_stream': True,\n 'sample_rate': 16000,\n 'manifest_filepath': manifest_vad_input,\n 'labels': ['infer',],\n 'num_workers': args.num_workers,\n 'shuffle': False,\n 'time_length': args.time_length,\n 'shift_length': args.shift_length,\n 
'trim_silence': False,\n 'normalize_audio': args.normalize_audio,\n }\n )\n\n vad_model = vad_model.to(device)\n vad_model.eval()\n\n time_unit = int(args.time_length / args.shift_length)\n trunc = int(time_unit / 2)\n trunc_l = time_unit - trunc\n all_len = 0\n\n data = []\n for line in open(args.dataset, 'r'):\n file = json.loads(line)['audio_filepath'].split(\"/\")[-1]\n data.append(file.split(\".wav\")[0])\n logging.info(f\"Inference on {len(data)} audio files/json lines!\")\n\n status = get_vad_stream_status(data)\n for i, test_batch in enumerate(vad_model.test_dataloader()):\n test_batch = [x.to(device) for x in test_batch]\n with autocast():\n log_probs = vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])\n probs = torch.softmax(log_probs, dim=-1)\n pred = probs[:, 1]\n\n if status[i] == 'start':\n to_save = pred[:-trunc]\n elif status[i] == 'next':\n to_save = pred[trunc:-trunc_l]\n elif status[i] == 'end':\n to_save = pred[trunc_l:]\n else:\n to_save = pred\n\n all_len += len(to_save)\n outpath = os.path.join(args.out_dir, data[i] + \".frame\")\n with open(outpath, \"a\") as fout:\n for f in range(len(to_save)):\n fout.write('{0:0.4f}\\n'.format(to_save[f]))\n del test_batch\n if status[i] == 'end' or status[i] == 'single':\n logging.debug(f\"Overall length of prediction of {data[i]} is {all_len}!\")\n all_len = 0\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/asr/vad_infer.py"}]}
2962
137
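The vad_infer.py record above stitches overlapping audio chunks by discarding half an analysis window of frame-level predictions at every internal boundary, driven by the 'start'/'next'/'end'/'single' stream status. A minimal sketch of that bookkeeping, using the script's own defaults of time_length=0.63 and shift_length=0.01:

```
# Each split segment overlaps its neighbour by one analysis window, so the
# duplicated frames are trimmed before the per-file stream is concatenated.
time_length, shift_length = 0.63, 0.01        # defaults from the script
time_unit = int(time_length / shift_length)   # frames per window: 63
trunc = int(time_unit / 2)                    # trailing frames to drop: 31
trunc_l = time_unit - trunc                   # leading frames to drop: 32

def stitch(pred, status):
    """Trim one segment's frame-level predictions by its stream position."""
    if status == 'start':            # first chunk: keep the head intact
        return pred[:-trunc]
    if status == 'next':             # middle chunk: trim both edges
        return pred[trunc:-trunc_l]
    if status == 'end':              # last chunk: keep the tail intact
        return pred[trunc_l:]
    return pred                      # 'single': the file was never split

assert stitch(list(range(100)), 'next') == list(range(31, 68))
```

Concatenating the trimmed chunks leaves exactly one prediction per 10 ms frame shift for the whole file, which is the invariant the script's running all_len counter checks.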
gh_patches_debug_38538
rasdani/github-patches
git_diff
sunpy__sunpy-7451
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No map source to handle GONG H-alpha data ### Describe the bug Originally raised by nawinnga in the sunpy chat room. There is no map source to do the necessary translation from the GONG H-alpha headers to standard meta data Map can use related to #6653 and #6655. It doesn't look like its trivial to find/extract/calculate the necessary WCS information (e.g. PCi_j + CDELT or CDi_j ...) without some reference to what the fits headers mean some info on [this NSO page](https://nso.edu/data/nisp-data/h-alpha/) ### To Reproduce ``` from astropy.io import fits import sunpy # to get around issue #6655 hdul = fits.open('ftp://gong2.nso.edu/HA/haf/202203/20220318/20220318000050Bh.fits.fz') sunpy.map.Map((hdul[1].data, hdul[1].header)) Map((hdul[1].data, hdul[1].header)) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/map_factory.py", line 331, in __call__ new_map = self._check_registered_widgets(data, meta, **kwargs) File "/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/map_factory.py", line 381, in _check_registered_widgets return WidgetType(data, meta, **kwargs) File "/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/mapbase.py", line 232, in __init__ self._validate_meta() File "/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/mapbase.py", line 1409, in _validate_meta raise MapMetaValidationError('\n'.join(err_message)) sunpy.map.mapbase.MapMetaValidationError: Image coordinate units for axis 1 not present in metadata. Image coordinate units for axis 2 not present in metadata. See https://docs.sunpy.org/en/stable/code_ref/map.html#fixing-map-metadata for instructions on how to add missing metadata. ``` Setting the cdelt and cunit at least get lets the map be loaded but the WCS is still non-standard ``` hdul[1].header['cdelt1'] = 1 hdul[1].header['cdelt2'] = 1 hdul[1].header['cunit1'] = 'arcsec' hdul[1].header['cunit2'] = 'arcsec' mm = Map((hdul[1].data, hdul[1].header)) WARNING: SunpyMetadataWarning: Missing metadata for observer: assuming Earth-based observer. For frame 'heliographic_stonyhurst' the following metadata is missing: hgln_obs,hglt_obs,dsun_obs For frame 'heliographic_carrington' the following metadata is missing: crlt_obs,dsun_obs,crln_obs [sunpy.map.mapbase] WARNING: SunpyUserWarning: Could not determine coordinate frame from map metadata. Could not determine celestial frame corresponding to the specified WCS object [sunpy.map.mapbase] <sunpy.map.mapbase.GenericMap object at 0x1297eb160> SunPy Map --------- Observatory: NSO-GONG Instrument: Detector: Measurement: 6562.808 Wavelength: 6562.808 Observation Date: 2022-03-18 00:00:50 Exposure Time: Unknown Dimension: [2048. 2048.] pix Coordinate System: Unknown Scale: [1. 1.] arcsec / pix Reference Pixel: [1023. 1023.] pix Reference Coord: [0. 0.] 
arcsec array([[0, 0, 0, ..., 0, 0, 0], [0, 0, 0, ..., 0, 0, 0], [0, 0, 0, ..., 0, 0, 0], ..., [0, 0, 0, ..., 0, 0, 0], [0, 0, 0, ..., 0, 0, 0], [0, 0, 0, ..., 0, 0, 0]], dtype=int16) ``` ### Screenshots _No response_ ### System Details OS: Mac OS 10.16 Arch: 64bit, (i386) python3.9 sunpy: 4.1.0 astropy: 5.1.1 numpy: 1.23.5 ### Installation method pip </issue> <code> [start of sunpy/map/sources/gong.py] 1 """ 2 GONG Map subclass definitions 3 """ 4 import numpy as np 5 6 import astropy.units as u 7 from astropy.time import Time 8 9 from sunpy.coordinates import get_earth 10 from sunpy.map import GenericMap 11 12 __all__ = ['GONGSynopticMap'] 13 14 from sunpy.map.mapbase import SpatialPair 15 16 17 class GONGSynopticMap(GenericMap): 18 """ 19 GONG Synoptic Map. 20 21 The Global Oscillation Network Group (GONG) operates a six-station network of velocity 22 imagers located around the Earth that observe the Sun nearly continuously. GONG 23 produces hourly photospheric magnetograms using the Ni I 676.8 nm spectral line with an 24 array of 242×256 pixels covering the solar disk. These magnetograms are used to derive 25 synoptic maps which show a full-surface picture of the solar magnetic field. 26 27 Notes 28 ----- 29 If you have ``pfsspy`` installed this map source will be used instead of the one built into ``pfsspy``. 30 31 References 32 ---------- 33 * `GONG Page <https://gong.nso.edu/>`_ 34 * `Magnetogram Synoptic Map Images Page <https://gong.nso.edu/data/magmap/>`_ 35 * `FITS header keywords <https://gong.nso.edu/data/DMAC_documentation/General/fitsdesc.html>`_ 36 * `Instrument Paper (pp. 203–208) <https://inis.iaea.org/collection/NCLCollectionStore/_Public/20/062/20062491.pdf>`_ 37 * `GONG+ Documentation <https://gong.nso.edu/data/DMAC_documentation/PipelineMap/GlobalMap.html>`_ 38 """ 39 40 @classmethod 41 def is_datasource_for(cls, data, header, **kwargs): 42 return (str(header.get('TELESCOP', '')).endswith('GONG') and 43 str(header.get('CTYPE1', '').startswith('CRLN'))) 44 45 @property 46 def date(self): 47 return Time(f"{self.meta.get('date-obs')} {self.meta.get('time-obs')}") 48 49 @property 50 def scale(self): 51 # Since, this map uses the cylindrical equal-area (CEA) projection, 52 # the spacing should be modified to 180/pi times the original value 53 # Reference: Section 5.5, Thompson 2006 54 return SpatialPair(self.meta['cdelt1'] * self.spatial_units[0] / u.pixel, 55 self.meta['cdelt2'] * 180 / np.pi * self.spatial_units[0] / u.pixel) 56 57 @property 58 def spatial_units(self): 59 return SpatialPair(u.deg, u.deg) 60 61 @property 62 def observer_coordinate(self): 63 return get_earth(self.date) 64 [end of sunpy/map/sources/gong.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sunpy/map/sources/gong.py b/sunpy/map/sources/gong.py --- a/sunpy/map/sources/gong.py +++ b/sunpy/map/sources/gong.py @@ -4,15 +4,25 @@ import numpy as np import astropy.units as u +from astropy.coordinates import EarthLocation, SkyCoord from astropy.time import Time from sunpy.coordinates import get_earth from sunpy.map import GenericMap -__all__ = ['GONGSynopticMap'] +__all__ = ['GONGSynopticMap', 'GONGHalphaMap'] from sunpy.map.mapbase import SpatialPair +_SITE_NAMES = { + 'LE': 'Learmonth', + 'UD': 'Udaipur', + 'TD': 'El Teide', + 'CT': 'Cerro Tololo', + 'BB': 'Big Bear', + 'ML': 'Mauna Loa' +} + class GONGSynopticMap(GenericMap): """ @@ -40,7 +50,7 @@ @classmethod def is_datasource_for(cls, data, header, **kwargs): return (str(header.get('TELESCOP', '')).endswith('GONG') and - str(header.get('CTYPE1', '').startswith('CRLN'))) + str(header.get('CTYPE1', '')).startswith('CRLN')) @property def date(self): @@ -61,3 +71,58 @@ @property def observer_coordinate(self): return get_earth(self.date) + + +class GONGHalphaMap(GenericMap): + """ + GONG H-Alpha Map. + + The Global Oscillation Network Group (GONG) operates a six-station network of H-alpha + imagers located around the Earth that observe the Sun nearly continuously. + + References + ---------- + * `GONG H-Alpha Page <https://nso.edu/data/nisp-data/h-alpha/>`_ + * `GONG H-Alpha Observation Details <https://nispdata.nso.edu/webProdDesc2/presenter.php?file=halpha_fulldisk_images_overview.html&echoExact=0&name=Overview%20:%20GONG%20H-alpha%20Full-disk%20Images>`_ + * `GONG Header Keywords <https://gong.nso.edu/data/HEADER_KEY.html>`_ + * `DOI:/10.25668/as28-7p13 <https://doi.org/10.25668/as28-7p13>`_ + """ + + @classmethod + def is_datasource_for(cls, data, header, **kwargs): + return (str(header.get('TELESCOP', '')).endswith('GONG') and + str(header.get('IMTYPE', '')).startswith('H-ALPHA')) + + + @property + def scale(self): + solar_r = self.meta['SOLAR-R'] * u.arcsec + return SpatialPair(solar_r / (self.meta['FNDLMBMI'] * u.pixel), + solar_r/ (self.meta['FNDLMBMA'] * u.pixel)) + + @property + def coordinate_system(self): + """ + Coordinate system used + + Overrides the values in the header which are not understood by Astropy WCS + """ + return SpatialPair("HPLN-TAN", "HPLT-TAN") + + @property + def nickname(self): + site = _SITE_NAMES.get(self.meta.get("sitename", ""), "UNKNOWN") + return f'{self.observatory}, {site}' + + @property + def spatial_units(self): + return SpatialPair(u.deg, u.deg) + + @property + def _earth_location(self): + """Location of the observatory on Earth""" + return EarthLocation.from_geodetic(lat=self.meta['site-lat'] * u.deg, lon=self.meta['site-lon'] * u.deg) + + @property + def observer_coordinate(self): + return SkyCoord(self._earth_location.get_itrs(self.date)).heliographic_stonyhurst
{"golden_diff": "diff --git a/sunpy/map/sources/gong.py b/sunpy/map/sources/gong.py\n--- a/sunpy/map/sources/gong.py\n+++ b/sunpy/map/sources/gong.py\n@@ -4,15 +4,25 @@\n import numpy as np\n \n import astropy.units as u\n+from astropy.coordinates import EarthLocation, SkyCoord\n from astropy.time import Time\n \n from sunpy.coordinates import get_earth\n from sunpy.map import GenericMap\n \n-__all__ = ['GONGSynopticMap']\n+__all__ = ['GONGSynopticMap', 'GONGHalphaMap']\n \n from sunpy.map.mapbase import SpatialPair\n \n+_SITE_NAMES = {\n+ 'LE': 'Learmonth',\n+ 'UD': 'Udaipur',\n+ 'TD': 'El Teide',\n+ 'CT': 'Cerro Tololo',\n+ 'BB': 'Big Bear',\n+ 'ML': 'Mauna Loa'\n+}\n+\n \n class GONGSynopticMap(GenericMap):\n \"\"\"\n@@ -40,7 +50,7 @@\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n return (str(header.get('TELESCOP', '')).endswith('GONG') and\n- str(header.get('CTYPE1', '').startswith('CRLN')))\n+ str(header.get('CTYPE1', '')).startswith('CRLN'))\n \n @property\n def date(self):\n@@ -61,3 +71,58 @@\n @property\n def observer_coordinate(self):\n return get_earth(self.date)\n+\n+\n+class GONGHalphaMap(GenericMap):\n+ \"\"\"\n+ GONG H-Alpha Map.\n+\n+ The Global Oscillation Network Group (GONG) operates a six-station network of H-alpha\n+ imagers located around the Earth that observe the Sun nearly continuously.\n+\n+ References\n+ ----------\n+ * `GONG H-Alpha Page <https://nso.edu/data/nisp-data/h-alpha/>`_\n+ * `GONG H-Alpha Observation Details <https://nispdata.nso.edu/webProdDesc2/presenter.php?file=halpha_fulldisk_images_overview.html&echoExact=0&name=Overview%20:%20GONG%20H-alpha%20Full-disk%20Images>`_\n+ * `GONG Header Keywords <https://gong.nso.edu/data/HEADER_KEY.html>`_\n+ * `DOI:/10.25668/as28-7p13 <https://doi.org/10.25668/as28-7p13>`_\n+ \"\"\"\n+\n+ @classmethod\n+ def is_datasource_for(cls, data, header, **kwargs):\n+ return (str(header.get('TELESCOP', '')).endswith('GONG') and\n+ str(header.get('IMTYPE', '')).startswith('H-ALPHA'))\n+\n+\n+ @property\n+ def scale(self):\n+ solar_r = self.meta['SOLAR-R'] * u.arcsec\n+ return SpatialPair(solar_r / (self.meta['FNDLMBMI'] * u.pixel),\n+ solar_r/ (self.meta['FNDLMBMA'] * u.pixel))\n+\n+ @property\n+ def coordinate_system(self):\n+ \"\"\"\n+ Coordinate system used\n+\n+ Overrides the values in the header which are not understood by Astropy WCS\n+ \"\"\"\n+ return SpatialPair(\"HPLN-TAN\", \"HPLT-TAN\")\n+\n+ @property\n+ def nickname(self):\n+ site = _SITE_NAMES.get(self.meta.get(\"sitename\", \"\"), \"UNKNOWN\")\n+ return f'{self.observatory}, {site}'\n+\n+ @property\n+ def spatial_units(self):\n+ return SpatialPair(u.deg, u.deg)\n+\n+ @property\n+ def _earth_location(self):\n+ \"\"\"Location of the observatory on Earth\"\"\"\n+ return EarthLocation.from_geodetic(lat=self.meta['site-lat'] * u.deg, lon=self.meta['site-lon'] * u.deg)\n+\n+ @property\n+ def observer_coordinate(self):\n+ return SkyCoord(self._earth_location.get_itrs(self.date)).heliographic_stonyhurst\n", "issue": "No map source to handle GONG H-alpha data\n### Describe the bug\r\nOriginally raised by nawinnga in the sunpy chat room.\r\n\r\nThere is no map source to do the necessary translation from the GONG H-alpha headers to standard meta data Map can use related to #6653 and #6655.\r\n\r\nIt doesn't look like its trivial to find/extract/calculate the necessary WCS information (e.g. PCi_j + CDELT or CDi_j ...) 
without some reference to what the fits headers mean some info on [this NSO page](https://nso.edu/data/nisp-data/h-alpha/)\r\n\r\n### To Reproduce\r\n\r\n```\r\nfrom astropy.io import fits\r\nimport sunpy\r\n\r\n# to get around issue #6655\r\nhdul = fits.open('ftp://gong2.nso.edu/HA/haf/202203/20220318/20220318000050Bh.fits.fz')\r\nsunpy.map.Map((hdul[1].data, hdul[1].header))\r\n\r\nMap((hdul[1].data, hdul[1].header))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/map_factory.py\", line 331, in __call__\r\n new_map = self._check_registered_widgets(data, meta, **kwargs)\r\n File \"/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/map_factory.py\", line 381, in _check_registered_widgets\r\n return WidgetType(data, meta, **kwargs)\r\n File \"/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/mapbase.py\", line 232, in __init__\r\n self._validate_meta()\r\n File \"/Users/shane/.virtualenvs/temp/lib/python3.9/site-packages/sunpy/map/mapbase.py\", line 1409, in _validate_meta\r\n raise MapMetaValidationError('\\n'.join(err_message))\r\nsunpy.map.mapbase.MapMetaValidationError: Image coordinate units for axis 1 not present in metadata.\r\nImage coordinate units for axis 2 not present in metadata.\r\nSee https://docs.sunpy.org/en/stable/code_ref/map.html#fixing-map-metadata for instructions on how to add missing metadata.\r\n```\r\nSetting the cdelt and cunit at least get lets the map be loaded but the WCS is still non-standard\r\n```\r\nhdul[1].header['cdelt1'] = 1\r\nhdul[1].header['cdelt2'] = 1\r\nhdul[1].header['cunit1'] = 'arcsec'\r\nhdul[1].header['cunit2'] = 'arcsec'\r\n\r\nmm = Map((hdul[1].data, hdul[1].header))\r\nWARNING: SunpyMetadataWarning: Missing metadata for observer: assuming Earth-based observer.\r\nFor frame 'heliographic_stonyhurst' the following metadata is missing: hgln_obs,hglt_obs,dsun_obs\r\nFor frame 'heliographic_carrington' the following metadata is missing: crlt_obs,dsun_obs,crln_obs\r\n [sunpy.map.mapbase]\r\nWARNING: SunpyUserWarning: Could not determine coordinate frame from map metadata.\r\nCould not determine celestial frame corresponding to the specified WCS object [sunpy.map.mapbase]\r\n<sunpy.map.mapbase.GenericMap object at 0x1297eb160>\r\nSunPy Map\r\n---------\r\nObservatory:\t\t NSO-GONG\r\nInstrument:\r\nDetector:\r\nMeasurement:\t\t 6562.808\r\nWavelength:\t\t 6562.808\r\nObservation Date:\t 2022-03-18 00:00:50\r\nExposure Time:\t\t Unknown\r\nDimension:\t\t [2048. 2048.] pix\r\nCoordinate System:\t Unknown\r\nScale:\t\t\t [1. 1.] arcsec / pix\r\nReference Pixel:\t [1023. 1023.] pix\r\nReference Coord:\t [0. 0.] 
arcsec\r\narray([[0, 0, 0, ..., 0, 0, 0],\r\n [0, 0, 0, ..., 0, 0, 0],\r\n [0, 0, 0, ..., 0, 0, 0],\r\n ...,\r\n [0, 0, 0, ..., 0, 0, 0],\r\n [0, 0, 0, ..., 0, 0, 0],\r\n [0, 0, 0, ..., 0, 0, 0]], dtype=int16)\r\n```\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### System Details\r\n\r\nOS: Mac OS 10.16 Arch: 64bit, (i386)\r\npython3.9\r\nsunpy: 4.1.0\r\nastropy: 5.1.1\r\nnumpy: 1.23.5\r\n\r\n### Installation method\r\n\r\npip\n", "before_files": [{"content": "\"\"\"\nGONG Map subclass definitions\n\"\"\"\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.time import Time\n\nfrom sunpy.coordinates import get_earth\nfrom sunpy.map import GenericMap\n\n__all__ = ['GONGSynopticMap']\n\nfrom sunpy.map.mapbase import SpatialPair\n\n\nclass GONGSynopticMap(GenericMap):\n \"\"\"\n GONG Synoptic Map.\n\n The Global Oscillation Network Group (GONG) operates a six-station network of velocity\n imagers located around the Earth that observe the Sun nearly continuously. GONG\n produces hourly photospheric magnetograms using the Ni I 676.8 nm spectral line with an\n array of 242\u00d7256 pixels covering the solar disk. These magnetograms are used to derive\n synoptic maps which show a full-surface picture of the solar magnetic field.\n\n Notes\n -----\n If you have ``pfsspy`` installed this map source will be used instead of the one built into ``pfsspy``.\n\n References\n ----------\n * `GONG Page <https://gong.nso.edu/>`_\n * `Magnetogram Synoptic Map Images Page <https://gong.nso.edu/data/magmap/>`_\n * `FITS header keywords <https://gong.nso.edu/data/DMAC_documentation/General/fitsdesc.html>`_\n * `Instrument Paper (pp. 203\u2013208) <https://inis.iaea.org/collection/NCLCollectionStore/_Public/20/062/20062491.pdf>`_\n * `GONG+ Documentation <https://gong.nso.edu/data/DMAC_documentation/PipelineMap/GlobalMap.html>`_\n \"\"\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n return (str(header.get('TELESCOP', '')).endswith('GONG') and\n str(header.get('CTYPE1', '').startswith('CRLN')))\n\n @property\n def date(self):\n return Time(f\"{self.meta.get('date-obs')} {self.meta.get('time-obs')}\")\n\n @property\n def scale(self):\n # Since, this map uses the cylindrical equal-area (CEA) projection,\n # the spacing should be modified to 180/pi times the original value\n # Reference: Section 5.5, Thompson 2006\n return SpatialPair(self.meta['cdelt1'] * self.spatial_units[0] / u.pixel,\n self.meta['cdelt2'] * 180 / np.pi * self.spatial_units[0] / u.pixel)\n\n @property\n def spatial_units(self):\n return SpatialPair(u.deg, u.deg)\n\n @property\n def observer_coordinate(self):\n return get_earth(self.date)\n", "path": "sunpy/map/sources/gong.py"}]}
2409
954
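Besides introducing GONGHalphaMap, the golden diff above fixes a misplaced parenthesis in GONGSynopticMap.is_datasource_for: in the original, startswith() ran inside str(), producing the string 'True' or 'False', and both are truthy, so the CTYPE1 check always passed. A short self-contained demonstration:

```
header = {'CTYPE1': 'HPLN-TAN'}   # an axis type that should *not* match

# Original form: the bool from startswith() is stringified, and any
# non-empty string, 'False' included, is truthy.
buggy = str(header.get('CTYPE1', '').startswith('CRLN'))
assert buggy == 'False' and bool(buggy) is True

# Form after the diff: stringify the header value first, then test it.
fixed = str(header.get('CTYPE1', '')).startswith('CRLN')
assert fixed is False
```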
gh_patches_debug_35167
rasdani/github-patches
git_diff
translate__pootle-4148
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Translation of the Report Email I would like to translate the words of the report email, if you could integrate this kind of template on the po file, it would be amazing... naturally title of the email included, which it would be `[(name-site)] Unit #(num) ((lang))` ``` Username: (username) Current URL: (url) IP address: (ip_address) User-Agent: (user_agent) Unit: (url_string) Source: (source_string) Current translation: Your question or comment: ``` Thx in advance ;) </issue> <code> [start of pootle/apps/contact/views.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) Pootle contributors. 5 # 6 # This file is a part of the Pootle project. It is distributed under the GPL3 7 # or later license. See the LICENSE file for a copy of the license and the 8 # AUTHORS file for copyright and authorship information. 9 10 from django.core.urlresolvers import reverse 11 from django.views.generic import TemplateView 12 13 from contact_form.views import ContactFormView as OriginalContactFormView 14 15 from pootle.core.views import AjaxResponseMixin 16 17 from .forms import ContactForm, ReportForm 18 19 20 SUBJECT_TEMPLATE = 'Unit #%d (%s)' 21 BODY_TEMPLATE = ''' 22 Unit: %s 23 24 Source: %s 25 26 Current translation: %s 27 28 Your question or comment: 29 ''' 30 31 32 class ContactFormTemplateView(TemplateView): 33 template_name = 'contact_form/contact_form.html' 34 35 36 class ContactFormView(AjaxResponseMixin, OriginalContactFormView): 37 form_class = ContactForm 38 template_name = 'contact_form/xhr_contact_form.html' 39 40 def get_context_data(self, **kwargs): 41 ctx = super(ContactFormView, self).get_context_data(**kwargs) 42 # Provide the form action URL to use in the template that renders the 43 # contact dialog. 44 ctx.update({ 45 'contact_form_url': reverse('pootle-contact-xhr'), 46 }) 47 return ctx 48 49 def get_initial(self): 50 initial = super(ContactFormView, self).get_initial() 51 52 user = self.request.user 53 if user.is_authenticated(): 54 initial.update({ 55 'name': user.full_name, 56 'email': user.email, 57 }) 58 59 return initial 60 61 def get_success_url(self): 62 # XXX: This is unused. We don't need a `/contact/sent/` URL, but 63 # the parent :cls:`ContactView` enforces us to set some value here 64 return reverse('pootle-contact') 65 66 67 class ReportFormView(ContactFormView): 68 form_class = ReportForm 69 70 def get_context_data(self, **kwargs): 71 ctx = super(ReportFormView, self).get_context_data(**kwargs) 72 # Provide the form action URL to use in the template that renders the 73 # contact dialog. 
74 ctx.update({ 75 'contact_form_url': reverse('pootle-contact-report-error'), 76 }) 77 return ctx 78 79 def get_initial(self): 80 initial = super(ReportFormView, self).get_initial() 81 82 report = self.request.GET.get('report', False) 83 if report: 84 try: 85 from pootle_store.models import Unit 86 uid = int(report) 87 try: 88 unit = Unit.objects.select_related( 89 'store__translation_project__project', 90 ).get(id=uid) 91 if unit.is_accessible_by(self.request.user): 92 unit_absolute_url = self.request.build_absolute_uri( 93 unit.get_translate_url() 94 ) 95 initial.update({ 96 'subject': SUBJECT_TEMPLATE % ( 97 unit.id, 98 unit.store.translation_project.language.code 99 ), 100 'body': BODY_TEMPLATE % ( 101 unit_absolute_url, 102 unit.source, 103 unit.target 104 ), 105 'report_email': unit.store.translation_project \ 106 .project.report_email, 107 }) 108 except Unit.DoesNotExist: 109 pass 110 except ValueError: 111 pass 112 113 return initial 114 [end of pootle/apps/contact/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/apps/contact/views.py b/pootle/apps/contact/views.py --- a/pootle/apps/contact/views.py +++ b/pootle/apps/contact/views.py @@ -8,6 +8,7 @@ # AUTHORS file for copyright and authorship information. from django.core.urlresolvers import reverse +from django.template.loader import render_to_string from django.views.generic import TemplateView from contact_form.views import ContactFormView as OriginalContactFormView @@ -17,18 +18,6 @@ from .forms import ContactForm, ReportForm -SUBJECT_TEMPLATE = 'Unit #%d (%s)' -BODY_TEMPLATE = ''' -Unit: %s - -Source: %s - -Current translation: %s - -Your question or comment: -''' - - class ContactFormTemplateView(TemplateView): template_name = 'contact_form/contact_form.html' @@ -93,15 +82,18 @@ unit.get_translate_url() ) initial.update({ - 'subject': SUBJECT_TEMPLATE % ( - unit.id, - unit.store.translation_project.language.code - ), - 'body': BODY_TEMPLATE % ( - unit_absolute_url, - unit.source, - unit.target - ), + 'subject': render_to_string( + 'contact_form/report_form_subject.txt', { + 'unit': unit, + 'language': unit.store \ + .translation_project \ + .language.code, + }), + 'body': render_to_string( + 'contact_form/report_form_body.txt', { + 'unit': unit, + 'unit_absolute_url': unit_absolute_url, + }), 'report_email': unit.store.translation_project \ .project.report_email, })
{"golden_diff": "diff --git a/pootle/apps/contact/views.py b/pootle/apps/contact/views.py\n--- a/pootle/apps/contact/views.py\n+++ b/pootle/apps/contact/views.py\n@@ -8,6 +8,7 @@\n # AUTHORS file for copyright and authorship information.\n \n from django.core.urlresolvers import reverse\n+from django.template.loader import render_to_string\n from django.views.generic import TemplateView\n \n from contact_form.views import ContactFormView as OriginalContactFormView\n@@ -17,18 +18,6 @@\n from .forms import ContactForm, ReportForm\n \n \n-SUBJECT_TEMPLATE = 'Unit #%d (%s)'\n-BODY_TEMPLATE = '''\n-Unit: %s\n-\n-Source: %s\n-\n-Current translation: %s\n-\n-Your question or comment:\n-'''\n-\n-\n class ContactFormTemplateView(TemplateView):\n template_name = 'contact_form/contact_form.html'\n \n@@ -93,15 +82,18 @@\n unit.get_translate_url()\n )\n initial.update({\n- 'subject': SUBJECT_TEMPLATE % (\n- unit.id,\n- unit.store.translation_project.language.code\n- ),\n- 'body': BODY_TEMPLATE % (\n- unit_absolute_url,\n- unit.source,\n- unit.target\n- ),\n+ 'subject': render_to_string(\n+ 'contact_form/report_form_subject.txt', {\n+ 'unit': unit,\n+ 'language': unit.store \\\n+ .translation_project \\\n+ .language.code,\n+ }),\n+ 'body': render_to_string(\n+ 'contact_form/report_form_body.txt', {\n+ 'unit': unit,\n+ 'unit_absolute_url': unit_absolute_url,\n+ }),\n 'report_email': unit.store.translation_project \\\n .project.report_email,\n })\n", "issue": "Translation of the Report Email\nI would like to translate the words of the report email, if you could integrate this kind of template on the po file, it would be amazing... naturally title of the email included, which it would be `[(name-site)] Unit #(num) ((lang))`\n\n```\nUsername: (username)\nCurrent URL: (url)\nIP address: (ip_address)\nUser-Agent: (user_agent)\n\nUnit: (url_string)\n\nSource: (source_string)\n\nCurrent translation: \n\nYour question or comment:\n```\n\nThx in advance ;)\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import TemplateView\n\nfrom contact_form.views import ContactFormView as OriginalContactFormView\n\nfrom pootle.core.views import AjaxResponseMixin\n\nfrom .forms import ContactForm, ReportForm\n\n\nSUBJECT_TEMPLATE = 'Unit #%d (%s)'\nBODY_TEMPLATE = '''\nUnit: %s\n\nSource: %s\n\nCurrent translation: %s\n\nYour question or comment:\n'''\n\n\nclass ContactFormTemplateView(TemplateView):\n template_name = 'contact_form/contact_form.html'\n\n\nclass ContactFormView(AjaxResponseMixin, OriginalContactFormView):\n form_class = ContactForm\n template_name = 'contact_form/xhr_contact_form.html'\n\n def get_context_data(self, **kwargs):\n ctx = super(ContactFormView, self).get_context_data(**kwargs)\n # Provide the form action URL to use in the template that renders the\n # contact dialog.\n ctx.update({\n 'contact_form_url': reverse('pootle-contact-xhr'),\n })\n return ctx\n\n def get_initial(self):\n initial = super(ContactFormView, self).get_initial()\n\n user = self.request.user\n if user.is_authenticated():\n initial.update({\n 'name': user.full_name,\n 'email': user.email,\n })\n\n return initial\n\n def get_success_url(self):\n # XXX: This is unused. 
We don't need a `/contact/sent/` URL, but\n # the parent :cls:`ContactView` enforces us to set some value here\n return reverse('pootle-contact')\n\n\nclass ReportFormView(ContactFormView):\n form_class = ReportForm\n\n def get_context_data(self, **kwargs):\n ctx = super(ReportFormView, self).get_context_data(**kwargs)\n # Provide the form action URL to use in the template that renders the\n # contact dialog.\n ctx.update({\n 'contact_form_url': reverse('pootle-contact-report-error'),\n })\n return ctx\n\n def get_initial(self):\n initial = super(ReportFormView, self).get_initial()\n\n report = self.request.GET.get('report', False)\n if report:\n try:\n from pootle_store.models import Unit\n uid = int(report)\n try:\n unit = Unit.objects.select_related(\n 'store__translation_project__project',\n ).get(id=uid)\n if unit.is_accessible_by(self.request.user):\n unit_absolute_url = self.request.build_absolute_uri(\n unit.get_translate_url()\n )\n initial.update({\n 'subject': SUBJECT_TEMPLATE % (\n unit.id,\n unit.store.translation_project.language.code\n ),\n 'body': BODY_TEMPLATE % (\n unit_absolute_url,\n unit.source,\n unit.target\n ),\n 'report_email': unit.store.translation_project \\\n .project.report_email,\n })\n except Unit.DoesNotExist:\n pass\n except ValueError:\n pass\n\n return initial\n", "path": "pootle/apps/contact/views.py"}]}
1603
385
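The fix above works by replacing the module-level SUBJECT_TEMPLATE and BODY_TEMPLATE constants with Django templates rendered per request via render_to_string, since literals inside templates can be marked for translation, while a constant formatted at import time is frozen in English. A standalone sketch of that idea with a toy gettext stand-in; the catalog entry is purely illustrative:

```
def gettext(s):
    # Toy stand-in for Django's translation lookup; a real setup would
    # consult the active language's message catalog.
    catalog = {'Your question or comment:': 'Ihre Frage oder Anmerkung:'}
    return catalog.get(s, s)

def render_body(unit_url, source, target):
    # Every literal passes through gettext() at render time, so the email
    # body follows the active language instead of being fixed at import.
    return '\n'.join([
        f"{gettext('Unit:')} {unit_url}",
        '',
        f"{gettext('Source:')} {source}",
        '',
        f"{gettext('Current translation:')} {target}",
        '',
        gettext('Your question or comment:'),
    ])

print(render_body('https://example.invalid/unit/1', 'hello', 'hallo'))
```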
gh_patches_debug_16638
rasdani/github-patches
git_diff
python-poetry__poetry-6338
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `poetry cache clear` no longer respects `--no-interaction` flag <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). <!-- Once those are done, if you're able to fill in the following list with your information, it'd be very helpful to whoever handles the issue. --> - **OS version and name**: Ubuntu 22.04 - **Poetry version**: 1.2.0 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: <!-- Gist Link Here --> ## Issue <!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ --> Since poetry version 1.2.0, the `poetry cache clear` command no longer respects the `--no-interaction` flag: ``` $ poetry cache clear --all --no-interaction . Delete 1882 entries? (yes/no) [no] ^C ``` </issue> <code> [start of src/poetry/console/commands/cache/clear.py] 1 from __future__ import annotations 2 3 import os 4 5 from cleo.helpers import argument 6 from cleo.helpers import option 7 8 from poetry.config.config import Config 9 from poetry.console.commands.command import Command 10 11 12 class CacheClearCommand(Command): 13 name = "cache clear" 14 description = "Clears Poetry's cache." 15 16 arguments = [argument("cache", description="The name of the cache to clear.")] 17 options = [option("all", description="Clear all entries in the cache.")] 18 19 def handle(self) -> int: 20 from cachy import CacheManager 21 22 cache = self.argument("cache") 23 24 parts = cache.split(":") 25 root = parts[0] 26 27 config = Config.create() 28 cache_dir = config.repository_cache_directory / root 29 30 try: 31 cache_dir.relative_to(config.repository_cache_directory) 32 except ValueError: 33 raise ValueError(f"{root} is not a valid repository cache") 34 35 cache = CacheManager( 36 { 37 "default": parts[0], 38 "serializer": "json", 39 "stores": {parts[0]: {"driver": "file", "path": str(cache_dir)}}, 40 } 41 ) 42 43 if len(parts) == 1: 44 if not self.option("all"): 45 raise RuntimeError( 46 f"Add the --all option if you want to clear all {parts[0]} caches" 47 ) 48 49 if not cache_dir.exists(): 50 self.line(f"No cache entries for {parts[0]}") 51 return 0 52 53 # Calculate number of entries 54 entries_count = sum( 55 len(files) for _path, _dirs, files in os.walk(str(cache_dir)) 56 ) 57 58 delete = self.confirm(f"<question>Delete {entries_count} entries?</>") 59 if not delete: 60 return 0 61 62 cache.flush() 63 elif len(parts) == 2: 64 raise RuntimeError( 65 "Only specifying the package name is not yet supported. 
" 66 "Add a specific version to clear" 67 ) 68 elif len(parts) == 3: 69 package = parts[1] 70 version = parts[2] 71 72 if not cache.has(f"{package}:{version}"): 73 self.line(f"No cache entries for {package}:{version}") 74 return 0 75 76 delete = self.confirm(f"Delete cache entry {package}:{version}") 77 if not delete: 78 return 0 79 80 cache.forget(f"{package}:{version}") 81 else: 82 raise ValueError("Invalid cache key") 83 84 return 0 85 [end of src/poetry/console/commands/cache/clear.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/poetry/console/commands/cache/clear.py b/src/poetry/console/commands/cache/clear.py --- a/src/poetry/console/commands/cache/clear.py +++ b/src/poetry/console/commands/cache/clear.py @@ -55,7 +55,7 @@ len(files) for _path, _dirs, files in os.walk(str(cache_dir)) ) - delete = self.confirm(f"<question>Delete {entries_count} entries?</>") + delete = self.confirm(f"<question>Delete {entries_count} entries?</>", True) if not delete: return 0 @@ -73,7 +73,7 @@ self.line(f"No cache entries for {package}:{version}") return 0 - delete = self.confirm(f"Delete cache entry {package}:{version}") + delete = self.confirm(f"Delete cache entry {package}:{version}", True) if not delete: return 0
{"golden_diff": "diff --git a/src/poetry/console/commands/cache/clear.py b/src/poetry/console/commands/cache/clear.py\n--- a/src/poetry/console/commands/cache/clear.py\n+++ b/src/poetry/console/commands/cache/clear.py\n@@ -55,7 +55,7 @@\n len(files) for _path, _dirs, files in os.walk(str(cache_dir))\n )\n \n- delete = self.confirm(f\"<question>Delete {entries_count} entries?</>\")\n+ delete = self.confirm(f\"<question>Delete {entries_count} entries?</>\", True)\n if not delete:\n return 0\n \n@@ -73,7 +73,7 @@\n self.line(f\"No cache entries for {package}:{version}\")\n return 0\n \n- delete = self.confirm(f\"Delete cache entry {package}:{version}\")\n+ delete = self.confirm(f\"Delete cache entry {package}:{version}\", True)\n if not delete:\n return 0\n", "issue": "`poetry cache clear` no longer respects `--no-interaction` flag\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Ubuntu 22.04\r\n- **Poetry version**: 1.2.0\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: <!-- Gist Link Here -->\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nSince poetry version 1.2.0, the `poetry cache clear` command no longer respects the `--no-interaction` flag:\r\n\r\n```\r\n$ poetry cache clear --all --no-interaction .\r\nDelete 1882 entries? 
(yes/no) [no] ^C\r\n```\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\n\nfrom cleo.helpers import argument\nfrom cleo.helpers import option\n\nfrom poetry.config.config import Config\nfrom poetry.console.commands.command import Command\n\n\nclass CacheClearCommand(Command):\n name = \"cache clear\"\n description = \"Clears Poetry's cache.\"\n\n arguments = [argument(\"cache\", description=\"The name of the cache to clear.\")]\n options = [option(\"all\", description=\"Clear all entries in the cache.\")]\n\n def handle(self) -> int:\n from cachy import CacheManager\n\n cache = self.argument(\"cache\")\n\n parts = cache.split(\":\")\n root = parts[0]\n\n config = Config.create()\n cache_dir = config.repository_cache_directory / root\n\n try:\n cache_dir.relative_to(config.repository_cache_directory)\n except ValueError:\n raise ValueError(f\"{root} is not a valid repository cache\")\n\n cache = CacheManager(\n {\n \"default\": parts[0],\n \"serializer\": \"json\",\n \"stores\": {parts[0]: {\"driver\": \"file\", \"path\": str(cache_dir)}},\n }\n )\n\n if len(parts) == 1:\n if not self.option(\"all\"):\n raise RuntimeError(\n f\"Add the --all option if you want to clear all {parts[0]} caches\"\n )\n\n if not cache_dir.exists():\n self.line(f\"No cache entries for {parts[0]}\")\n return 0\n\n # Calculate number of entries\n entries_count = sum(\n len(files) for _path, _dirs, files in os.walk(str(cache_dir))\n )\n\n delete = self.confirm(f\"<question>Delete {entries_count} entries?</>\")\n if not delete:\n return 0\n\n cache.flush()\n elif len(parts) == 2:\n raise RuntimeError(\n \"Only specifying the package name is not yet supported. \"\n \"Add a specific version to clear\"\n )\n elif len(parts) == 3:\n package = parts[1]\n version = parts[2]\n\n if not cache.has(f\"{package}:{version}\"):\n self.line(f\"No cache entries for {package}:{version}\")\n return 0\n\n delete = self.confirm(f\"Delete cache entry {package}:{version}\")\n if not delete:\n return 0\n\n cache.forget(f\"{package}:{version}\")\n else:\n raise ValueError(\"Invalid cache key\")\n\n return 0\n", "path": "src/poetry/console/commands/cache/clear.py"}]}
1600
210
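The poetry fix above passes True as confirm()'s second argument, which in cleo is the default answer. Assuming cleo's usual behaviour, where a non-interactive session skips the prompt and returns the default, the old default of False meant --no-interaction silently answered "no" and nothing was ever cleared. A toy model of that interaction:

```
def confirm(question, default=False, interactive=True):
    # Toy model of a console confirm; the real logic lives in cleo, and the
    # non-interactive short-circuit here is an assumption about its API.
    if not interactive:
        return default
    raw = input(f"{question} (yes/no) [{'yes' if default else 'no'}] ").strip().lower()
    return raw in ('y', 'yes') if raw else default

# Before the fix: the implicit default of False wins once prompting stops.
assert confirm("Delete 1882 entries?", interactive=False) is False
# After the fix: default=True lets the clear proceed unattended.
assert confirm("Delete 1882 entries?", True, interactive=False) is True
```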
gh_patches_debug_63509
rasdani/github-patches
git_diff
MongoEngine__mongoengine-879
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `BaseDict` does not follow `setdefault` `mongoengine.base.datastructures.BaseDict` does not follow changes made through `setdefault`. I have a DictField in a model: ``` python success_rates = DictField() ``` I update the field using `serdefault` and the changes are not saved: ``` python user_state.success_rates.setdefault(topic_area_group_id, {}).setdefault( task_date, {})[str(task_id)] = action.data['is_solved'] ``` I currently do this: ``` python user_state._changed_fields.append('success_rates') ``` `BaseDict` does not follow `setdefault` `mongoengine.base.datastructures.BaseDict` does not follow changes made through `setdefault`. I have a DictField in a model: ``` python success_rates = DictField() ``` I update the field using `serdefault` and the changes are not saved: ``` python user_state.success_rates.setdefault(topic_area_group_id, {}).setdefault( task_date, {})[str(task_id)] = action.data['is_solved'] ``` I currently do this: ``` python user_state._changed_fields.append('success_rates') ``` </issue> <code> [start of mongoengine/base/datastructures.py] 1 import weakref 2 import functools 3 import itertools 4 from mongoengine.common import _import_class 5 6 __all__ = ("BaseDict", "BaseList") 7 8 9 class BaseDict(dict): 10 """A special dict so we can watch any changes""" 11 12 _dereferenced = False 13 _instance = None 14 _name = None 15 16 def __init__(self, dict_items, instance, name): 17 Document = _import_class('Document') 18 EmbeddedDocument = _import_class('EmbeddedDocument') 19 20 if isinstance(instance, (Document, EmbeddedDocument)): 21 self._instance = weakref.proxy(instance) 22 self._name = name 23 return super(BaseDict, self).__init__(dict_items) 24 25 def __getitem__(self, key, *args, **kwargs): 26 value = super(BaseDict, self).__getitem__(key) 27 28 EmbeddedDocument = _import_class('EmbeddedDocument') 29 if isinstance(value, EmbeddedDocument) and value._instance is None: 30 value._instance = self._instance 31 elif not isinstance(value, BaseDict) and isinstance(value, dict): 32 value = BaseDict(value, None, '%s.%s' % (self._name, key)) 33 super(BaseDict, self).__setitem__(key, value) 34 value._instance = self._instance 35 elif not isinstance(value, BaseList) and isinstance(value, list): 36 value = BaseList(value, None, '%s.%s' % (self._name, key)) 37 super(BaseDict, self).__setitem__(key, value) 38 value._instance = self._instance 39 return value 40 41 def __setitem__(self, key, value, *args, **kwargs): 42 self._mark_as_changed(key) 43 return super(BaseDict, self).__setitem__(key, value) 44 45 def __delete__(self, *args, **kwargs): 46 self._mark_as_changed() 47 return super(BaseDict, self).__delete__(*args, **kwargs) 48 49 def __delitem__(self, key, *args, **kwargs): 50 self._mark_as_changed(key) 51 return super(BaseDict, self).__delitem__(key) 52 53 def __delattr__(self, key, *args, **kwargs): 54 self._mark_as_changed(key) 55 return super(BaseDict, self).__delattr__(key) 56 57 def __getstate__(self): 58 self.instance = None 59 self._dereferenced = False 60 return self 61 62 def __setstate__(self, state): 63 self = state 64 return self 65 66 def clear(self, *args, **kwargs): 67 self._mark_as_changed() 68 return super(BaseDict, self).clear(*args, **kwargs) 69 70 def pop(self, *args, **kwargs): 71 self._mark_as_changed() 72 return super(BaseDict, self).pop(*args, **kwargs) 73 74 def popitem(self, *args, **kwargs): 75 self._mark_as_changed() 76 return super(BaseDict, 
self).popitem(*args, **kwargs) 77 78 def update(self, *args, **kwargs): 79 self._mark_as_changed() 80 return super(BaseDict, self).update(*args, **kwargs) 81 82 def _mark_as_changed(self, key=None): 83 if hasattr(self._instance, '_mark_as_changed'): 84 if key: 85 self._instance._mark_as_changed('%s.%s' % (self._name, key)) 86 else: 87 self._instance._mark_as_changed(self._name) 88 89 90 class BaseList(list): 91 """A special list so we can watch any changes 92 """ 93 94 _dereferenced = False 95 _instance = None 96 _name = None 97 98 def __init__(self, list_items, instance, name): 99 Document = _import_class('Document') 100 EmbeddedDocument = _import_class('EmbeddedDocument') 101 102 if isinstance(instance, (Document, EmbeddedDocument)): 103 self._instance = weakref.proxy(instance) 104 self._name = name 105 return super(BaseList, self).__init__(list_items) 106 107 def __getitem__(self, key, *args, **kwargs): 108 value = super(BaseList, self).__getitem__(key) 109 110 EmbeddedDocument = _import_class('EmbeddedDocument') 111 if isinstance(value, EmbeddedDocument) and value._instance is None: 112 value._instance = self._instance 113 elif not isinstance(value, BaseDict) and isinstance(value, dict): 114 value = BaseDict(value, None, '%s.%s' % (self._name, key)) 115 super(BaseList, self).__setitem__(key, value) 116 value._instance = self._instance 117 elif not isinstance(value, BaseList) and isinstance(value, list): 118 value = BaseList(value, None, '%s.%s' % (self._name, key)) 119 super(BaseList, self).__setitem__(key, value) 120 value._instance = self._instance 121 return value 122 123 def __setitem__(self, key, value, *args, **kwargs): 124 if isinstance(key, slice): 125 self._mark_as_changed() 126 else: 127 self._mark_as_changed(key) 128 return super(BaseList, self).__setitem__(key, value) 129 130 def __delitem__(self, key, *args, **kwargs): 131 if isinstance(key, slice): 132 self._mark_as_changed() 133 else: 134 self._mark_as_changed(key) 135 return super(BaseList, self).__delitem__(key) 136 137 def __setslice__(self, *args, **kwargs): 138 self._mark_as_changed() 139 return super(BaseList, self).__setslice__(*args, **kwargs) 140 141 def __delslice__(self, *args, **kwargs): 142 self._mark_as_changed() 143 return super(BaseList, self).__delslice__(*args, **kwargs) 144 145 def __getstate__(self): 146 self.instance = None 147 self._dereferenced = False 148 return self 149 150 def __setstate__(self, state): 151 self = state 152 return self 153 154 def append(self, *args, **kwargs): 155 self._mark_as_changed() 156 return super(BaseList, self).append(*args, **kwargs) 157 158 def extend(self, *args, **kwargs): 159 self._mark_as_changed() 160 return super(BaseList, self).extend(*args, **kwargs) 161 162 def insert(self, *args, **kwargs): 163 self._mark_as_changed() 164 return super(BaseList, self).insert(*args, **kwargs) 165 166 def pop(self, *args, **kwargs): 167 self._mark_as_changed() 168 return super(BaseList, self).pop(*args, **kwargs) 169 170 def remove(self, *args, **kwargs): 171 self._mark_as_changed() 172 return super(BaseList, self).remove(*args, **kwargs) 173 174 def reverse(self, *args, **kwargs): 175 self._mark_as_changed() 176 return super(BaseList, self).reverse(*args, **kwargs) 177 178 def sort(self, *args, **kwargs): 179 self._mark_as_changed() 180 return super(BaseList, self).sort(*args, **kwargs) 181 182 def _mark_as_changed(self, key=None): 183 if hasattr(self._instance, '_mark_as_changed'): 184 if key: 185 self._instance._mark_as_changed('%s.%s' % (self._name, key)) 186 else: 187 
self._instance._mark_as_changed(self._name) 188 189 190 class StrictDict(object): 191 __slots__ = () 192 _special_fields = set(['get', 'pop', 'iteritems', 'items', 'keys', 'create']) 193 _classes = {} 194 def __init__(self, **kwargs): 195 for k,v in kwargs.iteritems(): 196 setattr(self, k, v) 197 def __getitem__(self, key): 198 key = '_reserved_' + key if key in self._special_fields else key 199 try: 200 return getattr(self, key) 201 except AttributeError: 202 raise KeyError(key) 203 def __setitem__(self, key, value): 204 key = '_reserved_' + key if key in self._special_fields else key 205 return setattr(self, key, value) 206 def __contains__(self, key): 207 return hasattr(self, key) 208 def get(self, key, default=None): 209 try: 210 return self[key] 211 except KeyError: 212 return default 213 def pop(self, key, default=None): 214 v = self.get(key, default) 215 try: 216 delattr(self, key) 217 except AttributeError: 218 pass 219 return v 220 def iteritems(self): 221 for key in self: 222 yield key, self[key] 223 def items(self): 224 return [(k, self[k]) for k in iter(self)] 225 def keys(self): 226 return list(iter(self)) 227 def __iter__(self): 228 return (key for key in self.__slots__ if hasattr(self, key)) 229 def __len__(self): 230 return len(list(self.iteritems())) 231 def __eq__(self, other): 232 return self.items() == other.items() 233 def __neq__(self, other): 234 return self.items() != other.items() 235 236 @classmethod 237 def create(cls, allowed_keys): 238 allowed_keys_tuple = tuple(('_reserved_' + k if k in cls._special_fields else k) for k in allowed_keys) 239 allowed_keys = frozenset(allowed_keys_tuple) 240 if allowed_keys not in cls._classes: 241 class SpecificStrictDict(cls): 242 __slots__ = allowed_keys_tuple 243 def __repr__(self): 244 return "{%s}" % ', '.join('"{0!s}": {0!r}'.format(k,v) for (k,v) in self.iteritems()) 245 cls._classes[allowed_keys] = SpecificStrictDict 246 return cls._classes[allowed_keys] 247 248 249 class SemiStrictDict(StrictDict): 250 __slots__ = ('_extras') 251 _classes = {} 252 def __getattr__(self, attr): 253 try: 254 super(SemiStrictDict, self).__getattr__(attr) 255 except AttributeError: 256 try: 257 return self.__getattribute__('_extras')[attr] 258 except KeyError as e: 259 raise AttributeError(e) 260 def __setattr__(self, attr, value): 261 try: 262 super(SemiStrictDict, self).__setattr__(attr, value) 263 except AttributeError: 264 try: 265 self._extras[attr] = value 266 except AttributeError: 267 self._extras = {attr: value} 268 269 def __delattr__(self, attr): 270 try: 271 super(SemiStrictDict, self).__delattr__(attr) 272 except AttributeError: 273 try: 274 del self._extras[attr] 275 except KeyError as e: 276 raise AttributeError(e) 277 278 def __iter__(self): 279 try: 280 extras_iter = iter(self.__getattribute__('_extras')) 281 except AttributeError: 282 extras_iter = () 283 return itertools.chain(super(SemiStrictDict, self).__iter__(), extras_iter) 284 [end of mongoengine/base/datastructures.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mongoengine/base/datastructures.py b/mongoengine/base/datastructures.py --- a/mongoengine/base/datastructures.py +++ b/mongoengine/base/datastructures.py @@ -75,6 +75,10 @@ self._mark_as_changed() return super(BaseDict, self).popitem(*args, **kwargs) + def setdefault(self, *args, **kwargs): + self._mark_as_changed() + return super(BaseDict, self).setdefault(*args, **kwargs) + def update(self, *args, **kwargs): self._mark_as_changed() return super(BaseDict, self).update(*args, **kwargs)
{"golden_diff": "diff --git a/mongoengine/base/datastructures.py b/mongoengine/base/datastructures.py\n--- a/mongoengine/base/datastructures.py\n+++ b/mongoengine/base/datastructures.py\n@@ -75,6 +75,10 @@\n self._mark_as_changed()\n return super(BaseDict, self).popitem(*args, **kwargs)\n \n+ def setdefault(self, *args, **kwargs):\n+ self._mark_as_changed()\n+ return super(BaseDict, self).setdefault(*args, **kwargs)\n+\n def update(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).update(*args, **kwargs)\n", "issue": "`BaseDict` does not follow `setdefault`\n`mongoengine.base.datastructures.BaseDict` does not follow changes made through `setdefault`.\n\nI have a DictField in a model:\n\n``` python\n success_rates = DictField()\n```\n\nI update the field using `serdefault` and the changes are not saved:\n\n``` python\n user_state.success_rates.setdefault(topic_area_group_id, {}).setdefault(\n task_date, {})[str(task_id)] = action.data['is_solved']\n```\n\nI currently do this:\n\n``` python\n user_state._changed_fields.append('success_rates')\n```\n\n`BaseDict` does not follow `setdefault`\n`mongoengine.base.datastructures.BaseDict` does not follow changes made through `setdefault`.\n\nI have a DictField in a model:\n\n``` python\n success_rates = DictField()\n```\n\nI update the field using `serdefault` and the changes are not saved:\n\n``` python\n user_state.success_rates.setdefault(topic_area_group_id, {}).setdefault(\n task_date, {})[str(task_id)] = action.data['is_solved']\n```\n\nI currently do this:\n\n``` python\n user_state._changed_fields.append('success_rates')\n```\n\n", "before_files": [{"content": "import weakref\nimport functools\nimport itertools\nfrom mongoengine.common import _import_class\n\n__all__ = (\"BaseDict\", \"BaseList\")\n\n\nclass BaseDict(dict):\n \"\"\"A special dict so we can watch any changes\"\"\"\n\n _dereferenced = False\n _instance = None\n _name = None\n\n def __init__(self, dict_items, instance, name):\n Document = _import_class('Document')\n EmbeddedDocument = _import_class('EmbeddedDocument')\n\n if isinstance(instance, (Document, EmbeddedDocument)):\n self._instance = weakref.proxy(instance)\n self._name = name\n return super(BaseDict, self).__init__(dict_items)\n\n def __getitem__(self, key, *args, **kwargs):\n value = super(BaseDict, self).__getitem__(key)\n\n EmbeddedDocument = _import_class('EmbeddedDocument')\n if isinstance(value, EmbeddedDocument) and value._instance is None:\n value._instance = self._instance\n elif not isinstance(value, BaseDict) and isinstance(value, dict):\n value = BaseDict(value, None, '%s.%s' % (self._name, key))\n super(BaseDict, self).__setitem__(key, value)\n value._instance = self._instance\n elif not isinstance(value, BaseList) and isinstance(value, list):\n value = BaseList(value, None, '%s.%s' % (self._name, key))\n super(BaseDict, self).__setitem__(key, value)\n value._instance = self._instance\n return value\n\n def __setitem__(self, key, value, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__setitem__(key, value)\n\n def __delete__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).__delete__(*args, **kwargs)\n\n def __delitem__(self, key, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__delitem__(key)\n\n def __delattr__(self, key, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__delattr__(key)\n\n def __getstate__(self):\n self.instance = None\n 
self._dereferenced = False\n return self\n\n def __setstate__(self, state):\n self = state\n return self\n\n def clear(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).clear(*args, **kwargs)\n\n def pop(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).pop(*args, **kwargs)\n\n def popitem(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).popitem(*args, **kwargs)\n\n def update(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).update(*args, **kwargs)\n\n def _mark_as_changed(self, key=None):\n if hasattr(self._instance, '_mark_as_changed'):\n if key:\n self._instance._mark_as_changed('%s.%s' % (self._name, key))\n else:\n self._instance._mark_as_changed(self._name)\n\n\nclass BaseList(list):\n \"\"\"A special list so we can watch any changes\n \"\"\"\n\n _dereferenced = False\n _instance = None\n _name = None\n\n def __init__(self, list_items, instance, name):\n Document = _import_class('Document')\n EmbeddedDocument = _import_class('EmbeddedDocument')\n\n if isinstance(instance, (Document, EmbeddedDocument)):\n self._instance = weakref.proxy(instance)\n self._name = name\n return super(BaseList, self).__init__(list_items)\n\n def __getitem__(self, key, *args, **kwargs):\n value = super(BaseList, self).__getitem__(key)\n\n EmbeddedDocument = _import_class('EmbeddedDocument')\n if isinstance(value, EmbeddedDocument) and value._instance is None:\n value._instance = self._instance\n elif not isinstance(value, BaseDict) and isinstance(value, dict):\n value = BaseDict(value, None, '%s.%s' % (self._name, key))\n super(BaseList, self).__setitem__(key, value)\n value._instance = self._instance\n elif not isinstance(value, BaseList) and isinstance(value, list):\n value = BaseList(value, None, '%s.%s' % (self._name, key))\n super(BaseList, self).__setitem__(key, value)\n value._instance = self._instance\n return value\n\n def __setitem__(self, key, value, *args, **kwargs):\n if isinstance(key, slice):\n self._mark_as_changed()\n else:\n self._mark_as_changed(key)\n return super(BaseList, self).__setitem__(key, value)\n\n def __delitem__(self, key, *args, **kwargs):\n if isinstance(key, slice):\n self._mark_as_changed()\n else:\n self._mark_as_changed(key)\n return super(BaseList, self).__delitem__(key)\n\n def __setslice__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__setslice__(*args, **kwargs)\n\n def __delslice__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__delslice__(*args, **kwargs)\n\n def __getstate__(self):\n self.instance = None\n self._dereferenced = False\n return self\n\n def __setstate__(self, state):\n self = state\n return self\n\n def append(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).append(*args, **kwargs)\n\n def extend(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).extend(*args, **kwargs)\n\n def insert(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).insert(*args, **kwargs)\n\n def pop(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).pop(*args, **kwargs)\n\n def remove(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).remove(*args, **kwargs)\n\n def reverse(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).reverse(*args, **kwargs)\n\n def sort(self, *args, **kwargs):\n 
self._mark_as_changed()\n return super(BaseList, self).sort(*args, **kwargs)\n\n def _mark_as_changed(self, key=None):\n if hasattr(self._instance, '_mark_as_changed'):\n if key:\n self._instance._mark_as_changed('%s.%s' % (self._name, key))\n else:\n self._instance._mark_as_changed(self._name)\n\n\nclass StrictDict(object):\n __slots__ = ()\n _special_fields = set(['get', 'pop', 'iteritems', 'items', 'keys', 'create'])\n _classes = {}\n def __init__(self, **kwargs):\n for k,v in kwargs.iteritems():\n setattr(self, k, v)\n def __getitem__(self, key):\n key = '_reserved_' + key if key in self._special_fields else key\n try:\n return getattr(self, key)\n except AttributeError:\n raise KeyError(key)\n def __setitem__(self, key, value):\n key = '_reserved_' + key if key in self._special_fields else key\n return setattr(self, key, value)\n def __contains__(self, key):\n return hasattr(self, key)\n def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default\n def pop(self, key, default=None):\n v = self.get(key, default)\n try:\n delattr(self, key)\n except AttributeError:\n pass\n return v\n def iteritems(self):\n for key in self:\n yield key, self[key]\n def items(self):\n return [(k, self[k]) for k in iter(self)]\n def keys(self):\n return list(iter(self))\n def __iter__(self):\n return (key for key in self.__slots__ if hasattr(self, key))\n def __len__(self):\n return len(list(self.iteritems()))\n def __eq__(self, other):\n return self.items() == other.items()\n def __neq__(self, other):\n return self.items() != other.items()\n\n @classmethod\n def create(cls, allowed_keys):\n allowed_keys_tuple = tuple(('_reserved_' + k if k in cls._special_fields else k) for k in allowed_keys)\n allowed_keys = frozenset(allowed_keys_tuple)\n if allowed_keys not in cls._classes:\n class SpecificStrictDict(cls):\n __slots__ = allowed_keys_tuple\n def __repr__(self):\n return \"{%s}\" % ', '.join('\"{0!s}\": {0!r}'.format(k,v) for (k,v) in self.iteritems())\n cls._classes[allowed_keys] = SpecificStrictDict\n return cls._classes[allowed_keys]\n\n\nclass SemiStrictDict(StrictDict):\n __slots__ = ('_extras')\n _classes = {}\n def __getattr__(self, attr):\n try:\n super(SemiStrictDict, self).__getattr__(attr)\n except AttributeError:\n try:\n return self.__getattribute__('_extras')[attr]\n except KeyError as e:\n raise AttributeError(e)\n def __setattr__(self, attr, value):\n try:\n super(SemiStrictDict, self).__setattr__(attr, value)\n except AttributeError:\n try:\n self._extras[attr] = value\n except AttributeError:\n self._extras = {attr: value}\n\n def __delattr__(self, attr):\n try:\n super(SemiStrictDict, self).__delattr__(attr)\n except AttributeError:\n try:\n del self._extras[attr]\n except KeyError as e:\n raise AttributeError(e)\n\n def __iter__(self):\n try:\n extras_iter = iter(self.__getattribute__('_extras'))\n except AttributeError:\n extras_iter = ()\n return itertools.chain(super(SemiStrictDict, self).__iter__(), extras_iter)\n", "path": "mongoengine/base/datastructures.py"}]}
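The diff in the record above adds a `setdefault` override to `BaseDict` so the owning document gets marked as changed. The subtlety is that `dict.setdefault` both reads and writes without going through `__setitem__`, so a change tracker hooked only on the other mutators never fires. A minimal self-contained sketch of that idea — plain Python, names illustrative, not mongoengine's actual classes:

```python
class TrackedDict(dict):
    """Plain-Python stand-in for mongoengine's BaseDict change tracking."""

    changed = False

    def __setitem__(self, key, value):
        self.changed = True
        super().__setitem__(key, value)

    def setdefault(self, *args, **kwargs):  # mirrors the patch in this record
        self.changed = True
        return super().setdefault(*args, **kwargs)


d = TrackedDict()
d.setdefault('success_rates', {})['2015-01-01'] = True
assert d.changed  # without the override, setdefault would leave this False
```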
3,870
149
gh_patches_debug_890
rasdani/github-patches
git_diff
falconry__falcon-801
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Default OPTIONS responder does not set Content-Length to "0" Per RFC 7231: > A server MUST generate a Content-Length field with a value of "0" if no payload body is to be sent in the response. </issue> <code> [start of falcon/responders.py] 1 # Copyright 2013 by Rackspace Hosting, Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from falcon.errors import HTTPBadRequest 16 from falcon.errors import HTTPNotFound 17 from falcon.status_codes import HTTP_204 18 from falcon.status_codes import HTTP_405 19 20 21 def path_not_found(req, resp, **kwargs): 22 """Raise 404 HTTPNotFound error""" 23 raise HTTPNotFound() 24 25 26 def bad_request(req, resp, **kwargs): 27 """Raise 400 HTTPBadRequest error""" 28 raise HTTPBadRequest('Bad request', 'Invalid HTTP method') 29 30 31 def create_method_not_allowed(allowed_methods): 32 """Creates a responder for "405 Method Not Allowed" 33 34 Args: 35 allowed_methods: A list of HTTP methods (uppercase) that should be 36 returned in the Allow header. 37 38 """ 39 allowed = ', '.join(allowed_methods) 40 41 def method_not_allowed(req, resp, **kwargs): 42 resp.status = HTTP_405 43 resp.set_header('Allow', allowed) 44 45 return method_not_allowed 46 47 48 def create_default_options(allowed_methods): 49 """Creates a default responder for the OPTIONS method 50 51 Args: 52 allowed_methods: A list of HTTP methods (uppercase) that should be 53 returned in the Allow header. 54 55 """ 56 allowed = ', '.join(allowed_methods) 57 58 def on_options(req, resp, **kwargs): 59 resp.status = HTTP_204 60 resp.set_header('Allow', allowed) 61 62 return on_options 63 [end of falcon/responders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/falcon/responders.py b/falcon/responders.py --- a/falcon/responders.py +++ b/falcon/responders.py @@ -58,5 +58,6 @@ def on_options(req, resp, **kwargs): resp.status = HTTP_204 resp.set_header('Allow', allowed) + resp.set_header('Content-Length', '0') return on_options
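The one-line fix above sets the missing header in falcon's default OPTIONS responder. A quick way to exercise it is falcon's test client — a minimal sketch, assuming a falcon version that ships `falcon.testing.TestClient` (1.0+); the resource class and route are placeholders:

```python
import falcon
import falcon.testing


class Things:  # placeholder resource implementing only GET
    def on_get(self, req, resp):
        resp.body = 'ok'


app = falcon.API()
app.add_route('/things', Things())
client = falcon.testing.TestClient(app)

# No on_options is defined above, so falcon's default OPTIONS responder answers.
result = client.simulate_options('/things')
assert result.status == falcon.HTTP_204
assert 'allow' in result.headers
assert result.headers['content-length'] == '0'  # the behaviour the patch adds
```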
{"golden_diff": "diff --git a/falcon/responders.py b/falcon/responders.py\n--- a/falcon/responders.py\n+++ b/falcon/responders.py\n@@ -58,5 +58,6 @@\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_204\n resp.set_header('Allow', allowed)\n+ resp.set_header('Content-Length', '0')\n \n return on_options\n", "issue": "Default OPTIONS responder does not set Content-Length to \"0\"\nPer RFC 7231:\n\n> A server MUST generate a Content-Length field with a value of \"0\" if no payload body is to be sent in the response.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom falcon.errors import HTTPBadRequest\nfrom falcon.errors import HTTPNotFound\nfrom falcon.status_codes import HTTP_204\nfrom falcon.status_codes import HTTP_405\n\n\ndef path_not_found(req, resp, **kwargs):\n \"\"\"Raise 404 HTTPNotFound error\"\"\"\n raise HTTPNotFound()\n\n\ndef bad_request(req, resp, **kwargs):\n \"\"\"Raise 400 HTTPBadRequest error\"\"\"\n raise HTTPBadRequest('Bad request', 'Invalid HTTP method')\n\n\ndef create_method_not_allowed(allowed_methods):\n \"\"\"Creates a responder for \"405 Method Not Allowed\"\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def method_not_allowed(req, resp, **kwargs):\n resp.status = HTTP_405\n resp.set_header('Allow', allowed)\n\n return method_not_allowed\n\n\ndef create_default_options(allowed_methods):\n \"\"\"Creates a default responder for the OPTIONS method\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_204\n resp.set_header('Allow', allowed)\n\n return on_options\n", "path": "falcon/responders.py"}]}
1,143
92
gh_patches_debug_8506
rasdani/github-patches
git_diff
nipy__nipype-2422
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ICC: Cannot set the undefined 'sessions_F_map' attribute of a 'ICCOutputSpec' object ### Summary Trying to run ICC raises a `TraitError` with the following description: "TraitError: Cannot set the undefined 'sessions_F_map' attribute of a 'ICCOutputSpec' object." I may be doing things wrong, in which case a more informative error message would be welcome. Please look at the code snippet under _Script/Workflow Details_. _Note:_ I do get maps (icc_map.nii, session_var_map.nii, subject_var_map.nii) as an output in my `os.getcwd()`. ### Actual behavior ICC triggers an exception. ### Expected behavior ICC runs smoothly. ### How to replicate the behavior ```bash mkdir /Users/user/test mv mask.nii.gz sub-{01,02}_ses-{01,02}.nii.gz /Users/user/test # Where these niftis are sensible "ICC compatible" files # And now run code pasted below ``` ### Script/Workflow details Please put URL to code or code here (if not too long). ```python import os.path from nipype.algorithms import icc project_dir = '/Users/user/test/' def fname(sub, ses): return os.path.join(project_dir, f'sub-{sub}_ses-{ses}.nii.gz') lst = [[fname(1, 1), fname(1, 2)], [fname(2, 1), fname(2, 2)]] mask = os.path.join(project_dir, 'mask.nii.gz') x = icc.ICC(subjects_sessions=lst, mask=mask) x.run() ``` ### Platform details: Please paste the output of: `python -c "import nipype; print(nipype.get_info()); print(nipype.__version__)"` ```python {'commit_hash': '%h', 'commit_source': 'archive substitution', 'networkx_version': '2.0', 'nibabel_version': '2.2.1', 'nipype_version': '1.0.0', 'numpy_version': '1.13.3', 'pkg_path': '/Users/user/anaconda3/lib/python3.6/site-packages/nipype', 'scipy_version': '1.0.0', 'sys_executable': '/Users/user/anaconda3/bin/python', 'sys_platform': 'darwin', 'sys_version': '3.6.4 | packaged by conda-forge | (default, Dec 23 2017, 16:54:01) \n[GCC 4.2.1 Compatible Apple LLVM 6.1.0 (clang-602.0.53)]', 'traits_version': '4.6.0'} ``` ### Execution environment Choose one - My python environment outside container </issue> <code> [start of nipype/algorithms/icc.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import (print_function, division, unicode_literals, 3 absolute_import) 4 from builtins import range 5 import os 6 import numpy as np 7 from numpy import ones, kron, mean, eye, hstack, dot, tile 8 import nibabel as nb 9 from scipy.linalg import pinv 10 from ..interfaces.base import BaseInterfaceInputSpec, TraitedSpec, \ 11 BaseInterface, traits, File 12 from ..utils import NUMPY_MMAP 13 14 15 class ICCInputSpec(BaseInterfaceInputSpec): 16 subjects_sessions = traits.List( 17 traits.List(File(exists=True)), 18 desc="n subjects m sessions 3D stat files", 19 mandatory=True) 20 mask = File(exists=True, mandatory=True) 21 22 23 class ICCOutputSpec(TraitedSpec): 24 icc_map = File(exists=True) 25 session_var_map = File(exists=True, desc="variance between sessions") 26 subject_var_map = File(exists=True, desc="variance between subjects") 27 28 29 class ICC(BaseInterface): 30 ''' 31 Calculates Interclass Correlation Coefficient (3,1) as defined in 32 P. E. Shrout & Joseph L. Fleiss (1979). "Intraclass Correlations: Uses in 33 Assessing Rater Reliability". Psychological Bulletin 86 (2): 420-428. This 34 particular implementation is aimed at relaibility (test-retest) studies. 
35 ''' 36 input_spec = ICCInputSpec 37 output_spec = ICCOutputSpec 38 39 def _run_interface(self, runtime): 40 maskdata = nb.load(self.inputs.mask).get_data() 41 maskdata = np.logical_not( 42 np.logical_or(maskdata == 0, np.isnan(maskdata))) 43 44 session_datas = [[ 45 nb.load(fname, mmap=NUMPY_MMAP).get_data()[maskdata].reshape( 46 -1, 1) for fname in sessions 47 ] for sessions in self.inputs.subjects_sessions] 48 list_of_sessions = [ 49 np.dstack(session_data) for session_data in session_datas 50 ] 51 all_data = np.hstack(list_of_sessions) 52 icc = np.zeros(session_datas[0][0].shape) 53 session_F = np.zeros(session_datas[0][0].shape) 54 session_var = np.zeros(session_datas[0][0].shape) 55 subject_var = np.zeros(session_datas[0][0].shape) 56 57 for x in range(icc.shape[0]): 58 Y = all_data[x, :, :] 59 icc[x], subject_var[x], session_var[x], session_F[ 60 x], _, _ = ICC_rep_anova(Y) 61 62 nim = nb.load(self.inputs.subjects_sessions[0][0]) 63 new_data = np.zeros(nim.shape) 64 new_data[maskdata] = icc.reshape(-1, ) 65 new_img = nb.Nifti1Image(new_data, nim.affine, nim.header) 66 nb.save(new_img, 'icc_map.nii') 67 68 new_data = np.zeros(nim.shape) 69 new_data[maskdata] = session_var.reshape(-1, ) 70 new_img = nb.Nifti1Image(new_data, nim.affine, nim.header) 71 nb.save(new_img, 'session_var_map.nii') 72 73 new_data = np.zeros(nim.shape) 74 new_data[maskdata] = subject_var.reshape(-1, ) 75 new_img = nb.Nifti1Image(new_data, nim.affine, nim.header) 76 nb.save(new_img, 'subject_var_map.nii') 77 78 return runtime 79 80 def _list_outputs(self): 81 outputs = self._outputs().get() 82 outputs['icc_map'] = os.path.abspath('icc_map.nii') 83 outputs['sessions_F_map'] = os.path.abspath('sessions_F_map.nii') 84 outputs['session_var_map'] = os.path.abspath('session_var_map.nii') 85 outputs['subject_var_map'] = os.path.abspath('subject_var_map.nii') 86 return outputs 87 88 89 def ICC_rep_anova(Y): 90 ''' 91 the data Y are entered as a 'table' ie subjects are in rows and repeated 92 measures in columns 93 94 One Sample Repeated measure ANOVA 95 96 Y = XB + E with X = [FaTor / Subjects] 97 ''' 98 99 [nb_subjects, nb_conditions] = Y.shape 100 dfc = nb_conditions - 1 101 dfe = (nb_subjects - 1) * dfc 102 dfr = nb_subjects - 1 103 104 # Compute the repeated measure effect 105 # ------------------------------------ 106 107 # Sum Square Total 108 mean_Y = mean(Y) 109 SST = ((Y - mean_Y)**2).sum() 110 111 # create the design matrix for the different levels 112 x = kron(eye(nb_conditions), ones((nb_subjects, 1))) # sessions 113 x0 = tile(eye(nb_subjects), (nb_conditions, 1)) # subjects 114 X = hstack([x, x0]) 115 116 # Sum Square Error 117 predicted_Y = dot(dot(dot(X, pinv(dot(X.T, X))), X.T), Y.flatten('F')) 118 residuals = Y.flatten('F') - predicted_Y 119 SSE = (residuals**2).sum() 120 121 residuals.shape = Y.shape 122 123 MSE = SSE / dfe 124 125 # Sum square session effect - between colums/sessions 126 SSC = ((mean(Y, 0) - mean_Y)**2).sum() * nb_subjects 127 MSC = SSC / dfc / nb_subjects 128 129 session_effect_F = MSC / MSE 130 131 # Sum Square subject effect - between rows/subjects 132 SSR = SST - SSC - SSE 133 MSR = SSR / dfr 134 135 # ICC(3,1) = (mean square subjeT - mean square error) / 136 # (mean square subjeT + (k-1)*-mean square error) 137 ICC = (MSR - MSE) / (MSR + dfc * MSE) 138 139 e_var = MSE # variance of error 140 r_var = (MSR - MSE) / nb_conditions # variance between subjects 141 142 return ICC, r_var, e_var, session_effect_F, dfc, dfe 143 [end of nipype/algorithms/icc.py] </code> I need you to 
solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nipype/algorithms/icc.py b/nipype/algorithms/icc.py --- a/nipype/algorithms/icc.py +++ b/nipype/algorithms/icc.py @@ -80,7 +80,6 @@ def _list_outputs(self): outputs = self._outputs().get() outputs['icc_map'] = os.path.abspath('icc_map.nii') - outputs['sessions_F_map'] = os.path.abspath('sessions_F_map.nii') outputs['session_var_map'] = os.path.abspath('session_var_map.nii') outputs['subject_var_map'] = os.path.abspath('subject_var_map.nii') return outputs
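The fix simply drops the `sessions_F_map` entry from `_list_outputs`, since `ICCOutputSpec` never declared that trait (the interface computes the session F statistic but never writes it out as a map). For a feel for what the module-level `ICC_rep_anova` returns, a toy call — the ratings below are made up:

```python
import numpy as np
from nipype.algorithms.icc import ICC_rep_anova

# Rows are subjects, columns are repeated sessions (values invented).
Y = np.array([[8.0, 9.0],
              [4.0, 6.0]])

icc, r_var, e_var, session_F, dfc, dfe = ICC_rep_anova(Y)
print(icc)  # 0.96 here: subject ordering is almost perfectly stable across sessions
```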
{"golden_diff": "diff --git a/nipype/algorithms/icc.py b/nipype/algorithms/icc.py\n--- a/nipype/algorithms/icc.py\n+++ b/nipype/algorithms/icc.py\n@@ -80,7 +80,6 @@\n def _list_outputs(self):\n outputs = self._outputs().get()\n outputs['icc_map'] = os.path.abspath('icc_map.nii')\n- outputs['sessions_F_map'] = os.path.abspath('sessions_F_map.nii')\n outputs['session_var_map'] = os.path.abspath('session_var_map.nii')\n outputs['subject_var_map'] = os.path.abspath('subject_var_map.nii')\n return outputs\n", "issue": "ICC: Cannot set the undefined 'sessions_F_map' attribute of a 'ICCOutputSpec' object\n### Summary\r\n\r\nTrying to run ICC raises a `TraitError` with the following description: \"TraitError: Cannot set the undefined 'sessions_F_map' attribute of a 'ICCOutputSpec' object.\"\r\nI may be doing things wrong, in which case a more informative error message would be welcome. Please look at the code snippet under _Script/Workflow Details_.\r\n\r\n_Note:_ I do get maps (icc_map.nii, session_var_map.nii, subject_var_map.nii) as an output in my `os.getcwd()`.\r\n\r\n### Actual behavior\r\nICC triggers an exception.\r\n\r\n### Expected behavior\r\nICC runs smoothly.\r\n\r\n### How to replicate the behavior\r\n```bash\r\nmkdir /Users/user/test\r\nmv mask.nii.gz sub-{01,02}_ses-{01,02}.nii.gz /Users/user/test\r\n# Where these niftis are sensible \"ICC compatible\" files\r\n# And now run code pasted below\r\n```\r\n### Script/Workflow details\r\n\r\nPlease put URL to code or code here (if not too long).\r\n```python\r\nimport os.path\r\nfrom nipype.algorithms import icc\r\n\r\nproject_dir = '/Users/user/test/'\r\n\r\ndef fname(sub, ses):\r\n return os.path.join(project_dir, f'sub-{sub}_ses-{ses}.nii.gz')\r\n\r\nlst = [[fname(1, 1), fname(1, 2)], \r\n [fname(2, 1), fname(2, 2)]]\r\nmask = os.path.join(project_dir, 'mask.nii.gz')\r\n\r\nx = icc.ICC(subjects_sessions=lst, mask=mask)\r\nx.run()\r\n```\r\n\r\n### Platform details:\r\n\r\nPlease paste the output of: `python -c \"import nipype; print(nipype.get_info()); print(nipype.__version__)\"`\r\n\r\n```python\r\n{'commit_hash': '%h',\r\n 'commit_source': 'archive substitution',\r\n 'networkx_version': '2.0',\r\n 'nibabel_version': '2.2.1',\r\n 'nipype_version': '1.0.0',\r\n 'numpy_version': '1.13.3',\r\n 'pkg_path': '/Users/user/anaconda3/lib/python3.6/site-packages/nipype',\r\n 'scipy_version': '1.0.0',\r\n 'sys_executable': '/Users/user/anaconda3/bin/python',\r\n 'sys_platform': 'darwin',\r\n 'sys_version': '3.6.4 | packaged by conda-forge | (default, Dec 23 2017, 16:54:01) \\n[GCC 4.2.1 Compatible Apple LLVM 6.1.0 (clang-602.0.53)]',\r\n 'traits_version': '4.6.0'}\r\n\r\n```\r\n\r\n### Execution environment\r\n\r\nChoose one\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\nfrom builtins import range\nimport os\nimport numpy as np\nfrom numpy import ones, kron, mean, eye, hstack, dot, tile\nimport nibabel as nb\nfrom scipy.linalg import pinv\nfrom ..interfaces.base import BaseInterfaceInputSpec, TraitedSpec, \\\n BaseInterface, traits, File\nfrom ..utils import NUMPY_MMAP\n\n\nclass ICCInputSpec(BaseInterfaceInputSpec):\n subjects_sessions = traits.List(\n traits.List(File(exists=True)),\n desc=\"n subjects m sessions 3D stat files\",\n mandatory=True)\n mask = File(exists=True, mandatory=True)\n\n\nclass ICCOutputSpec(TraitedSpec):\n icc_map = File(exists=True)\n session_var_map = File(exists=True, 
desc=\"variance between sessions\")\n subject_var_map = File(exists=True, desc=\"variance between subjects\")\n\n\nclass ICC(BaseInterface):\n '''\n Calculates Interclass Correlation Coefficient (3,1) as defined in\n P. E. Shrout & Joseph L. Fleiss (1979). \"Intraclass Correlations: Uses in\n Assessing Rater Reliability\". Psychological Bulletin 86 (2): 420-428. This\n particular implementation is aimed at relaibility (test-retest) studies.\n '''\n input_spec = ICCInputSpec\n output_spec = ICCOutputSpec\n\n def _run_interface(self, runtime):\n maskdata = nb.load(self.inputs.mask).get_data()\n maskdata = np.logical_not(\n np.logical_or(maskdata == 0, np.isnan(maskdata)))\n\n session_datas = [[\n nb.load(fname, mmap=NUMPY_MMAP).get_data()[maskdata].reshape(\n -1, 1) for fname in sessions\n ] for sessions in self.inputs.subjects_sessions]\n list_of_sessions = [\n np.dstack(session_data) for session_data in session_datas\n ]\n all_data = np.hstack(list_of_sessions)\n icc = np.zeros(session_datas[0][0].shape)\n session_F = np.zeros(session_datas[0][0].shape)\n session_var = np.zeros(session_datas[0][0].shape)\n subject_var = np.zeros(session_datas[0][0].shape)\n\n for x in range(icc.shape[0]):\n Y = all_data[x, :, :]\n icc[x], subject_var[x], session_var[x], session_F[\n x], _, _ = ICC_rep_anova(Y)\n\n nim = nb.load(self.inputs.subjects_sessions[0][0])\n new_data = np.zeros(nim.shape)\n new_data[maskdata] = icc.reshape(-1, )\n new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)\n nb.save(new_img, 'icc_map.nii')\n\n new_data = np.zeros(nim.shape)\n new_data[maskdata] = session_var.reshape(-1, )\n new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)\n nb.save(new_img, 'session_var_map.nii')\n\n new_data = np.zeros(nim.shape)\n new_data[maskdata] = subject_var.reshape(-1, )\n new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)\n nb.save(new_img, 'subject_var_map.nii')\n\n return runtime\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n outputs['icc_map'] = os.path.abspath('icc_map.nii')\n outputs['sessions_F_map'] = os.path.abspath('sessions_F_map.nii')\n outputs['session_var_map'] = os.path.abspath('session_var_map.nii')\n outputs['subject_var_map'] = os.path.abspath('subject_var_map.nii')\n return outputs\n\n\ndef ICC_rep_anova(Y):\n '''\n the data Y are entered as a 'table' ie subjects are in rows and repeated\n measures in columns\n\n One Sample Repeated measure ANOVA\n\n Y = XB + E with X = [FaTor / Subjects]\n '''\n\n [nb_subjects, nb_conditions] = Y.shape\n dfc = nb_conditions - 1\n dfe = (nb_subjects - 1) * dfc\n dfr = nb_subjects - 1\n\n # Compute the repeated measure effect\n # ------------------------------------\n\n # Sum Square Total\n mean_Y = mean(Y)\n SST = ((Y - mean_Y)**2).sum()\n\n # create the design matrix for the different levels\n x = kron(eye(nb_conditions), ones((nb_subjects, 1))) # sessions\n x0 = tile(eye(nb_subjects), (nb_conditions, 1)) # subjects\n X = hstack([x, x0])\n\n # Sum Square Error\n predicted_Y = dot(dot(dot(X, pinv(dot(X.T, X))), X.T), Y.flatten('F'))\n residuals = Y.flatten('F') - predicted_Y\n SSE = (residuals**2).sum()\n\n residuals.shape = Y.shape\n\n MSE = SSE / dfe\n\n # Sum square session effect - between colums/sessions\n SSC = ((mean(Y, 0) - mean_Y)**2).sum() * nb_subjects\n MSC = SSC / dfc / nb_subjects\n\n session_effect_F = MSC / MSE\n\n # Sum Square subject effect - between rows/subjects\n SSR = SST - SSC - SSE\n MSR = SSR / dfr\n\n # ICC(3,1) = (mean square subjeT - mean square error) /\n # (mean square 
subjeT + (k-1)*-mean square error)\n ICC = (MSR - MSE) / (MSR + dfc * MSE)\n\n e_var = MSE # variance of error\n r_var = (MSR - MSE) / nb_conditions # variance between subjects\n\n return ICC, r_var, e_var, session_effect_F, dfc, dfe\n", "path": "nipype/algorithms/icc.py"}]}
2,816
143
gh_patches_debug_13547
rasdani/github-patches
git_diff
kartoza__prj.app-263
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Disqus functionality is currently broken There should be disqus inline chat widgets on each version page and each entry page. Currently these are not working - can we work to fix it. </issue> <code> [start of django_project/core/settings/project.py] 1 # coding=utf-8 2 3 """Project level settings. 4 5 Adjust these values as needed but don't commit passwords etc. to any public 6 repository! 7 """ 8 9 import os # noqa 10 from django.utils.translation import ugettext_lazy as _ 11 from .utils import absolute_path 12 from .contrib import * # noqa 13 14 # Project apps 15 INSTALLED_APPS += ( 16 'base', 17 'changes', 18 'github_issue', 19 'vota', 20 'disqus', 21 ) 22 23 # Due to profile page does not available, this will redirect to home page after login 24 LOGIN_REDIRECT_URL = '/' 25 26 # How many versions to list in each project box 27 PROJECT_VERSION_LIST_SIZE = 10 28 29 # Set debug to false for production 30 DEBUG = TEMPLATE_DEBUG = False 31 32 SOUTH_TESTS_MIGRATE = False 33 34 35 # Set languages which want to be translated 36 LANGUAGES = ( 37 ('en', _('English')), 38 ('af', _('Afrikaans')), 39 ('id', _('Indonesian')), 40 ('ko', _('Korean')), 41 ) 42 43 # Set storage path for the translation files 44 LOCALE_PATHS = (absolute_path('locale'),) 45 46 47 MIDDLEWARE_CLASSES = ( 48 # For nav bar generation 49 'core.custom_middleware.NavContextMiddleware', 50 ) + MIDDLEWARE_CLASSES 51 52 # Project specific javascript files to be pipelined 53 # For third party libs like jquery should go in contrib.py 54 PIPELINE_JS['project'] = { 55 'source_filenames': ( 56 'js/csrf-ajax.js', 57 'js/changelog.js', 58 'js/github-issue.js' 59 ), 60 'output_filename': 'js/project.js', 61 } 62 63 # Project specific css files to be pipelined 64 # For third party libs like bootstrap should go in contrib.py 65 PIPELINE_CSS['project'] = { 66 'source_filenames': ( 67 'css/changelog.css', 68 ), 69 'output_filename': 'css/project.css', 70 'extra_context': { 71 'media': 'screen, projection', 72 }, 73 } 74 [end of django_project/core/settings/project.py] [start of django_project/core/settings/contrib.py] 1 # coding=utf-8 2 """ 3 core.settings.contrib 4 """ 5 from .base import * # noqa 6 7 # Extra installed apps - grapelli needs to be added before others 8 INSTALLED_APPS = ( 9 'grappelli', 10 ) + INSTALLED_APPS 11 12 INSTALLED_APPS += ( 13 'raven.contrib.django.raven_compat', # enable Raven plugin 14 'crispy_forms', 15 'widget_tweaks', # lets us add some bootstrap css to form elements 16 'easy_thumbnails', 17 'reversion', 18 'rosetta', 19 'embed_video', 20 'django_hashedfilenamestorage', 21 'django_countries', # for sponsor addresses 22 # 'user_map', 23 ) 24 25 26 MIGRATION_MODULES = {'accounts': 'core.migration'} 27 28 GRAPPELLI_ADMIN_TITLE = 'Site administration panel' 29 30 STOP_WORDS = ( 31 'a', 'an', 'and', 'if', 'is', 'the', 'in', 'i', 'you', 'other', 32 'this', 'that' 33 ) 34 35 CRISPY_TEMPLATE_PACK = 'bootstrap3' 36 37 # Easy-thumbnails options 38 THUMBNAIL_SUBDIR = 'thumbnails' 39 THUMBNAIL_ALIASES = { 40 '': { 41 'entry': {'size': (50, 50), 'crop': True}, 42 'medium-entry': {'size': (100, 100), 'crop': True}, 43 'large-entry': {'size': (400, 300), 'crop': True}, 44 'thumb300x200': {'size': (300, 200), 'crop': True}, 45 }, 46 } 47 48 # Pipeline related settings 49 50 INSTALLED_APPS += ( 51 'pipeline',) 52 53 MIDDLEWARE_CLASSES += ( 54 # For rosetta localisation 55 'django.middleware.locale.LocaleMiddleware', 56 ) 57 
58 DEFAULT_FILE_STORAGE = ( 59 'django_hashedfilenamestorage.storage.HashedFilenameFileSystemStorage') 60 61 # use underscore template function 62 PIPELINE_TEMPLATE_FUNC = '_.template' 63 64 # enable cached storage - requires uglify.js (node.js) 65 STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage' 66 67 # Contributed / third party js libs for pipeline compression 68 # For hand rolled js for this app, use project.py 69 PIPELINE_JS = {} 70 71 # Contributed / third party css for pipeline compression 72 # For hand rolled css for this app, use project.py 73 PIPELINE_CSS = {} 74 75 # These get enabled in prod.py 76 PIPELINE_ENABLED = False 77 PIPELINE_CSS_COMPRESSOR = None 78 PIPELINE_JS_COMPRESSOR = None 79 [end of django_project/core/settings/contrib.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django_project/core/settings/contrib.py b/django_project/core/settings/contrib.py --- a/django_project/core/settings/contrib.py +++ b/django_project/core/settings/contrib.py @@ -20,8 +20,12 @@ 'django_hashedfilenamestorage', 'django_countries', # for sponsor addresses # 'user_map', + 'disqus', ) +# Set disqus and shortname +# noinspection PyUnresolvedReferences +from .secret import DISQUS_WEBSITE_SHORTNAME # noqa MIGRATION_MODULES = {'accounts': 'core.migration'} diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py --- a/django_project/core/settings/project.py +++ b/django_project/core/settings/project.py @@ -17,7 +17,6 @@ 'changes', 'github_issue', 'vota', - 'disqus', ) # Due to profile page does not available, this will redirect to home page after login
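The patch registers the `disqus` app in the shared contrib settings and pulls the shortname from the uncommitted `secret` module. A minimal sketch of what that module would need to provide — the value is a placeholder:

```python
# django_project/core/settings/secret.py -- kept out of version control.
# The shortname is a placeholder; use the site's actual Disqus forum name.
DISQUS_WEBSITE_SHORTNAME = 'changelog-site'
```

Templates can then render the inline widgets with django-disqus's tags (`{% load disqus_tags %}` followed by `{% disqus_show_comments %}`), which is presumably what the version and entry pages use.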
{"golden_diff": "diff --git a/django_project/core/settings/contrib.py b/django_project/core/settings/contrib.py\n--- a/django_project/core/settings/contrib.py\n+++ b/django_project/core/settings/contrib.py\n@@ -20,8 +20,12 @@\n 'django_hashedfilenamestorage',\n 'django_countries', # for sponsor addresses\n # 'user_map',\n+ 'disqus',\n )\n \n+# Set disqus and shortname\n+# noinspection PyUnresolvedReferences\n+from .secret import DISQUS_WEBSITE_SHORTNAME # noqa\n \n MIGRATION_MODULES = {'accounts': 'core.migration'}\n \ndiff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py\n--- a/django_project/core/settings/project.py\n+++ b/django_project/core/settings/project.py\n@@ -17,7 +17,6 @@\n 'changes',\n 'github_issue',\n 'vota',\n- 'disqus',\n )\n \n # Due to profile page does not available, this will redirect to home page after login\n", "issue": "Disqus functionality is currently broken\nThere should be disqus inline chat widgets on each version page and each entry page. Currently these are not working - can we work to fix it.\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += (\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n 'disqus',\n)\n\n# Due to profile page does not available, this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('af', _('Afrikaans')),\n ('id', _('Indonesian')),\n ('ko', _('Korean')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE_CLASSES = (\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE_JS['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js'\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE_CSS['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n}\n", "path": "django_project/core/settings/project.py"}, {"content": "# coding=utf-8\n\"\"\"\ncore.settings.contrib\n\"\"\"\nfrom .base import * # noqa\n\n# Extra installed apps - grapelli needs to be added before others\nINSTALLED_APPS = (\n 'grappelli',\n) + INSTALLED_APPS\n\nINSTALLED_APPS += (\n 'raven.contrib.django.raven_compat', # enable Raven plugin\n 'crispy_forms',\n 'widget_tweaks', # lets us add some bootstrap css to form elements\n 'easy_thumbnails',\n 'reversion',\n 'rosetta',\n 'embed_video',\n 'django_hashedfilenamestorage',\n 'django_countries', # for sponsor addresses\n # 'user_map',\n)\n\n\nMIGRATION_MODULES = {'accounts': 'core.migration'}\n\nGRAPPELLI_ADMIN_TITLE = 'Site administration panel'\n\nSTOP_WORDS = (\n 'a', 'an', 'and', 'if', 'is', 'the', 'in', 
'i', 'you', 'other',\n 'this', 'that'\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\n# Easy-thumbnails options\nTHUMBNAIL_SUBDIR = 'thumbnails'\nTHUMBNAIL_ALIASES = {\n '': {\n 'entry': {'size': (50, 50), 'crop': True},\n 'medium-entry': {'size': (100, 100), 'crop': True},\n 'large-entry': {'size': (400, 300), 'crop': True},\n 'thumb300x200': {'size': (300, 200), 'crop': True},\n },\n}\n\n# Pipeline related settings\n\nINSTALLED_APPS += (\n 'pipeline',)\n\nMIDDLEWARE_CLASSES += (\n # For rosetta localisation\n 'django.middleware.locale.LocaleMiddleware',\n)\n\nDEFAULT_FILE_STORAGE = (\n 'django_hashedfilenamestorage.storage.HashedFilenameFileSystemStorage')\n\n# use underscore template function\nPIPELINE_TEMPLATE_FUNC = '_.template'\n\n# enable cached storage - requires uglify.js (node.js)\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\n\n# Contributed / third party js libs for pipeline compression\n# For hand rolled js for this app, use project.py\nPIPELINE_JS = {}\n\n# Contributed / third party css for pipeline compression\n# For hand rolled css for this app, use project.py\nPIPELINE_CSS = {}\n\n# These get enabled in prod.py\nPIPELINE_ENABLED = False\nPIPELINE_CSS_COMPRESSOR = None\nPIPELINE_JS_COMPRESSOR = None\n", "path": "django_project/core/settings/contrib.py"}]}
1,864
228
gh_patches_debug_20418
rasdani/github-patches
git_diff
nonebot__nonebot2-238
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: the built-in single_session plugin has a few bugs
**Describe the problem:**

The built-in `single_session` plugin can only handle `event`s that have a `get_session_id` method; if a `matcher` listens for a `metaevent`, its `run_preprocessor` hook raises an error.

**How to reproduce?**

[This line](https://github.com/nonebot/nonebot2/blob/93ffc93a80cf9e3103eb4a164e7b32ab3cdd0882/nonebot/plugins/single_session.py#L13) restricts the plugin to events that provide `get_session_id`, but events without that method get no special handling, so an error is raised.
Besides that, the [condition below it](https://github.com/nonebot/nonebot2/blob/93ffc93a80cf9e3103eb4a164e7b32ab3cdd0882/nonebot/plugins/single_session.py#L16) is also wrong: an event encountered for the first time should not be ignored.

**Expected result**

The plugin works as intended.
</issue>
<code>
[start of nonebot/plugins/single_session.py]
1 from typing import Dict, Optional
2
3 from nonebot.typing import T_State
4 from nonebot.matcher import Matcher
5 from nonebot.adapters import Bot, Event
6 from nonebot.message import run_preprocessor, run_postprocessor, IgnoredException
7
8 _running_matcher: Dict[str, int] = {}
9
10
11 @run_preprocessor
12 async def _(matcher: Matcher, bot: Bot, event: Event, state: T_State):
13     session_id = event.get_session_id()
14     event_id = id(event)
15
16     if _running_matcher.get(session_id, None) != event_id:
17         raise IgnoredException("Annother matcher running")
18
19     _running_matcher[session_id] = event_id
20
21
22 @run_postprocessor
23 async def _(matcher: Matcher, exception: Optional[Exception], bot: Bot, event: Event, state: T_State):
24     session_id = event.get_session_id()
25     if session_id in _running_matcher:
26         del _running_matcher[session_id]
27 [end of nonebot/plugins/single_session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)


 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1

-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/nonebot/plugins/single_session.py b/nonebot/plugins/single_session.py --- a/nonebot/plugins/single_session.py +++ b/nonebot/plugins/single_session.py @@ -10,17 +10,23 @@ @run_preprocessor async def _(matcher: Matcher, bot: Bot, event: Event, state: T_State): - session_id = event.get_session_id() - event_id = id(event) - - if _running_matcher.get(session_id, None) != event_id: + try: + session_id = event.get_session_id() + except Exception: + return + current_event_id = id(event) + event_id = _running_matcher.get(session_id, None) + if event_id and event_id != current_event_id: raise IgnoredException("Annother matcher running") - _running_matcher[session_id] = event_id + _running_matcher[session_id] = current_event_id @run_postprocessor async def _(matcher: Matcher, exception: Optional[Exception], bot: Bot, event: Event, state: T_State): - session_id = event.get_session_id() + try: + session_id = event.get_session_id() + except Exception: + return if session_id in _running_matcher: del _running_matcher[session_id]
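The patch tolerates events without a session id (meta events raise from `get_session_id()`) and only rejects an event when a *different* event currently owns the session. The core check, distilled into a stand-alone sketch — not nonebot's actual API:

```python
# Stand-alone distillation of the patched run_preprocessor logic.
_running_matcher = {}

def should_ignore(event) -> bool:
    try:
        session_id = event.get_session_id()
    except Exception:
        return False  # session-less events (e.g. meta events) always run
    current_event_id = id(event)
    running_id = _running_matcher.get(session_id)
    if running_id is not None and running_id != current_event_id:
        return True   # another event already holds this session
    _running_matcher[session_id] = current_event_id
    return False
```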
{"golden_diff": "diff --git a/nonebot/plugins/single_session.py b/nonebot/plugins/single_session.py\n--- a/nonebot/plugins/single_session.py\n+++ b/nonebot/plugins/single_session.py\n@@ -10,17 +10,23 @@\n \n @run_preprocessor\n async def _(matcher: Matcher, bot: Bot, event: Event, state: T_State):\n- session_id = event.get_session_id()\n- event_id = id(event)\n-\n- if _running_matcher.get(session_id, None) != event_id:\n+ try:\n+ session_id = event.get_session_id()\n+ except Exception:\n+ return\n+ current_event_id = id(event)\n+ event_id = _running_matcher.get(session_id, None)\n+ if event_id and event_id != current_event_id:\n raise IgnoredException(\"Annother matcher running\")\n \n- _running_matcher[session_id] = event_id\n+ _running_matcher[session_id] = current_event_id\n \n \n @run_postprocessor\n async def _(matcher: Matcher, exception: Optional[Exception], bot: Bot, event: Event, state: T_State):\n- session_id = event.get_session_id()\n+ try:\n+ session_id = event.get_session_id()\n+ except Exception:\n+ return\n if session_id in _running_matcher:\n del _running_matcher[session_id]\n", "issue": "Bug: \u5185\u7f6e\u7684single_session\u63d2\u4ef6\u6709\u4e00\u4e9bbug\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u5185\u7f6e\u7684`single_session`\u63d2\u4ef6\u53ea\u80fd\u5904\u7406\u6709`get_session_id`\u65b9\u6cd5\u7684`event`\uff0c\u5982\u679c\u4e00\u4e2a`matcher`\u76d1\u542c\u4e86`metaevent`\uff0c\u90a3\u4e48\u5176\u4e2d\u7684`run_preprocessor`\u4f1a\u62a5\u9519\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n[\u8fd9\u4e00\u884c](https://github.com/nonebot/nonebot2/blob/93ffc93a80cf9e3103eb4a164e7b32ab3cdd0882/nonebot/plugins/single_session.py#L13)\u9650\u5236\u4e86\u53ea\u80fd\u76d1\u542c\u6709`get_session_id`\u7684\u4e8b\u4ef6\uff0c\u4f46\u662f\u5bf9\u6ca1\u6709\u8fd9\u4e2a\u65b9\u6cd5\u7684\u4e8b\u4ef6\u6ca1\u6709\u505a\u989d\u5916\u7684\u5904\u7406\uff0c\u5bfc\u81f4\u62a5\u9519\u3002\r\n\u9664\u6b64\u4e4b\u5916\uff0c\u4e0b\u9762\u7684[\u5224\u65ad\u8bed\u53e5](https://github.com/nonebot/nonebot2/blob/93ffc93a80cf9e3103eb4a164e7b32ab3cdd0882/nonebot/plugins/single_session.py#L16)\u4e5f\u6709\u95ee\u9898\uff0c\u5982\u679c\u8fd9\u4e2a\u4e8b\u4ef6\u7b2c\u4e00\u6b21\u9047\u5230\u7684\u8bdd\u4e0d\u5e94\u8be5\u88ab\u5ffd\u7565\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\u63d2\u4ef6\u6b63\u5e38\u4f7f\u7528\r\n\r\n````\r\n\n", "before_files": [{"content": "from typing import Dict, Optional\n\nfrom nonebot.typing import T_State\nfrom nonebot.matcher import Matcher\nfrom nonebot.adapters import Bot, Event\nfrom nonebot.message import run_preprocessor, run_postprocessor, IgnoredException\n\n_running_matcher: Dict[str, int] = {}\n\n\n@run_preprocessor\nasync def _(matcher: Matcher, bot: Bot, event: Event, state: T_State):\n session_id = event.get_session_id()\n event_id = id(event)\n\n if _running_matcher.get(session_id, None) != event_id:\n raise IgnoredException(\"Annother matcher running\")\n\n _running_matcher[session_id] = event_id\n\n\n@run_postprocessor\nasync def _(matcher: Matcher, exception: Optional[Exception], bot: Bot, event: Event, state: T_State):\n session_id = event.get_session_id()\n if session_id in _running_matcher:\n del _running_matcher[session_id]\n", "path": "nonebot/plugins/single_session.py"}]}
1,050
302
gh_patches_debug_42013
rasdani/github-patches
git_diff
fonttools__fonttools-2762
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> TTFont.getGlyphSet() returning _TTVarGlyphSet where type hints/typechecker expect _TTGlyphSet the new `_TTVarGlyphSet` (#2738) that is returned from `TTFont.getGlyphSet()` is not a subclass of the old `_TTGlyphSet`, and neither is the `_TTVarGlyphGlyf` that it contains a subclass of the old `_TTGlyph`. We have some internal code that uses type hints and is typechecked using pytype, which breaks with the latest fonttools 4.36.0 following the above change. ``` bad option 'fontTools.ttLib.ttGlyphSet._TTVarGlyphGlyf' in return type [bad-return-type] Expected: fontTools.ttLib.ttGlyphSet._TTGlyph ``` I think we should revise the class hierarchy of these glyphset/glyph objects and make sure that we continue to return a `_TTGlyphSet` that contains generic `_TTGlyph` objects and work around their respective differences inside subclasses. I think it's doable. </issue> <code> [start of Lib/fontTools/ttLib/ttGlyphSet.py] 1 """GlyphSets returned by a TTFont.""" 2 3 from fontTools.misc.fixedTools import otRound 4 from copy import copy 5 6 class _TTGlyphSet(object): 7 8 """Generic dict-like GlyphSet class that pulls metrics from hmtx and 9 glyph shape from TrueType or CFF. 10 """ 11 12 def __init__(self, ttFont, glyphs, glyphType): 13 """Construct a new glyphset. 14 15 Args: 16 font (TTFont): The font object (used to get metrics). 17 glyphs (dict): A dictionary mapping glyph names to ``_TTGlyph`` objects. 18 glyphType (class): Either ``_TTGlyphCFF`` or ``_TTGlyphGlyf``. 19 """ 20 self._glyphs = glyphs 21 self._hmtx = ttFont['hmtx'] 22 self._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None 23 self._glyphType = glyphType 24 25 def keys(self): 26 return list(self._glyphs.keys()) 27 28 def has_key(self, glyphName): 29 return glyphName in self._glyphs 30 31 __contains__ = has_key 32 33 def __getitem__(self, glyphName): 34 horizontalMetrics = self._hmtx[glyphName] 35 verticalMetrics = self._vmtx[glyphName] if self._vmtx else None 36 return self._glyphType( 37 self, self._glyphs[glyphName], horizontalMetrics, verticalMetrics) 38 39 def __len__(self): 40 return len(self._glyphs) 41 42 def get(self, glyphName, default=None): 43 try: 44 return self[glyphName] 45 except KeyError: 46 return default 47 48 class _TTGlyph(object): 49 50 """Wrapper for a TrueType glyph that supports the Pen protocol, meaning 51 that it has .draw() and .drawPoints() methods that take a pen object as 52 their only argument. Additionally there are 'width' and 'lsb' attributes, 53 read from the 'hmtx' table. 54 55 If the font contains a 'vmtx' table, there will also be 'height' and 'tsb' 56 attributes. 57 """ 58 59 def __init__(self, glyphset, glyph, horizontalMetrics, verticalMetrics=None): 60 """Construct a new _TTGlyph. 61 62 Args: 63 glyphset (_TTGlyphSet): A glyphset object used to resolve components. 64 glyph (ttLib.tables._g_l_y_f.Glyph): The glyph object. 65 horizontalMetrics (int, int): The glyph's width and left sidebearing. 66 """ 67 self._glyphset = glyphset 68 self._glyph = glyph 69 self.width, self.lsb = horizontalMetrics 70 if verticalMetrics: 71 self.height, self.tsb = verticalMetrics 72 else: 73 self.height, self.tsb = None, None 74 75 def draw(self, pen): 76 """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details 77 how that works. 78 """ 79 self._glyph.draw(pen) 80 81 def drawPoints(self, pen): 82 # drawPoints is only implemented for _TTGlyphGlyf at this time. 
83 raise NotImplementedError() 84 85 class _TTGlyphCFF(_TTGlyph): 86 pass 87 88 class _TTGlyphGlyf(_TTGlyph): 89 90 def draw(self, pen): 91 """Draw the glyph onto Pen. See fontTools.pens.basePen for details 92 how that works. 93 """ 94 glyfTable = self._glyphset._glyphs 95 glyph = self._glyph 96 offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 97 glyph.draw(pen, glyfTable, offset) 98 99 def drawPoints(self, pen): 100 """Draw the glyph onto PointPen. See fontTools.pens.pointPen 101 for details how that works. 102 """ 103 glyfTable = self._glyphset._glyphs 104 glyph = self._glyph 105 offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 106 glyph.drawPoints(pen, glyfTable, offset) 107 108 109 110 class _TTVarGlyphSet(object): 111 112 def __init__(self, font, location, normalized=False): 113 from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap 114 self._ttFont = font 115 if not normalized: 116 axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in font['fvar'].axes} 117 location = normalizeLocation(location, axes) 118 if 'avar' in font: 119 avar = font['avar'] 120 avarSegments = avar.segments 121 new_location = {} 122 for axis_tag,value in location.items(): 123 avarMapping = avarSegments.get(axis_tag, None) 124 if avarMapping is not None: 125 value = piecewiseLinearMap(value, avarMapping) 126 new_location[axis_tag] = value 127 location = new_location 128 del new_location 129 130 self.location = location 131 132 def keys(self): 133 return list(self._ttFont['glyf'].keys()) 134 135 def has_key(self, glyphName): 136 return glyphName in self._ttFont['glyf'] 137 __contains__ = has_key 138 139 def __getitem__(self, glyphName): 140 return _TTVarGlyphGlyf(self._ttFont, glyphName, self.location) 141 142 def get(self, glyphName, default=None): 143 try: 144 return self[glyphName] 145 except KeyError: 146 return default 147 148 def _setCoordinates(glyph, coord, glyfTable): 149 # Handle phantom points for (left, right, top, bottom) positions. 
150 assert len(coord) >= 4 151 if not hasattr(glyph, 'xMin'): 152 glyph.recalcBounds(glyfTable) 153 leftSideX = coord[-4][0] 154 rightSideX = coord[-3][0] 155 topSideY = coord[-2][1] 156 bottomSideY = coord[-1][1] 157 158 for _ in range(4): 159 del coord[-1] 160 161 if glyph.isComposite(): 162 assert len(coord) == len(glyph.components) 163 for p,comp in zip(coord, glyph.components): 164 if hasattr(comp, 'x'): 165 comp.x,comp.y = p 166 elif glyph.numberOfContours == 0: 167 assert len(coord) == 0 168 else: 169 assert len(coord) == len(glyph.coordinates) 170 glyph.coordinates = coord 171 172 glyph.recalcBounds(glyfTable) 173 174 horizontalAdvanceWidth = otRound(rightSideX - leftSideX) 175 verticalAdvanceWidth = otRound(topSideY - bottomSideY) 176 leftSideBearing = otRound(glyph.xMin - leftSideX) 177 return horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth 178 179 180 class _TTVarGlyphGlyf(object): 181 182 def __init__(self, ttFont, glyphName, location): 183 self._ttFont = ttFont 184 self._glyphName = glyphName 185 self._location = location 186 self.width = None # draw fills it in 187 188 def draw(self, pen): 189 from fontTools.varLib.iup import iup_delta 190 from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates 191 from fontTools.varLib.models import supportScalar 192 193 glyf = self._ttFont['glyf'] 194 hMetrics = self._ttFont['hmtx'].metrics 195 vMetrics = getattr(self._ttFont.get('vmtx'), 'metrics', None) 196 197 variations = self._ttFont['gvar'].variations[self._glyphName] 198 coordinates, _ = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics) 199 origCoords, endPts = None, None 200 for var in variations: 201 scalar = supportScalar(self._location, var.axes) 202 if not scalar: 203 continue 204 delta = var.coordinates 205 if None in delta: 206 if origCoords is None: 207 origCoords,control = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics) 208 endPts = control[1] if control[0] >= 1 else list(range(len(control[1]))) 209 delta = iup_delta(delta, origCoords, endPts) 210 coordinates += GlyphCoordinates(delta) * scalar 211 212 glyph = copy(glyf[self._glyphName]) # Shallow copy 213 horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth = _setCoordinates(glyph, coordinates, glyf) 214 self.width = horizontalAdvanceWidth 215 self.height = verticalAdvanceWidth 216 offset = leftSideBearing - glyph.xMin if hasattr(glyph, "xMin") else 0 217 glyph.draw(pen, glyf, offset) 218 [end of Lib/fontTools/ttLib/ttGlyphSet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Lib/fontTools/ttLib/ttGlyphSet.py b/Lib/fontTools/ttLib/ttGlyphSet.py --- a/Lib/fontTools/ttLib/ttGlyphSet.py +++ b/Lib/fontTools/ttLib/ttGlyphSet.py @@ -107,19 +107,22 @@ -class _TTVarGlyphSet(object): +class _TTVarGlyphSet(_TTGlyphSet): def __init__(self, font, location, normalized=False): - from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap self._ttFont = font + self._glyphs = font['glyf'] + if not normalized: + from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap + axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in font['fvar'].axes} location = normalizeLocation(location, axes) if 'avar' in font: avar = font['avar'] avarSegments = avar.segments new_location = {} - for axis_tag,value in location.items(): + for axis_tag, value in location.items(): avarMapping = avarSegments.get(axis_tag, None) if avarMapping is not None: value = piecewiseLinearMap(value, avarMapping) @@ -129,21 +132,11 @@ self.location = location - def keys(self): - return list(self._ttFont['glyf'].keys()) - - def has_key(self, glyphName): - return glyphName in self._ttFont['glyf'] - __contains__ = has_key - def __getitem__(self, glyphName): + if glyphName not in self._glyphs: + raise KeyError(glyphName) return _TTVarGlyphGlyf(self._ttFont, glyphName, self.location) - def get(self, glyphName, default=None): - try: - return self[glyphName] - except KeyError: - return default def _setCoordinates(glyph, coord, glyfTable): # Handle phantom points for (left, right, top, bottom) positions. @@ -174,16 +167,25 @@ horizontalAdvanceWidth = otRound(rightSideX - leftSideX) verticalAdvanceWidth = otRound(topSideY - bottomSideY) leftSideBearing = otRound(glyph.xMin - leftSideX) - return horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth + topSideBearing = otRound(topSideY - glyph.yMax) + return ( + horizontalAdvanceWidth, + leftSideBearing, + verticalAdvanceWidth, + topSideBearing, + ) -class _TTVarGlyphGlyf(object): - +class _TTVarGlyph(_TTGlyph): def __init__(self, ttFont, glyphName, location): self._ttFont = ttFont self._glyphName = glyphName self._location = location - self.width = None # draw fills it in + # draw() fills these in + self.width = self.height = self.lsb = self.tsb = None + + +class _TTVarGlyphGlyf(_TTVarGlyph): def draw(self, pen): from fontTools.varLib.iup import iup_delta @@ -210,8 +212,10 @@ coordinates += GlyphCoordinates(delta) * scalar glyph = copy(glyf[self._glyphName]) # Shallow copy - horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth = _setCoordinates(glyph, coordinates, glyf) - self.width = horizontalAdvanceWidth - self.height = verticalAdvanceWidth - offset = leftSideBearing - glyph.xMin if hasattr(glyph, "xMin") else 0 + width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyf) + self.width = width + self.lsb = lsb + self.height = height + self.tsb = tsb + offset = lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 glyph.draw(pen, glyf, offset)
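The patch re-parents `_TTVarGlyphSet` onto `_TTGlyphSet` and introduces a `_TTVarGlyph` base derived from `_TTGlyph`, so code typed against the generic classes type-checks again. A minimal sketch of the restored invariant, assuming fontTools ≥ 4.36's `location=` parameter on `getGlyphSet()`; the font path and glyph name are placeholders:

```python
from fontTools.ttLib import TTFont
from fontTools.ttLib.ttGlyphSet import _TTGlyph, _TTGlyphSet  # private; imported only to illustrate the typing

font = TTFont("MyVariableFont.ttf")  # placeholder: any variable TTF with gvar
glyphset = font.getGlyphSet(location={"wght": 700})  # a _TTVarGlyphSet

assert isinstance(glyphset, _TTGlyphSet)   # holds again after the patch
glyph = glyphset["A"]                      # assumes a glyph named "A" exists
assert isinstance(glyph, _TTGlyph)         # _TTVarGlyphGlyf is now a _TTGlyph
```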
{"golden_diff": "diff --git a/Lib/fontTools/ttLib/ttGlyphSet.py b/Lib/fontTools/ttLib/ttGlyphSet.py\n--- a/Lib/fontTools/ttLib/ttGlyphSet.py\n+++ b/Lib/fontTools/ttLib/ttGlyphSet.py\n@@ -107,19 +107,22 @@\n \n \n \n-class _TTVarGlyphSet(object):\n+class _TTVarGlyphSet(_TTGlyphSet):\n \n \tdef __init__(self, font, location, normalized=False):\n-\t\tfrom fontTools.varLib.models import normalizeLocation, piecewiseLinearMap\n \t\tself._ttFont = font\n+\t\tself._glyphs = font['glyf']\n+\n \t\tif not normalized:\n+\t\t\tfrom fontTools.varLib.models import normalizeLocation, piecewiseLinearMap\n+\n \t\t\taxes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in font['fvar'].axes}\n \t\t\tlocation = normalizeLocation(location, axes)\n \t\t\tif 'avar' in font:\n \t\t\t\tavar = font['avar']\n \t\t\t\tavarSegments = avar.segments\n \t\t\t\tnew_location = {}\n-\t\t\t\tfor axis_tag,value in location.items():\n+\t\t\t\tfor axis_tag, value in location.items():\n \t\t\t\t\tavarMapping = avarSegments.get(axis_tag, None)\n \t\t\t\t\tif avarMapping is not None:\n \t\t\t\t\t\tvalue = piecewiseLinearMap(value, avarMapping)\n@@ -129,21 +132,11 @@\n \n \t\tself.location = location\n \n-\tdef keys(self):\n-\t\treturn list(self._ttFont['glyf'].keys())\n-\n-\tdef has_key(self, glyphName):\n-\t\treturn glyphName in self._ttFont['glyf']\n-\t__contains__ = has_key\n-\n \tdef __getitem__(self, glyphName):\n+\t\tif glyphName not in self._glyphs:\n+\t\t\traise KeyError(glyphName)\n \t\treturn _TTVarGlyphGlyf(self._ttFont, glyphName, self.location)\n \n-\tdef get(self, glyphName, default=None):\n-\t\ttry:\n-\t\t\treturn self[glyphName]\n-\t\texcept KeyError:\n-\t\t\treturn default\n \n def _setCoordinates(glyph, coord, glyfTable):\n \t# Handle phantom points for (left, right, top, bottom) positions.\n@@ -174,16 +167,25 @@\n \thorizontalAdvanceWidth = otRound(rightSideX - leftSideX)\n \tverticalAdvanceWidth = otRound(topSideY - bottomSideY)\n \tleftSideBearing = otRound(glyph.xMin - leftSideX)\n-\treturn horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth\n+\ttopSideBearing = otRound(topSideY - glyph.yMax)\n+\treturn (\n+\t\thorizontalAdvanceWidth,\n+\t\tleftSideBearing,\n+\t\tverticalAdvanceWidth,\n+\t\ttopSideBearing,\n+\t)\n \n \n-class _TTVarGlyphGlyf(object):\n-\n+class _TTVarGlyph(_TTGlyph):\n \tdef __init__(self, ttFont, glyphName, location):\n \t\tself._ttFont = ttFont\n \t\tself._glyphName = glyphName\n \t\tself._location = location\n-\t\tself.width = None # draw fills it in\n+\t\t# draw() fills these in\n+\t\tself.width = self.height = self.lsb = self.tsb = None\n+\n+\n+class _TTVarGlyphGlyf(_TTVarGlyph):\n \n \tdef draw(self, pen):\n \t\tfrom fontTools.varLib.iup import iup_delta\n@@ -210,8 +212,10 @@\n \t\t\tcoordinates += GlyphCoordinates(delta) * scalar\n \n \t\tglyph = copy(glyf[self._glyphName]) # Shallow copy\n-\t\thorizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth = _setCoordinates(glyph, coordinates, glyf)\n-\t\tself.width = horizontalAdvanceWidth\n-\t\tself.height = verticalAdvanceWidth\n-\t\toffset = leftSideBearing - glyph.xMin if hasattr(glyph, \"xMin\") else 0\n+\t\twidth, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyf)\n+\t\tself.width = width\n+\t\tself.lsb = lsb\n+\t\tself.height = height\n+\t\tself.tsb = tsb\n+\t\toffset = lsb - glyph.xMin if hasattr(glyph, \"xMin\") else 0\n \t\tglyph.draw(pen, glyf, offset)\n", "issue": "TTFont.getGlyphSet() returning _TTVarGlyphSet where type hints/typechecker expect _TTGlyphSet\nthe new `_TTVarGlyphSet` (#2738) 
that is returned from `TTFont.getGlyphSet()` is not a subclass of the old `_TTGlyphSet`, and neither is the `_TTVarGlyphGlyf` that it contains a subclass of the old `_TTGlyph`.\r\nWe have some internal code that uses type hints and is typechecked using pytype, which breaks with the latest fonttools 4.36.0 following the above change.\r\n\r\n```\r\nbad option 'fontTools.ttLib.ttGlyphSet._TTVarGlyphGlyf' in return type [bad-return-type]\r\n Expected: fontTools.ttLib.ttGlyphSet._TTGlyph\r\n```\r\n\r\nI think we should revise the class hierarchy of these glyphset/glyph objects and make sure that we continue to return a `_TTGlyphSet` that contains generic `_TTGlyph` objects and work around their respective differences inside subclasses.\r\nI think it's doable.\n", "before_files": [{"content": "\"\"\"GlyphSets returned by a TTFont.\"\"\"\n\nfrom fontTools.misc.fixedTools import otRound\nfrom copy import copy\n\nclass _TTGlyphSet(object):\n\n\t\"\"\"Generic dict-like GlyphSet class that pulls metrics from hmtx and\n\tglyph shape from TrueType or CFF.\n\t\"\"\"\n\n\tdef __init__(self, ttFont, glyphs, glyphType):\n\t\t\"\"\"Construct a new glyphset.\n\n\t\tArgs:\n\t\t\tfont (TTFont): The font object (used to get metrics).\n\t\t\tglyphs (dict): A dictionary mapping glyph names to ``_TTGlyph`` objects.\n\t\t\tglyphType (class): Either ``_TTGlyphCFF`` or ``_TTGlyphGlyf``.\n\t\t\"\"\"\n\t\tself._glyphs = glyphs\n\t\tself._hmtx = ttFont['hmtx']\n\t\tself._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None\n\t\tself._glyphType = glyphType\n\n\tdef keys(self):\n\t\treturn list(self._glyphs.keys())\n\n\tdef has_key(self, glyphName):\n\t\treturn glyphName in self._glyphs\n\n\t__contains__ = has_key\n\n\tdef __getitem__(self, glyphName):\n\t\thorizontalMetrics = self._hmtx[glyphName]\n\t\tverticalMetrics = self._vmtx[glyphName] if self._vmtx else None\n\t\treturn self._glyphType(\n\t\t\tself, self._glyphs[glyphName], horizontalMetrics, verticalMetrics)\n\n\tdef __len__(self):\n\t\treturn len(self._glyphs)\n\n\tdef get(self, glyphName, default=None):\n\t\ttry:\n\t\t\treturn self[glyphName]\n\t\texcept KeyError:\n\t\t\treturn default\n\nclass _TTGlyph(object):\n\n\t\"\"\"Wrapper for a TrueType glyph that supports the Pen protocol, meaning\n\tthat it has .draw() and .drawPoints() methods that take a pen object as\n\ttheir only argument. Additionally there are 'width' and 'lsb' attributes,\n\tread from the 'hmtx' table.\n\n\tIf the font contains a 'vmtx' table, there will also be 'height' and 'tsb'\n\tattributes.\n\t\"\"\"\n\n\tdef __init__(self, glyphset, glyph, horizontalMetrics, verticalMetrics=None):\n\t\t\"\"\"Construct a new _TTGlyph.\n\n\t\tArgs:\n\t\t\tglyphset (_TTGlyphSet): A glyphset object used to resolve components.\n\t\t\tglyph (ttLib.tables._g_l_y_f.Glyph): The glyph object.\n\t\t\thorizontalMetrics (int, int): The glyph's width and left sidebearing.\n\t\t\"\"\"\n\t\tself._glyphset = glyphset\n\t\tself._glyph = glyph\n\t\tself.width, self.lsb = horizontalMetrics\n\t\tif verticalMetrics:\n\t\t\tself.height, self.tsb = verticalMetrics\n\t\telse:\n\t\t\tself.height, self.tsb = None, None\n\n\tdef draw(self, pen):\n\t\t\"\"\"Draw the glyph onto ``pen``. 
See fontTools.pens.basePen for details\n\t\thow that works.\n\t\t\"\"\"\n\t\tself._glyph.draw(pen)\n\n\tdef drawPoints(self, pen):\n\t\t# drawPoints is only implemented for _TTGlyphGlyf at this time.\n\t\traise NotImplementedError()\n\nclass _TTGlyphCFF(_TTGlyph):\n\tpass\n\nclass _TTGlyphGlyf(_TTGlyph):\n\n\tdef draw(self, pen):\n\t\t\"\"\"Draw the glyph onto Pen. See fontTools.pens.basePen for details\n\t\thow that works.\n\t\t\"\"\"\n\t\tglyfTable = self._glyphset._glyphs\n\t\tglyph = self._glyph\n\t\toffset = self.lsb - glyph.xMin if hasattr(glyph, \"xMin\") else 0\n\t\tglyph.draw(pen, glyfTable, offset)\n\n\tdef drawPoints(self, pen):\n\t\t\"\"\"Draw the glyph onto PointPen. See fontTools.pens.pointPen\n\t\tfor details how that works.\n\t\t\"\"\"\n\t\tglyfTable = self._glyphset._glyphs\n\t\tglyph = self._glyph\n\t\toffset = self.lsb - glyph.xMin if hasattr(glyph, \"xMin\") else 0\n\t\tglyph.drawPoints(pen, glyfTable, offset)\n\n\n\nclass _TTVarGlyphSet(object):\n\n\tdef __init__(self, font, location, normalized=False):\n\t\tfrom fontTools.varLib.models import normalizeLocation, piecewiseLinearMap\n\t\tself._ttFont = font\n\t\tif not normalized:\n\t\t\taxes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in font['fvar'].axes}\n\t\t\tlocation = normalizeLocation(location, axes)\n\t\t\tif 'avar' in font:\n\t\t\t\tavar = font['avar']\n\t\t\t\tavarSegments = avar.segments\n\t\t\t\tnew_location = {}\n\t\t\t\tfor axis_tag,value in location.items():\n\t\t\t\t\tavarMapping = avarSegments.get(axis_tag, None)\n\t\t\t\t\tif avarMapping is not None:\n\t\t\t\t\t\tvalue = piecewiseLinearMap(value, avarMapping)\n\t\t\t\t\tnew_location[axis_tag] = value\n\t\t\t\tlocation = new_location\n\t\t\t\tdel new_location\n\n\t\tself.location = location\n\n\tdef keys(self):\n\t\treturn list(self._ttFont['glyf'].keys())\n\n\tdef has_key(self, glyphName):\n\t\treturn glyphName in self._ttFont['glyf']\n\t__contains__ = has_key\n\n\tdef __getitem__(self, glyphName):\n\t\treturn _TTVarGlyphGlyf(self._ttFont, glyphName, self.location)\n\n\tdef get(self, glyphName, default=None):\n\t\ttry:\n\t\t\treturn self[glyphName]\n\t\texcept KeyError:\n\t\t\treturn default\n\ndef _setCoordinates(glyph, coord, glyfTable):\n\t# Handle phantom points for (left, right, top, bottom) positions.\n\tassert len(coord) >= 4\n\tif not hasattr(glyph, 'xMin'):\n\t\tglyph.recalcBounds(glyfTable)\n\tleftSideX = coord[-4][0]\n\trightSideX = coord[-3][0]\n\ttopSideY = coord[-2][1]\n\tbottomSideY = coord[-1][1]\n\n\tfor _ in range(4):\n\t\tdel coord[-1]\n\n\tif glyph.isComposite():\n\t\tassert len(coord) == len(glyph.components)\n\t\tfor p,comp in zip(coord, glyph.components):\n\t\t\tif hasattr(comp, 'x'):\n\t\t\t\tcomp.x,comp.y = p\n\telif glyph.numberOfContours == 0:\n\t\tassert len(coord) == 0\n\telse:\n\t\tassert len(coord) == len(glyph.coordinates)\n\t\tglyph.coordinates = coord\n\n\tglyph.recalcBounds(glyfTable)\n\n\thorizontalAdvanceWidth = otRound(rightSideX - leftSideX)\n\tverticalAdvanceWidth = otRound(topSideY - bottomSideY)\n\tleftSideBearing = otRound(glyph.xMin - leftSideX)\n\treturn horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth\n\n\nclass _TTVarGlyphGlyf(object):\n\n\tdef __init__(self, ttFont, glyphName, location):\n\t\tself._ttFont = ttFont\n\t\tself._glyphName = glyphName\n\t\tself._location = location\n\t\tself.width = None # draw fills it in\n\n\tdef draw(self, pen):\n\t\tfrom fontTools.varLib.iup import iup_delta\n\t\tfrom fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates\n\t\tfrom 
fontTools.varLib.models import supportScalar\n\n\t\tglyf = self._ttFont['glyf']\n\t\thMetrics = self._ttFont['hmtx'].metrics\n\t\tvMetrics = getattr(self._ttFont.get('vmtx'), 'metrics', None)\n\n\t\tvariations = self._ttFont['gvar'].variations[self._glyphName]\n\t\tcoordinates, _ = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics)\n\t\torigCoords, endPts = None, None\n\t\tfor var in variations:\n\t\t\tscalar = supportScalar(self._location, var.axes)\n\t\t\tif not scalar:\n\t\t\t\tcontinue\n\t\t\tdelta = var.coordinates\n\t\t\tif None in delta:\n\t\t\t\tif origCoords is None:\n\t\t\t\t\torigCoords,control = glyf._getCoordinatesAndControls(self._glyphName, hMetrics, vMetrics)\n\t\t\t\t\tendPts = control[1] if control[0] >= 1 else list(range(len(control[1])))\n\t\t\t\tdelta = iup_delta(delta, origCoords, endPts)\n\t\t\tcoordinates += GlyphCoordinates(delta) * scalar\n\n\t\tglyph = copy(glyf[self._glyphName]) # Shallow copy\n\t\thorizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth = _setCoordinates(glyph, coordinates, glyf)\n\t\tself.width = horizontalAdvanceWidth\n\t\tself.height = verticalAdvanceWidth\n\t\toffset = leftSideBearing - glyph.xMin if hasattr(glyph, \"xMin\") else 0\n\t\tglyph.draw(pen, glyf, offset)\n", "path": "Lib/fontTools/ttLib/ttGlyphSet.py"}]}
3,260
965
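For orientation, the pattern the golden diff above applies can be condensed to a few lines: `_TTVarGlyphSet` becomes a subclass of `_TTGlyphSet`, so type checkers (pytype, per the issue) again see the documented `_TTGlyphSet`/`_TTGlyph` return types, and the dict-like protocol is inherited instead of re-implemented. A minimal sketch, with the fvar/avar axis-normalization logic of the diff elided:

```python
# Condensed from the golden diff: inherit the dict-like protocol from
# _TTGlyphSet rather than duplicating keys()/get()/__contains__.
class _TTVarGlyphSet(_TTGlyphSet):
    def __init__(self, font, location, normalized=False):
        self._ttFont = font
        self._glyphs = font['glyf']   # inherited keys()/__contains__ read this
        self.location = location      # axis normalization elided in this sketch

    def __getitem__(self, glyphName):
        if glyphName not in self._glyphs:
            raise KeyError(glyphName)  # same contract as the static glyphset
        return _TTVarGlyphGlyf(self._ttFont, glyphName, self.location)
```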
gh_patches_debug_31910
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-1947
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> conda env creation fails if Python 3 selected I put this in `readthedocs.yml`: ``` yaml conda: file: docs/conda-env.yml python: version: 3 setup_py_install: true ``` It creates a conda env with Python 3.5, and then tries to install the standard docs machinery. But docutils is pinned to 0.11, and there isn't a build of this for Python 3.5 (there is a package of docutils 0.12). So I see this failure: ``` conda install --yes --name docs-build-w-conda sphinx==1.3.1 Pygments==2.0.2 docutils==0.11 mock==1.0.1 pillow==3.0.0 sphinx_rtd_theme==0.1.7 alabaster>=0.7,<0.8,!=0.7.5 Fetching package metadata: /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:315: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#snimissingwarning. SNIMissingWarning /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:120: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning. InsecurePlatformWarning .... Solving package specifications: .. Error: Unsatisfiable package specifications. Generating hint: [ ]| | 0% [2/8 ]|############ | 25% [3/8 ]|################## | 37% [5/8 ]|############################### | 62% [6/8 ]|##################################### | 75% [ COMPLETE ]|##################################################| 100% Hint: the following packages conflict with each other: - docutils ==0.11 - python 3.5* Use 'conda info docutils' etc. to see the dependencies for each package. ``` The obvious solution is not to pin docutils so it automatically picks the latest version available. If you prefer to keep it pinned, I think that installing dependencies at the same time as you create the environment should work; in this instance, it would fall back to Python 3.4 so it could satisfy the dependencies: ``` conda create --yes --name docs-build-w-conda python=3 sphinx==1.3.1 Pygments==2.0.2 docutils==0.11 ... 
``` </issue> <code> [start of readthedocs/doc_builder/python_environments.py] 1 import logging 2 import os 3 import shutil 4 5 from django.conf import settings 6 7 from readthedocs.doc_builder.config import ConfigWrapper 8 from readthedocs.doc_builder.loader import get_builder_class 9 from readthedocs.projects.constants import LOG_TEMPLATE 10 11 log = logging.getLogger(__name__) 12 13 14 class PythonEnvironment(object): 15 16 def __init__(self, version, build_env, config=None): 17 self.version = version 18 self.project = version.project 19 self.build_env = build_env 20 if config: 21 self.config = config 22 else: 23 self.config = ConfigWrapper(version=version, yaml_config={}) 24 # Compute here, since it's used a lot 25 self.checkout_path = self.project.checkout_path(self.version.slug) 26 27 def _log(self, msg): 28 log.info(LOG_TEMPLATE 29 .format(project=self.project.slug, 30 version=self.version.slug, 31 msg=msg)) 32 33 def delete_existing_build_dir(self): 34 35 # Handle deleting old build dir 36 build_dir = os.path.join( 37 self.venv_path(), 38 'build') 39 if os.path.exists(build_dir): 40 self._log('Removing existing build directory') 41 shutil.rmtree(build_dir) 42 43 def install_package(self): 44 setup_path = os.path.join(self.checkout_path, 'setup.py') 45 if os.path.isfile(setup_path) and self.config.install_project: 46 if getattr(settings, 'USE_PIP_INSTALL', False): 47 self.build_env.run( 48 'python', 49 self.venv_bin(filename='pip'), 50 'install', 51 '--ignore-installed', 52 '--cache-dir', 53 self.project.pip_cache_path, 54 '.', 55 cwd=self.checkout_path, 56 bin_path=self.venv_bin() 57 ) 58 else: 59 self.build_env.run( 60 'python', 61 'setup.py', 62 'install', 63 '--force', 64 cwd=self.checkout_path, 65 bin_path=self.venv_bin() 66 ) 67 68 def venv_bin(self, filename=None): 69 """Return path to the virtualenv bin path, or a specific binary 70 71 :param filename: If specified, add this filename to the path return 72 :returns: Path to virtualenv bin or filename in virtualenv bin 73 """ 74 parts = [self.venv_path(), 'bin'] 75 if filename is not None: 76 parts.append(filename) 77 return os.path.join(*parts) 78 79 80 class Virtualenv(PythonEnvironment): 81 82 def venv_path(self): 83 return os.path.join(self.project.doc_path, 'envs', self.version.slug) 84 85 def setup_base(self): 86 site_packages = '--no-site-packages' 87 if self.config.use_system_site_packages: 88 site_packages = '--system-site-packages' 89 env_path = self.venv_path() 90 self.build_env.run( 91 self.config.python_interpreter, 92 '-mvirtualenv', 93 site_packages, 94 env_path, 95 bin_path=None, # Don't use virtualenv bin that doesn't exist yet 96 ) 97 98 def install_core_requirements(self): 99 requirements = [ 100 'sphinx==1.3.1', 101 'Pygments==2.0.2', 102 'setuptools==18.6.1', 103 'docutils==0.11', 104 'mkdocs==0.14.0', 105 'mock==1.0.1', 106 'pillow==2.6.1', 107 'readthedocs-sphinx-ext==0.5.4', 108 'sphinx-rtd-theme==0.1.9', 109 'alabaster>=0.7,<0.8,!=0.7.5', 110 'commonmark==0.5.4', 111 'recommonmark==0.1.1', 112 ] 113 114 cmd = [ 115 'python', 116 self.venv_bin(filename='pip'), 117 'install', 118 '--use-wheel', 119 '-U', 120 '--cache-dir', 121 self.project.pip_cache_path, 122 ] 123 if self.config.use_system_site_packages: 124 # Other code expects sphinx-build to be installed inside the 125 # virtualenv. 
Using the -I option makes sure it gets installed 126 # even if it is already installed system-wide (and 127 # --system-site-packages is used) 128 cmd.append('-I') 129 cmd.extend(requirements) 130 self.build_env.run( 131 *cmd, 132 bin_path=self.venv_bin() 133 ) 134 135 def install_user_requirements(self): 136 requirements_file_path = self.config.requirements_file 137 if not requirements_file_path: 138 builder_class = get_builder_class(self.project.documentation_type) 139 docs_dir = (builder_class(build_env=self.build_env, python_env=self) 140 .docs_dir()) 141 for path in [docs_dir, '']: 142 for req_file in ['pip_requirements.txt', 'requirements.txt']: 143 test_path = os.path.join(self.checkout_path, path, req_file) 144 if os.path.exists(test_path): 145 requirements_file_path = test_path 146 break 147 148 if requirements_file_path: 149 self.build_env.run( 150 'python', 151 self.venv_bin(filename='pip'), 152 'install', 153 '--exists-action=w', 154 '--cache-dir', 155 self.project.pip_cache_path, 156 '-r{0}'.format(requirements_file_path), 157 cwd=self.checkout_path, 158 bin_path=self.venv_bin() 159 ) 160 161 162 class Conda(PythonEnvironment): 163 164 def venv_path(self): 165 return os.path.join(self.project.doc_path, 'conda', self.version.slug) 166 167 def setup_base(self): 168 conda_env_path = os.path.join(self.project.doc_path, 'conda') 169 version_path = os.path.join(conda_env_path, self.version.slug) 170 171 if os.path.exists(version_path): 172 # Re-create conda directory each time to keep fresh state 173 self._log('Removing existing conda directory') 174 shutil.rmtree(version_path) 175 self.build_env.run( 176 'conda', 177 'create', 178 '--yes', 179 '--name', 180 self.version.slug, 181 'python={python_version}'.format(python_version=self.config.python_version), 182 bin_path=None, # Don't use conda bin that doesn't exist yet 183 ) 184 185 def install_core_requirements(self): 186 conda_env_path = os.path.join(self.project.doc_path, 'conda') 187 188 # Use conda for requirements it packages 189 requirements = [ 190 'sphinx==1.3.1', 191 'Pygments==2.0.2', 192 'docutils==0.11', 193 'mock==1.0.1', 194 'pillow==3.0.0', 195 'sphinx_rtd_theme==0.1.7', 196 'alabaster>=0.7,<0.8,!=0.7.5', 197 ] 198 199 cmd = [ 200 'conda', 201 'install', 202 '--yes', 203 '--name', 204 self.version.slug, 205 ] 206 cmd.extend(requirements) 207 self.build_env.run( 208 *cmd 209 ) 210 211 # Install pip-only things. 212 pip_requirements = [ 213 'mkdocs==0.14.0', 214 'readthedocs-sphinx-ext==0.5.4', 215 'commonmark==0.5.4', 216 'recommonmark==0.1.1', 217 ] 218 219 pip_cmd = [ 220 'python', 221 self.venv_bin(filename='pip'), 222 'install', 223 '-U', 224 '--cache-dir', 225 self.project.pip_cache_path, 226 ] 227 pip_cmd.extend(pip_requirements) 228 self.build_env.run( 229 *pip_cmd, 230 bin_path=self.venv_bin() 231 ) 232 233 def install_user_requirements(self): 234 conda_env_path = os.path.join(self.project.doc_path, 'conda') 235 self.build_env.run( 236 'conda', 237 'env', 238 'update', 239 '--name', 240 self.version.slug, 241 '--file', 242 self.config.conda_file, 243 ) 244 [end of readthedocs/doc_builder/python_environments.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/doc_builder/python_environments.py b/readthedocs/doc_builder/python_environments.py --- a/readthedocs/doc_builder/python_environments.py +++ b/readthedocs/doc_builder/python_environments.py @@ -100,7 +100,7 @@ 'sphinx==1.3.1', 'Pygments==2.0.2', 'setuptools==18.6.1', - 'docutils==0.11', + 'docutils==0.12', 'mkdocs==0.14.0', 'mock==1.0.1', 'pillow==2.6.1', @@ -174,22 +174,22 @@ shutil.rmtree(version_path) self.build_env.run( 'conda', + 'env', 'create', - '--yes', '--name', self.version.slug, - 'python={python_version}'.format(python_version=self.config.python_version), + '--file', + self.config.conda_file, bin_path=None, # Don't use conda bin that doesn't exist yet ) def install_core_requirements(self): - conda_env_path = os.path.join(self.project.doc_path, 'conda') # Use conda for requirements it packages requirements = [ 'sphinx==1.3.1', 'Pygments==2.0.2', - 'docutils==0.11', + 'docutils==0.12', 'mock==1.0.1', 'pillow==3.0.0', 'sphinx_rtd_theme==0.1.7', @@ -231,7 +231,6 @@ ) def install_user_requirements(self): - conda_env_path = os.path.join(self.project.doc_path, 'conda') self.build_env.run( 'conda', 'env',
{"golden_diff": "diff --git a/readthedocs/doc_builder/python_environments.py b/readthedocs/doc_builder/python_environments.py\n--- a/readthedocs/doc_builder/python_environments.py\n+++ b/readthedocs/doc_builder/python_environments.py\n@@ -100,7 +100,7 @@\n 'sphinx==1.3.1',\n 'Pygments==2.0.2',\n 'setuptools==18.6.1',\n- 'docutils==0.11',\n+ 'docutils==0.12',\n 'mkdocs==0.14.0',\n 'mock==1.0.1',\n 'pillow==2.6.1',\n@@ -174,22 +174,22 @@\n shutil.rmtree(version_path)\n self.build_env.run(\n 'conda',\n+ 'env',\n 'create',\n- '--yes',\n '--name',\n self.version.slug,\n- 'python={python_version}'.format(python_version=self.config.python_version),\n+ '--file',\n+ self.config.conda_file,\n bin_path=None, # Don't use conda bin that doesn't exist yet\n )\n \n def install_core_requirements(self):\n- conda_env_path = os.path.join(self.project.doc_path, 'conda')\n \n # Use conda for requirements it packages\n requirements = [\n 'sphinx==1.3.1',\n 'Pygments==2.0.2',\n- 'docutils==0.11',\n+ 'docutils==0.12',\n 'mock==1.0.1',\n 'pillow==3.0.0',\n 'sphinx_rtd_theme==0.1.7',\n@@ -231,7 +231,6 @@\n )\n \n def install_user_requirements(self):\n- conda_env_path = os.path.join(self.project.doc_path, 'conda')\n self.build_env.run(\n 'conda',\n 'env',\n", "issue": "conda env creation fails if Python 3 selected\nI put this in `readthedocs.yml`:\n\n``` yaml\nconda:\n file: docs/conda-env.yml\npython:\n version: 3\n setup_py_install: true\n```\n\nIt creates a conda env with Python 3.5, and then tries to install the standard docs machinery. But docutils is pinned to 0.11, and there isn't a build of this for Python 3.5 (there is a package of docutils 0.12). So I see this failure:\n\n```\nconda install --yes --name docs-build-w-conda sphinx==1.3.1 Pygments==2.0.2 docutils==0.11 mock==1.0.1 pillow==3.0.0 sphinx_rtd_theme==0.1.7 alabaster>=0.7,<0.8,!=0.7.5\nFetching package metadata: /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:315: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#snimissingwarning.\n SNIMissingWarning\n/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:120: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.\n InsecurePlatformWarning\n....\nSolving package specifications: ..\nError: Unsatisfiable package specifications.\nGenerating hint: \n[ ]| | 0%\n[2/8 ]|############ | 25%\n[3/8 ]|################## | 37%\n[5/8 ]|############################### | 62%\n[6/8 ]|##################################### | 75%\n[ COMPLETE ]|##################################################| 100%\n\n\n\nHint: the following packages conflict with each other:\n - docutils ==0.11\n - python 3.5*\n\nUse 'conda info docutils' etc. to see the dependencies for each package.\n```\n\nThe obvious solution is not to pin docutils so it automatically picks the latest version available. 
If you prefer to keep it pinned, I think that installing dependencies at the same time as you create the environment should work; in this instance, it would fall back to Python 3.4 so it could satisfy the dependencies:\n\n```\nconda create --yes --name docs-build-w-conda python=3 sphinx==1.3.1 Pygments==2.0.2 docutils==0.11 ...\n```\n\n", "before_files": [{"content": "import logging\nimport os\nimport shutil\n\nfrom django.conf import settings\n\nfrom readthedocs.doc_builder.config import ConfigWrapper\nfrom readthedocs.doc_builder.loader import get_builder_class\nfrom readthedocs.projects.constants import LOG_TEMPLATE\n\nlog = logging.getLogger(__name__)\n\n\nclass PythonEnvironment(object):\n\n def __init__(self, version, build_env, config=None):\n self.version = version\n self.project = version.project\n self.build_env = build_env\n if config:\n self.config = config\n else:\n self.config = ConfigWrapper(version=version, yaml_config={})\n # Compute here, since it's used a lot\n self.checkout_path = self.project.checkout_path(self.version.slug)\n\n def _log(self, msg):\n log.info(LOG_TEMPLATE\n .format(project=self.project.slug,\n version=self.version.slug,\n msg=msg))\n\n def delete_existing_build_dir(self):\n\n # Handle deleting old build dir\n build_dir = os.path.join(\n self.venv_path(),\n 'build')\n if os.path.exists(build_dir):\n self._log('Removing existing build directory')\n shutil.rmtree(build_dir)\n\n def install_package(self):\n setup_path = os.path.join(self.checkout_path, 'setup.py')\n if os.path.isfile(setup_path) and self.config.install_project:\n if getattr(settings, 'USE_PIP_INSTALL', False):\n self.build_env.run(\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--ignore-installed',\n '--cache-dir',\n self.project.pip_cache_path,\n '.',\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n else:\n self.build_env.run(\n 'python',\n 'setup.py',\n 'install',\n '--force',\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n\n def venv_bin(self, filename=None):\n \"\"\"Return path to the virtualenv bin path, or a specific binary\n\n :param filename: If specified, add this filename to the path return\n :returns: Path to virtualenv bin or filename in virtualenv bin\n \"\"\"\n parts = [self.venv_path(), 'bin']\n if filename is not None:\n parts.append(filename)\n return os.path.join(*parts)\n\n\nclass Virtualenv(PythonEnvironment):\n\n def venv_path(self):\n return os.path.join(self.project.doc_path, 'envs', self.version.slug)\n\n def setup_base(self):\n site_packages = '--no-site-packages'\n if self.config.use_system_site_packages:\n site_packages = '--system-site-packages'\n env_path = self.venv_path()\n self.build_env.run(\n self.config.python_interpreter,\n '-mvirtualenv',\n site_packages,\n env_path,\n bin_path=None, # Don't use virtualenv bin that doesn't exist yet\n )\n\n def install_core_requirements(self):\n requirements = [\n 'sphinx==1.3.1',\n 'Pygments==2.0.2',\n 'setuptools==18.6.1',\n 'docutils==0.11',\n 'mkdocs==0.14.0',\n 'mock==1.0.1',\n 'pillow==2.6.1',\n 'readthedocs-sphinx-ext==0.5.4',\n 'sphinx-rtd-theme==0.1.9',\n 'alabaster>=0.7,<0.8,!=0.7.5',\n 'commonmark==0.5.4',\n 'recommonmark==0.1.1',\n ]\n\n cmd = [\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--use-wheel',\n '-U',\n '--cache-dir',\n self.project.pip_cache_path,\n ]\n if self.config.use_system_site_packages:\n # Other code expects sphinx-build to be installed inside the\n # virtualenv. 
Using the -I option makes sure it gets installed\n # even if it is already installed system-wide (and\n # --system-site-packages is used)\n cmd.append('-I')\n cmd.extend(requirements)\n self.build_env.run(\n *cmd,\n bin_path=self.venv_bin()\n )\n\n def install_user_requirements(self):\n requirements_file_path = self.config.requirements_file\n if not requirements_file_path:\n builder_class = get_builder_class(self.project.documentation_type)\n docs_dir = (builder_class(build_env=self.build_env, python_env=self)\n .docs_dir())\n for path in [docs_dir, '']:\n for req_file in ['pip_requirements.txt', 'requirements.txt']:\n test_path = os.path.join(self.checkout_path, path, req_file)\n if os.path.exists(test_path):\n requirements_file_path = test_path\n break\n\n if requirements_file_path:\n self.build_env.run(\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--exists-action=w',\n '--cache-dir',\n self.project.pip_cache_path,\n '-r{0}'.format(requirements_file_path),\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n\n\nclass Conda(PythonEnvironment):\n\n def venv_path(self):\n return os.path.join(self.project.doc_path, 'conda', self.version.slug)\n\n def setup_base(self):\n conda_env_path = os.path.join(self.project.doc_path, 'conda')\n version_path = os.path.join(conda_env_path, self.version.slug)\n\n if os.path.exists(version_path):\n # Re-create conda directory each time to keep fresh state\n self._log('Removing existing conda directory')\n shutil.rmtree(version_path)\n self.build_env.run(\n 'conda',\n 'create',\n '--yes',\n '--name',\n self.version.slug,\n 'python={python_version}'.format(python_version=self.config.python_version),\n bin_path=None, # Don't use conda bin that doesn't exist yet\n )\n\n def install_core_requirements(self):\n conda_env_path = os.path.join(self.project.doc_path, 'conda')\n\n # Use conda for requirements it packages\n requirements = [\n 'sphinx==1.3.1',\n 'Pygments==2.0.2',\n 'docutils==0.11',\n 'mock==1.0.1',\n 'pillow==3.0.0',\n 'sphinx_rtd_theme==0.1.7',\n 'alabaster>=0.7,<0.8,!=0.7.5',\n ]\n\n cmd = [\n 'conda',\n 'install',\n '--yes',\n '--name',\n self.version.slug,\n ]\n cmd.extend(requirements)\n self.build_env.run(\n *cmd\n )\n\n # Install pip-only things.\n pip_requirements = [\n 'mkdocs==0.14.0',\n 'readthedocs-sphinx-ext==0.5.4',\n 'commonmark==0.5.4',\n 'recommonmark==0.1.1',\n ]\n\n pip_cmd = [\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '-U',\n '--cache-dir',\n self.project.pip_cache_path,\n ]\n pip_cmd.extend(pip_requirements)\n self.build_env.run(\n *pip_cmd,\n bin_path=self.venv_bin()\n )\n\n def install_user_requirements(self):\n conda_env_path = os.path.join(self.project.doc_path, 'conda')\n self.build_env.run(\n 'conda',\n 'env',\n 'update',\n '--name',\n self.version.slug,\n '--file',\n self.config.conda_file,\n )\n", "path": "readthedocs/doc_builder/python_environments.py"}]}
3,520
433
gh_patches_debug_39037
rasdani/github-patches
git_diff
pypa__setuptools-1750
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `build_meta.build_sdist` should work if the destination directory already contains a tar.gz The issue is similar to #1671, see #1745 for how the issue can be resolved. </issue> <code> [start of setuptools/build_meta.py] 1 """A PEP 517 interface to setuptools 2 3 Previously, when a user or a command line tool (let's call it a "frontend") 4 needed to make a request of setuptools to take a certain action, for 5 example, generating a list of installation requirements, the frontend would 6 would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. 7 8 PEP 517 defines a different method of interfacing with setuptools. Rather 9 than calling "setup.py" directly, the frontend should: 10 11 1. Set the current directory to the directory with a setup.py file 12 2. Import this module into a safe python interpreter (one in which 13 setuptools can potentially set global variables or crash hard). 14 3. Call one of the functions defined in PEP 517. 15 16 What each function does is defined in PEP 517. However, here is a "casual" 17 definition of the functions (this definition should not be relied on for 18 bug reports or API stability): 19 20 - `build_wheel`: build a wheel in the folder and return the basename 21 - `get_requires_for_build_wheel`: get the `setup_requires` to build 22 - `prepare_metadata_for_build_wheel`: get the `install_requires` 23 - `build_sdist`: build an sdist in the folder and return the basename 24 - `get_requires_for_build_sdist`: get the `setup_requires` to build 25 26 Again, this is not a formal definition! Just a "taste" of the module. 27 """ 28 29 import io 30 import os 31 import sys 32 import tokenize 33 import shutil 34 import contextlib 35 36 import setuptools 37 import distutils 38 from setuptools.py31compat import TemporaryDirectory 39 40 from pkg_resources import parse_requirements 41 42 __all__ = ['get_requires_for_build_sdist', 43 'get_requires_for_build_wheel', 44 'prepare_metadata_for_build_wheel', 45 'build_wheel', 46 'build_sdist', 47 '__legacy__', 48 'SetupRequirementsError'] 49 50 class SetupRequirementsError(BaseException): 51 def __init__(self, specifiers): 52 self.specifiers = specifiers 53 54 55 class Distribution(setuptools.dist.Distribution): 56 def fetch_build_eggs(self, specifiers): 57 specifier_list = list(map(str, parse_requirements(specifiers))) 58 59 raise SetupRequirementsError(specifier_list) 60 61 @classmethod 62 @contextlib.contextmanager 63 def patch(cls): 64 """ 65 Replace 66 distutils.dist.Distribution with this class 67 for the duration of this context. 68 """ 69 orig = distutils.core.Distribution 70 distutils.core.Distribution = cls 71 try: 72 yield 73 finally: 74 distutils.core.Distribution = orig 75 76 77 def _to_str(s): 78 """ 79 Convert a filename to a string (on Python 2, explicitly 80 a byte string, not Unicode) as distutils checks for the 81 exact type str. 82 """ 83 if sys.version_info[0] == 2 and not isinstance(s, str): 84 # Assume it's Unicode, as that's what the PEP says 85 # should be provided. 
86 return s.encode(sys.getfilesystemencoding()) 87 return s 88 89 90 def _get_immediate_subdirectories(a_dir): 91 return [name for name in os.listdir(a_dir) 92 if os.path.isdir(os.path.join(a_dir, name))] 93 94 95 def _file_with_extension(directory, extension): 96 matching = ( 97 f for f in os.listdir(directory) 98 if f.endswith(extension) 99 ) 100 file, = matching 101 return file 102 103 104 def _open_setup_script(setup_script): 105 if not os.path.exists(setup_script): 106 # Supply a default setup.py 107 return io.StringIO(u"from setuptools import setup; setup()") 108 109 return getattr(tokenize, 'open', open)(setup_script) 110 111 112 class _BuildMetaBackend(object): 113 114 def _fix_config(self, config_settings): 115 config_settings = config_settings or {} 116 config_settings.setdefault('--global-option', []) 117 return config_settings 118 119 def _get_build_requires(self, config_settings, requirements): 120 config_settings = self._fix_config(config_settings) 121 122 sys.argv = sys.argv[:1] + ['egg_info'] + \ 123 config_settings["--global-option"] 124 try: 125 with Distribution.patch(): 126 self.run_setup() 127 except SetupRequirementsError as e: 128 requirements += e.specifiers 129 130 return requirements 131 132 def run_setup(self, setup_script='setup.py'): 133 # Note that we can reuse our build directory between calls 134 # Correctness comes first, then optimization later 135 __file__ = setup_script 136 __name__ = '__main__' 137 138 with _open_setup_script(__file__) as f: 139 code = f.read().replace(r'\r\n', r'\n') 140 141 exec(compile(code, __file__, 'exec'), locals()) 142 143 def get_requires_for_build_wheel(self, config_settings=None): 144 config_settings = self._fix_config(config_settings) 145 return self._get_build_requires(config_settings, requirements=['wheel']) 146 147 def get_requires_for_build_sdist(self, config_settings=None): 148 config_settings = self._fix_config(config_settings) 149 return self._get_build_requires(config_settings, requirements=[]) 150 151 def prepare_metadata_for_build_wheel(self, metadata_directory, 152 config_settings=None): 153 sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', 154 _to_str(metadata_directory)] 155 self.run_setup() 156 157 dist_info_directory = metadata_directory 158 while True: 159 dist_infos = [f for f in os.listdir(dist_info_directory) 160 if f.endswith('.dist-info')] 161 162 if (len(dist_infos) == 0 and 163 len(_get_immediate_subdirectories(dist_info_directory)) == 1): 164 165 dist_info_directory = os.path.join( 166 dist_info_directory, os.listdir(dist_info_directory)[0]) 167 continue 168 169 assert len(dist_infos) == 1 170 break 171 172 # PEP 517 requires that the .dist-info directory be placed in the 173 # metadata_directory. 
To comply, we MUST copy the directory to the root 174 if dist_info_directory != metadata_directory: 175 shutil.move( 176 os.path.join(dist_info_directory, dist_infos[0]), 177 metadata_directory) 178 shutil.rmtree(dist_info_directory, ignore_errors=True) 179 180 return dist_infos[0] 181 182 def build_wheel(self, wheel_directory, config_settings=None, 183 metadata_directory=None): 184 config_settings = self._fix_config(config_settings) 185 wheel_directory = os.path.abspath(wheel_directory) 186 187 # Build the wheel in a temporary directory, then copy to the target 188 with TemporaryDirectory(dir=wheel_directory) as tmp_dist_dir: 189 sys.argv = (sys.argv[:1] + 190 ['bdist_wheel', '--dist-dir', tmp_dist_dir] + 191 config_settings["--global-option"]) 192 self.run_setup() 193 194 wheel_basename = _file_with_extension(tmp_dist_dir, '.whl') 195 wheel_path = os.path.join(wheel_directory, wheel_basename) 196 if os.path.exists(wheel_path): 197 # os.rename will fail overwriting on non-unix env 198 os.remove(wheel_path) 199 os.rename(os.path.join(tmp_dist_dir, wheel_basename), wheel_path) 200 201 return wheel_basename 202 203 def build_sdist(self, sdist_directory, config_settings=None): 204 config_settings = self._fix_config(config_settings) 205 sdist_directory = os.path.abspath(sdist_directory) 206 sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \ 207 config_settings["--global-option"] + \ 208 ["--dist-dir", sdist_directory] 209 self.run_setup() 210 211 return _file_with_extension(sdist_directory, '.tar.gz') 212 213 214 class _BuildMetaLegacyBackend(_BuildMetaBackend): 215 """Compatibility backend for setuptools 216 217 This is a version of setuptools.build_meta that endeavors to maintain backwards 218 compatibility with pre-PEP 517 modes of invocation. It exists as a temporary 219 bridge between the old packaging mechanism and the new packaging mechanism, 220 and will eventually be removed. 221 """ 222 def run_setup(self, setup_script='setup.py'): 223 # In order to maintain compatibility with scripts assuming that 224 # the setup.py script is in a directory on the PYTHONPATH, inject 225 # '' into sys.path. (pypa/setuptools#1642) 226 sys_path = list(sys.path) # Save the original path 227 228 script_dir = os.path.dirname(os.path.abspath(setup_script)) 229 if script_dir not in sys.path: 230 sys.path.insert(0, script_dir) 231 232 try: 233 super(_BuildMetaLegacyBackend, 234 self).run_setup(setup_script=setup_script) 235 finally: 236 # While PEP 517 frontends should be calling each hook in a fresh 237 # subprocess according to the standard (and thus it should not be 238 # strictly necessary to restore the old sys.path), we'll restore 239 # the original path so that the path manipulation does not persist 240 # within the hook after run_setup is called. 241 sys.path[:] = sys_path 242 243 # The primary backend 244 _BACKEND = _BuildMetaBackend() 245 246 get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel 247 get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist 248 prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel 249 build_wheel = _BACKEND.build_wheel 250 build_sdist = _BACKEND.build_sdist 251 252 253 # The legacy backend 254 __legacy__ = _BuildMetaLegacyBackend() 255 [end of setuptools/build_meta.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py --- a/setuptools/build_meta.py +++ b/setuptools/build_meta.py @@ -38,6 +38,7 @@ from setuptools.py31compat import TemporaryDirectory from pkg_resources import parse_requirements +from pkg_resources.py31compat import makedirs __all__ = ['get_requires_for_build_sdist', 'get_requires_for_build_wheel', @@ -179,36 +180,38 @@ return dist_infos[0] - def build_wheel(self, wheel_directory, config_settings=None, - metadata_directory=None): + def _build_with_temp_dir(self, setup_command, result_extension, + result_directory, config_settings): config_settings = self._fix_config(config_settings) - wheel_directory = os.path.abspath(wheel_directory) + result_directory = os.path.abspath(result_directory) - # Build the wheel in a temporary directory, then copy to the target - with TemporaryDirectory(dir=wheel_directory) as tmp_dist_dir: - sys.argv = (sys.argv[:1] + - ['bdist_wheel', '--dist-dir', tmp_dist_dir] + + # Build in a temporary directory, then copy to the target. + makedirs(result_directory, exist_ok=True) + with TemporaryDirectory(dir=result_directory) as tmp_dist_dir: + sys.argv = (sys.argv[:1] + setup_command + + ['--dist-dir', tmp_dist_dir] + config_settings["--global-option"]) self.run_setup() - wheel_basename = _file_with_extension(tmp_dist_dir, '.whl') - wheel_path = os.path.join(wheel_directory, wheel_basename) - if os.path.exists(wheel_path): - # os.rename will fail overwriting on non-unix env - os.remove(wheel_path) - os.rename(os.path.join(tmp_dist_dir, wheel_basename), wheel_path) + result_basename = _file_with_extension(tmp_dist_dir, result_extension) + result_path = os.path.join(result_directory, result_basename) + if os.path.exists(result_path): + # os.rename will fail overwriting on non-Unix. + os.remove(result_path) + os.rename(os.path.join(tmp_dist_dir, result_basename), result_path) - return wheel_basename + return result_basename - def build_sdist(self, sdist_directory, config_settings=None): - config_settings = self._fix_config(config_settings) - sdist_directory = os.path.abspath(sdist_directory) - sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \ - config_settings["--global-option"] + \ - ["--dist-dir", sdist_directory] - self.run_setup() - return _file_with_extension(sdist_directory, '.tar.gz') + def build_wheel(self, wheel_directory, config_settings=None, + metadata_directory=None): + return self._build_with_temp_dir(['bdist_wheel'], '.whl', + wheel_directory, config_settings) + + def build_sdist(self, sdist_directory, config_settings=None): + return self._build_with_temp_dir(['sdist', '--formats', 'gztar'], + '.tar.gz', sdist_directory, + config_settings) class _BuildMetaLegacyBackend(_BuildMetaBackend):
{"golden_diff": "diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py\n--- a/setuptools/build_meta.py\n+++ b/setuptools/build_meta.py\n@@ -38,6 +38,7 @@\n from setuptools.py31compat import TemporaryDirectory\n \n from pkg_resources import parse_requirements\n+from pkg_resources.py31compat import makedirs\n \n __all__ = ['get_requires_for_build_sdist',\n 'get_requires_for_build_wheel',\n@@ -179,36 +180,38 @@\n \n return dist_infos[0]\n \n- def build_wheel(self, wheel_directory, config_settings=None,\n- metadata_directory=None):\n+ def _build_with_temp_dir(self, setup_command, result_extension,\n+ result_directory, config_settings):\n config_settings = self._fix_config(config_settings)\n- wheel_directory = os.path.abspath(wheel_directory)\n+ result_directory = os.path.abspath(result_directory)\n \n- # Build the wheel in a temporary directory, then copy to the target\n- with TemporaryDirectory(dir=wheel_directory) as tmp_dist_dir:\n- sys.argv = (sys.argv[:1] +\n- ['bdist_wheel', '--dist-dir', tmp_dist_dir] +\n+ # Build in a temporary directory, then copy to the target.\n+ makedirs(result_directory, exist_ok=True)\n+ with TemporaryDirectory(dir=result_directory) as tmp_dist_dir:\n+ sys.argv = (sys.argv[:1] + setup_command +\n+ ['--dist-dir', tmp_dist_dir] +\n config_settings[\"--global-option\"])\n self.run_setup()\n \n- wheel_basename = _file_with_extension(tmp_dist_dir, '.whl')\n- wheel_path = os.path.join(wheel_directory, wheel_basename)\n- if os.path.exists(wheel_path):\n- # os.rename will fail overwriting on non-unix env\n- os.remove(wheel_path)\n- os.rename(os.path.join(tmp_dist_dir, wheel_basename), wheel_path)\n+ result_basename = _file_with_extension(tmp_dist_dir, result_extension)\n+ result_path = os.path.join(result_directory, result_basename)\n+ if os.path.exists(result_path):\n+ # os.rename will fail overwriting on non-Unix.\n+ os.remove(result_path)\n+ os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)\n \n- return wheel_basename\n+ return result_basename\n \n- def build_sdist(self, sdist_directory, config_settings=None):\n- config_settings = self._fix_config(config_settings)\n- sdist_directory = os.path.abspath(sdist_directory)\n- sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \\\n- config_settings[\"--global-option\"] + \\\n- [\"--dist-dir\", sdist_directory]\n- self.run_setup()\n \n- return _file_with_extension(sdist_directory, '.tar.gz')\n+ def build_wheel(self, wheel_directory, config_settings=None,\n+ metadata_directory=None):\n+ return self._build_with_temp_dir(['bdist_wheel'], '.whl',\n+ wheel_directory, config_settings)\n+\n+ def build_sdist(self, sdist_directory, config_settings=None):\n+ return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],\n+ '.tar.gz', sdist_directory,\n+ config_settings)\n \n \n class _BuildMetaLegacyBackend(_BuildMetaBackend):\n", "issue": "`build_meta.build_sdist` should work if the destination directory already contains a tar.gz\nThe issue is similar to #1671, see #1745 for how the issue can be resolved.\n", "before_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. 
Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! Just a \"taste\" of the module.\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\nfrom setuptools.py31compat import TemporaryDirectory\n\nfrom pkg_resources import parse_requirements\n\n__all__ = ['get_requires_for_build_sdist',\n 'get_requires_for_build_wheel',\n 'prepare_metadata_for_build_wheel',\n 'build_wheel',\n 'build_sdist',\n '__legacy__',\n 'SetupRequirementsError']\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n specifier_list = list(map(str, parse_requirements(specifiers)))\n\n raise SetupRequirementsError(specifier_list)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef _file_with_extension(directory, extension):\n matching = (\n f for f in os.listdir(directory)\n if f.endswith(extension)\n )\n file, = matching\n return file\n\n\ndef _open_setup_script(setup_script):\n if not os.path.exists(setup_script):\n # Supply a default setup.py\n return io.StringIO(u\"from setuptools import setup; setup()\")\n\n return getattr(tokenize, 'open', open)(setup_script)\n\n\nclass _BuildMetaBackend(object):\n\n def _fix_config(self, config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n def _get_build_requires(self, config_settings, requirements):\n config_settings = self._fix_config(config_settings)\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n self.run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n def run_setup(self, setup_script='setup.py'):\n # Note that we can reuse our build 
directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n\n with _open_setup_script(__file__) as f:\n code = f.read().replace(r'\\r\\n', r'\\n')\n\n exec(compile(code, __file__, 'exec'), locals())\n\n def get_requires_for_build_wheel(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=['wheel'])\n\n def get_requires_for_build_sdist(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=[])\n\n def prepare_metadata_for_build_wheel(self, metadata_directory,\n config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',\n _to_str(metadata_directory)]\n self.run_setup()\n\n dist_info_directory = metadata_directory\n while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if (len(dist_infos) == 0 and\n len(_get_immediate_subdirectories(dist_info_directory)) == 1):\n\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n def build_wheel(self, wheel_directory, config_settings=None,\n metadata_directory=None):\n config_settings = self._fix_config(config_settings)\n wheel_directory = os.path.abspath(wheel_directory)\n\n # Build the wheel in a temporary directory, then copy to the target\n with TemporaryDirectory(dir=wheel_directory) as tmp_dist_dir:\n sys.argv = (sys.argv[:1] +\n ['bdist_wheel', '--dist-dir', tmp_dist_dir] +\n config_settings[\"--global-option\"])\n self.run_setup()\n\n wheel_basename = _file_with_extension(tmp_dist_dir, '.whl')\n wheel_path = os.path.join(wheel_directory, wheel_basename)\n if os.path.exists(wheel_path):\n # os.rename will fail overwriting on non-unix env\n os.remove(wheel_path)\n os.rename(os.path.join(tmp_dist_dir, wheel_basename), wheel_path)\n\n return wheel_basename\n\n def build_sdist(self, sdist_directory, config_settings=None):\n config_settings = self._fix_config(config_settings)\n sdist_directory = os.path.abspath(sdist_directory)\n sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \\\n config_settings[\"--global-option\"] + \\\n [\"--dist-dir\", sdist_directory]\n self.run_setup()\n\n return _file_with_extension(sdist_directory, '.tar.gz')\n\n\nclass _BuildMetaLegacyBackend(_BuildMetaBackend):\n \"\"\"Compatibility backend for setuptools\n\n This is a version of setuptools.build_meta that endeavors to maintain backwards\n compatibility with pre-PEP 517 modes of invocation. It exists as a temporary\n bridge between the old packaging mechanism and the new packaging mechanism,\n and will eventually be removed.\n \"\"\"\n def run_setup(self, setup_script='setup.py'):\n # In order to maintain compatibility with scripts assuming that\n # the setup.py script is in a directory on the PYTHONPATH, inject\n # '' into sys.path. 
(pypa/setuptools#1642)\n sys_path = list(sys.path) # Save the original path\n\n script_dir = os.path.dirname(os.path.abspath(setup_script))\n if script_dir not in sys.path:\n sys.path.insert(0, script_dir)\n\n try:\n super(_BuildMetaLegacyBackend,\n self).run_setup(setup_script=setup_script)\n finally:\n # While PEP 517 frontends should be calling each hook in a fresh\n # subprocess according to the standard (and thus it should not be\n # strictly necessary to restore the old sys.path), we'll restore\n # the original path so that the path manipulation does not persist\n # within the hook after run_setup is called.\n sys.path[:] = sys_path\n\n# The primary backend\n_BACKEND = _BuildMetaBackend()\n\nget_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel\nget_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist\nprepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel\nbuild_wheel = _BACKEND.build_wheel\nbuild_sdist = _BACKEND.build_sdist\n\n\n# The legacy backend\n__legacy__ = _BuildMetaLegacyBackend()\n", "path": "setuptools/build_meta.py"}]}
3,282
731
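The setuptools change above follows the same shape as the earlier wheel fix it cites (#1671/#1745): build into a temporary directory created inside a pre-made destination, then move the single result file out. That way `_file_with_extension` can never match a stale `.tar.gz` or `.whl` already sitting in the destination. Condensed from the golden diff (class context and module imports elided; `makedirs` comes from `pkg_resources.py31compat`):

```python
# Shared helper from the golden diff: build_wheel and build_sdist both
# funnel through this, so a pre-existing archive in result_directory
# cannot be picked up by _file_with_extension.
def _build_with_temp_dir(self, setup_command, result_extension,
                         result_directory, config_settings):
    config_settings = self._fix_config(config_settings)
    result_directory = os.path.abspath(result_directory)
    makedirs(result_directory, exist_ok=True)
    with TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
        sys.argv = (sys.argv[:1] + setup_command +
                    ['--dist-dir', tmp_dist_dir] +
                    config_settings['--global-option'])
        self.run_setup()
        result_basename = _file_with_extension(tmp_dist_dir, result_extension)
        result_path = os.path.join(result_directory, result_basename)
        if os.path.exists(result_path):
            os.remove(result_path)  # os.rename won't overwrite on non-Unix
        os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
    return result_basename

def build_sdist(self, sdist_directory, config_settings=None):
    return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
                                     '.tar.gz', sdist_directory,
                                     config_settings)
```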
gh_patches_debug_2583
rasdani/github-patches
git_diff
searxng__searxng-2081
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DuckDuckGo returning "access denied" errors <!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG --> **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** 2023.01.06-b241015e **How did you install SearXNG?** searxng-docker **What happened?** DuckDuckGo started returning "access denied" error messages. Very similar to previous issue #1854 **How To Reproduce** Enable DuckDuckGo and try to search anything. **Expected behavior** DDG results should return and no "Access Denied" error message should be displayed. **Screenshots & Logs** Error message in question: ![image](https://user-images.githubusercontent.com/4787751/211107269-bc163fce-162a-4a6f-b43b-7dcd1dea3138.png) - Exception: searx.exceptions.SearxEngineAccessDeniedException - Parameter: HTTP error 403 - Filename: searx/search/processors/online.py:113 - Function: _send_http_request - Code: response = req(params['url'], **request_args) **Additional context** It looks like it can be fixed by adding a HTTP `Referer` header to the request. 
</issue> <code> [start of searx/engines/duckduckgo.py] 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 """DuckDuckGo Lite 4 """ 5 6 from json import loads 7 8 from lxml.html import fromstring 9 10 from searx.utils import ( 11 dict_subset, 12 eval_xpath, 13 eval_xpath_getindex, 14 extract_text, 15 match_language, 16 ) 17 from searx.network import get 18 19 # about 20 about = { 21 "website": 'https://lite.duckduckgo.com/lite/', 22 "wikidata_id": 'Q12805', 23 "official_api_documentation": 'https://duckduckgo.com/api', 24 "use_official_api": False, 25 "require_api_key": False, 26 "results": 'HTML', 27 } 28 29 # engine dependent config 30 categories = ['general', 'web'] 31 paging = True 32 supported_languages_url = 'https://duckduckgo.com/util/u588.js' 33 time_range_support = True 34 send_accept_language_header = True 35 36 language_aliases = { 37 'ar-SA': 'ar-XA', 38 'es-419': 'es-XL', 39 'ja': 'jp-JP', 40 'ko': 'kr-KR', 41 'sl-SI': 'sl-SL', 42 'zh-TW': 'tzh-TW', 43 'zh-HK': 'tzh-HK', 44 } 45 46 time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'} 47 48 # search-url 49 url = 'https://lite.duckduckgo.com/lite/' 50 url_ping = 'https://duckduckgo.com/t/sl_l' 51 52 # match query's language to a region code that duckduckgo will accept 53 def get_region_code(lang, lang_list=None): 54 if lang == 'all': 55 return None 56 57 lang_code = match_language(lang, lang_list or [], language_aliases, 'wt-WT') 58 lang_parts = lang_code.split('-') 59 60 # country code goes first 61 return lang_parts[1].lower() + '-' + lang_parts[0].lower() 62 63 64 def request(query, params): 65 66 params['url'] = url 67 params['method'] = 'POST' 68 69 params['data']['q'] = query 70 71 # The API is not documented, so we do some reverse engineering and emulate 72 # what https://lite.duckduckgo.com/lite/ does when you press "next Page" 73 # link again and again .. 
74 75 params['headers']['Content-Type'] = 'application/x-www-form-urlencoded' 76 77 # initial page does not have an offset 78 if params['pageno'] == 2: 79 # second page does have an offset of 30 80 offset = (params['pageno'] - 1) * 30 81 params['data']['s'] = offset 82 params['data']['dc'] = offset + 1 83 84 elif params['pageno'] > 2: 85 # third and following pages do have an offset of 30 + n*50 86 offset = 30 + (params['pageno'] - 2) * 50 87 params['data']['s'] = offset 88 params['data']['dc'] = offset + 1 89 90 # initial page does not have additional data in the input form 91 if params['pageno'] > 1: 92 # request the second page (and more pages) needs 'o' and 'api' arguments 93 params['data']['o'] = 'json' 94 params['data']['api'] = 'd.js' 95 96 # initial page does not have additional data in the input form 97 if params['pageno'] > 2: 98 # request the third page (and more pages) some more arguments 99 params['data']['nextParams'] = '' 100 params['data']['v'] = '' 101 params['data']['vqd'] = '' 102 103 region_code = get_region_code(params['language'], supported_languages) 104 if region_code: 105 params['data']['kl'] = region_code 106 params['cookies']['kl'] = region_code 107 108 params['data']['df'] = '' 109 if params['time_range'] in time_range_dict: 110 params['data']['df'] = time_range_dict[params['time_range']] 111 params['cookies']['df'] = time_range_dict[params['time_range']] 112 113 logger.debug("param data: %s", params['data']) 114 logger.debug("param cookies: %s", params['cookies']) 115 return params 116 117 118 # get response from search-request 119 def response(resp): 120 121 headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie']) 122 get(url_ping, headers=headers_ping) 123 124 if resp.status_code == 303: 125 return [] 126 127 results = [] 128 doc = fromstring(resp.text) 129 130 result_table = eval_xpath(doc, '//html/body/form/div[@class="filters"]/table') 131 if not len(result_table) >= 3: 132 # no more results 133 return [] 134 result_table = result_table[2] 135 136 tr_rows = eval_xpath(result_table, './/tr') 137 138 # In the last <tr> is the form of the 'previous/next page' links 139 tr_rows = tr_rows[:-1] 140 141 len_tr_rows = len(tr_rows) 142 offset = 0 143 144 while len_tr_rows >= offset + 4: 145 146 # assemble table rows we need to scrap 147 tr_title = tr_rows[offset] 148 tr_content = tr_rows[offset + 1] 149 offset += 4 150 151 # ignore sponsored Adds <tr class="result-sponsored"> 152 if tr_content.get('class') == 'result-sponsored': 153 continue 154 155 a_tag = eval_xpath_getindex(tr_title, './/td//a[@class="result-link"]', 0, None) 156 if a_tag is None: 157 continue 158 159 td_content = eval_xpath_getindex(tr_content, './/td[@class="result-snippet"]', 0, None) 160 if td_content is None: 161 continue 162 163 results.append( 164 { 165 'title': a_tag.text_content(), 166 'content': extract_text(td_content), 167 'url': a_tag.get('href'), 168 } 169 ) 170 171 return results 172 173 174 # get supported languages from their site 175 def _fetch_supported_languages(resp): 176 177 # response is a js file with regions as an embedded object 178 response_page = resp.text 179 response_page = response_page[response_page.find('regions:{') + 8 :] 180 response_page = response_page[: response_page.find('}') + 1] 181 182 regions_json = loads(response_page) 183 supported_languages = map((lambda x: x[3:] + '-' + x[:2].upper()), regions_json.keys()) 184 185 return list(supported_languages) 186 [end of searx/engines/duckduckgo.py] </code> I need 
you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py --- a/searx/engines/duckduckgo.py +++ b/searx/engines/duckduckgo.py @@ -73,6 +73,7 @@ # link again and again .. params['headers']['Content-Type'] = 'application/x-www-form-urlencoded' + params['headers']['Referer'] = 'https://lite.duckduckgo.com/' # initial page does not have an offset if params['pageno'] == 2:
{"golden_diff": "diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py\n--- a/searx/engines/duckduckgo.py\n+++ b/searx/engines/duckduckgo.py\n@@ -73,6 +73,7 @@\n # link again and again ..\n \n params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'\n+ params['headers']['Referer'] = 'https://lite.duckduckgo.com/'\n \n # initial page does not have an offset\n if params['pageno'] == 2:\n", "issue": "DuckDuckGo returning \"access denied\" errors\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n2023.01.06-b241015e\r\n\r\n**How did you install SearXNG?**\r\nsearxng-docker\r\n\r\n**What happened?**\r\nDuckDuckGo started returning \"access denied\" error messages. Very similar to previous issue #1854 \r\n\r\n**How To Reproduce**\r\nEnable DuckDuckGo and try to search anything.\r\n\r\n**Expected behavior**\r\nDDG results should return and no \"Access Denied\" error message should be displayed.\r\n\r\n**Screenshots & Logs**\r\nError message in question:\r\n![image](https://user-images.githubusercontent.com/4787751/211107269-bc163fce-162a-4a6f-b43b-7dcd1dea3138.png)\r\n\r\n- Exception: searx.exceptions.SearxEngineAccessDeniedException\r\n- Parameter: HTTP error 403\r\n- Filename: searx/search/processors/online.py:113\r\n- Function: _send_http_request\r\n- Code: response = req(params['url'], **request_args)\r\n\r\n**Additional context**\r\n\r\nIt looks like it can be fixed by adding a HTTP `Referer` header to the request.\nDuckDuckGo returning \"access denied\" errors\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n2023.01.06-b241015e\r\n\r\n**How did you install SearXNG?**\r\nsearxng-docker\r\n\r\n**What happened?**\r\nDuckDuckGo started returning \"access denied\" error messages. 
Very similar to previous issue #1854 \r\n\r\n**How To Reproduce**\r\nEnable DuckDuckGo and try to search anything.\r\n\r\n**Expected behavior**\r\nDDG results should return and no \"Access Denied\" error message should be displayed.\r\n\r\n**Screenshots & Logs**\r\nError message in question:\r\n![image](https://user-images.githubusercontent.com/4787751/211107269-bc163fce-162a-4a6f-b43b-7dcd1dea3138.png)\r\n\r\n- Exception: searx.exceptions.SearxEngineAccessDeniedException\r\n- Parameter: HTTP error 403\r\n- Filename: searx/search/processors/online.py:113\r\n- Function: _send_http_request\r\n- Code: response = req(params['url'], **request_args)\r\n\r\n**Additional context**\r\n\r\nIt looks like it can be fixed by adding a HTTP `Referer` header to the request.\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"DuckDuckGo Lite\n\"\"\"\n\nfrom json import loads\n\nfrom lxml.html import fromstring\n\nfrom searx.utils import (\n dict_subset,\n eval_xpath,\n eval_xpath_getindex,\n extract_text,\n match_language,\n)\nfrom searx.network import get\n\n# about\nabout = {\n \"website\": 'https://lite.duckduckgo.com/lite/',\n \"wikidata_id\": 'Q12805',\n \"official_api_documentation\": 'https://duckduckgo.com/api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\nsupported_languages_url = 'https://duckduckgo.com/util/u588.js'\ntime_range_support = True\nsend_accept_language_header = True\n\nlanguage_aliases = {\n 'ar-SA': 'ar-XA',\n 'es-419': 'es-XL',\n 'ja': 'jp-JP',\n 'ko': 'kr-KR',\n 'sl-SI': 'sl-SL',\n 'zh-TW': 'tzh-TW',\n 'zh-HK': 'tzh-HK',\n}\n\ntime_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}\n\n# search-url\nurl = 'https://lite.duckduckgo.com/lite/'\nurl_ping = 'https://duckduckgo.com/t/sl_l'\n\n# match query's language to a region code that duckduckgo will accept\ndef get_region_code(lang, lang_list=None):\n if lang == 'all':\n return None\n\n lang_code = match_language(lang, lang_list or [], language_aliases, 'wt-WT')\n lang_parts = lang_code.split('-')\n\n # country code goes first\n return lang_parts[1].lower() + '-' + lang_parts[0].lower()\n\n\ndef request(query, params):\n\n params['url'] = url\n params['method'] = 'POST'\n\n params['data']['q'] = query\n\n # The API is not documented, so we do some reverse engineering and emulate\n # what https://lite.duckduckgo.com/lite/ does when you press \"next Page\"\n # link again and again ..\n\n params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'\n\n # initial page does not have an offset\n if params['pageno'] == 2:\n # second page does have an offset of 30\n offset = (params['pageno'] - 1) * 30\n params['data']['s'] = offset\n params['data']['dc'] = offset + 1\n\n elif params['pageno'] > 2:\n # third and following pages do have an offset of 30 + n*50\n offset = 30 + (params['pageno'] - 2) * 50\n params['data']['s'] = offset\n params['data']['dc'] = offset + 1\n\n # initial page does not have additional data in the input form\n if params['pageno'] > 1:\n # request the second page (and more pages) needs 'o' and 'api' arguments\n params['data']['o'] = 'json'\n params['data']['api'] = 'd.js'\n\n # initial page does not have additional data in the input form\n if params['pageno'] > 2:\n # request the third page (and more pages) some more arguments\n params['data']['nextParams'] = ''\n params['data']['v'] = ''\n params['data']['vqd'] = ''\n\n 
region_code = get_region_code(params['language'], supported_languages)\n if region_code:\n params['data']['kl'] = region_code\n params['cookies']['kl'] = region_code\n\n params['data']['df'] = ''\n if params['time_range'] in time_range_dict:\n params['data']['df'] = time_range_dict[params['time_range']]\n params['cookies']['df'] = time_range_dict[params['time_range']]\n\n logger.debug(\"param data: %s\", params['data'])\n logger.debug(\"param cookies: %s\", params['cookies'])\n return params\n\n\n# get response from search-request\ndef response(resp):\n\n headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])\n get(url_ping, headers=headers_ping)\n\n if resp.status_code == 303:\n return []\n\n results = []\n doc = fromstring(resp.text)\n\n result_table = eval_xpath(doc, '//html/body/form/div[@class=\"filters\"]/table')\n if not len(result_table) >= 3:\n # no more results\n return []\n result_table = result_table[2]\n\n tr_rows = eval_xpath(result_table, './/tr')\n\n # In the last <tr> is the form of the 'previous/next page' links\n tr_rows = tr_rows[:-1]\n\n len_tr_rows = len(tr_rows)\n offset = 0\n\n while len_tr_rows >= offset + 4:\n\n # assemble table rows we need to scrap\n tr_title = tr_rows[offset]\n tr_content = tr_rows[offset + 1]\n offset += 4\n\n # ignore sponsored Adds <tr class=\"result-sponsored\">\n if tr_content.get('class') == 'result-sponsored':\n continue\n\n a_tag = eval_xpath_getindex(tr_title, './/td//a[@class=\"result-link\"]', 0, None)\n if a_tag is None:\n continue\n\n td_content = eval_xpath_getindex(tr_content, './/td[@class=\"result-snippet\"]', 0, None)\n if td_content is None:\n continue\n\n results.append(\n {\n 'title': a_tag.text_content(),\n 'content': extract_text(td_content),\n 'url': a_tag.get('href'),\n }\n )\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n\n # response is a js file with regions as an embedded object\n response_page = resp.text\n response_page = response_page[response_page.find('regions:{') + 8 :]\n response_page = response_page[: response_page.find('}') + 1]\n\n regions_json = loads(response_page)\n supported_languages = map((lambda x: x[3:] + '-' + x[:2].upper()), regions_json.keys())\n\n return list(supported_languages)\n", "path": "searx/engines/duckduckgo.py"}]}
3,126
134
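Note on the row above: the golden diff resolves the HTTP 403 by sending a `Referer` header alongside the `Content-Type` header the engine already sets, since `lite.duckduckgo.com` had begun rejecting referer-less form posts. A minimal standalone sketch of the same request shape — hypothetical, calling `httpx` directly rather than going through searx's `searx.network` wrapper, and with an illustrative query and region value:

```python
import httpx

# Hypothetical reproduction of the engine's POST outside searx; the engine
# assembles the same values into params['headers'] and params['data'].
headers = {
    "Content-Type": "application/x-www-form-urlencoded",
    "Referer": "https://lite.duckduckgo.com/",  # the header the patch adds
}
data = {"q": "test query", "kl": "wt-wt"}  # 'kl' carries the region code

resp = httpx.post("https://lite.duckduckgo.com/lite/", headers=headers, data=data)
print(resp.status_code)  # returned 403 without the Referer at the time of the issue
```

The payload itself is unchanged by the fix; the server simply treats a same-origin `Referer` as evidence of a browser-like client.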
gh_patches_debug_7525
rasdani/github-patches
git_diff
conda-forge__staged-recipes-261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Latest conda-smithy is prevent poliastro feedstock creation ``` Repository registered at github, now call 'conda smithy register-ci' Making feedstock for poliastro /Users/travis/build/conda-forge/staged-recipes/recipes/poliastro has some lint: Selectors are suggested to take a " # [<selector>]" form. Traceback (most recent call last): File ".CI/create_feedstocks.py", line 93, in <module> subprocess.check_call(['conda', 'smithy', 'recipe-lint', recipe_dir]) File "/Users/travis/miniconda/lib/python3.5/subprocess.py", line 584, in check_call raise CalledProcessError(retcode, cmd) subprocess.CalledProcessError: Command '['conda', 'smithy', 'recipe-lint', '/Users/travis/build/conda-forge/staged-recipes/recipes/poliastro']' returned non-zero exit status 1 ``` I am working on that. </issue> <code> [start of .CI/create_feedstocks.py] 1 #!/usr/bin/env python 2 """ 3 Convert all recipes into feedstocks. 4 5 This script is to be run in a TravisCI context, with all secret environment variables defined (BINSTAR_TOKEN, GH_TOKEN) 6 Such as: 7 8 export GH_TOKEN=$(cat ~/.conda-smithy/github.token) 9 10 """ 11 from __future__ import print_function 12 13 from conda_smithy.github import gh_token 14 from contextlib import contextmanager 15 from github import Github, GithubException 16 import os.path 17 import shutil 18 import subprocess 19 import tempfile 20 21 22 # Enable DEBUG to run the diagnostics, without actually creating new feedstocks. 23 DEBUG = False 24 25 26 def list_recipes(): 27 recipe_directory_name = 'recipes' 28 if os.path.isdir(recipe_directory_name): 29 recipes = os.listdir(recipe_directory_name) 30 else: 31 recipes = [] 32 33 for recipe_dir in recipes: 34 # We don't list the "example" feedstock. It is an example, and is there 35 # to be helpful. 36 if recipe_dir.startswith('example'): 37 continue 38 path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir)) 39 yield path, recipe_dir 40 41 42 @contextmanager 43 def tmp_dir(*args, **kwargs): 44 temp_dir = tempfile.mkdtemp(*args, **kwargs) 45 try: 46 yield temp_dir 47 finally: 48 shutil.rmtree(temp_dir) 49 50 51 def repo_exists(organization, name): 52 token = gh_token() 53 gh = Github(token) 54 # Use the organization provided. 
55 org = gh.get_organization(organization) 56 try: 57 org.get_repo(name) 58 return True 59 except GithubException as e: 60 if e.status == 404: 61 return False 62 raise 63 64 65 if __name__ == '__main__': 66 is_merged_pr = (os.environ.get('TRAVIS_BRANCH') == 'master' and os.environ.get('TRAVIS_PULL_REQUEST') == 'false') 67 68 smithy_conf = os.path.expanduser('~/.conda-smithy') 69 if not os.path.exists(smithy_conf): 70 os.mkdir(smithy_conf) 71 72 def write_token(name, token): 73 with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh: 74 fh.write(token) 75 if 'APPVEYOR_TOKEN' in os.environ: 76 write_token('appveyor', os.environ['APPVEYOR_TOKEN']) 77 if 'CIRCLE_TOKEN' in os.environ: 78 write_token('circle', os.environ['CIRCLE_TOKEN']) 79 if 'GH_TOKEN' in os.environ: 80 write_token('github', os.environ['GH_TOKEN']) 81 82 owner_info = ['--organization', 'conda-forge'] 83 84 print('Calculating the recipes which need to be turned into feedstocks.') 85 removed_recipes = [] 86 with tmp_dir('__feedstocks') as feedstocks_dir: 87 feedstock_dirs = [] 88 for recipe_dir, name in list_recipes(): 89 feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock') 90 os.mkdir(feedstock_dir) 91 print('Making feedstock for {}'.format(name)) 92 93 subprocess.check_call(['conda', 'smithy', 'recipe-lint', recipe_dir]) 94 95 subprocess.check_call(['conda', 'smithy', 'init', recipe_dir, 96 '--feedstock-directory', feedstock_dir]) 97 if not is_merged_pr: 98 # We just want to check that conda-smithy is doing its thing without having any metadata issues. 99 continue 100 101 feedstock_dirs.append([feedstock_dir, name, recipe_dir]) 102 103 subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token', 104 'https://conda-forge-admin:{}@github.com/conda-forge/{}'.format(os.environ['GH_TOKEN'], 105 os.path.basename(feedstock_dir))], 106 cwd=feedstock_dir) 107 108 # Sometimes we already have the feedstock created. We need to deal with that case. 109 if repo_exists('conda-forge', os.path.basename(feedstock_dir)): 110 subprocess.check_call(['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir) 111 subprocess.check_call(['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir) 112 try: 113 subprocess.check_call(['git', 'checkout', '-b', 'master', 'upstream_with_token/master'], cwd=feedstock_dir) 114 except subprocess.CalledProcessError: 115 # Sometimes, we have a repo, but there are no commits on it! Just catch that case. 116 subprocess.check_call(['git', 'checkout', '-b' 'master'], cwd=feedstock_dir) 117 else: 118 subprocess.check_call(['conda', 'smithy', 'register-github', feedstock_dir] + owner_info) 119 120 # Break the previous loop to allow the TravisCI registering to take place only once per function call. 121 # Without this, intermittent failiures to synch the TravisCI repos ensue. 122 for feedstock_dir, name, recipe_dir in feedstock_dirs: 123 subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info) 124 125 subprocess.check_call(['conda', 'smithy', 'rerender'], cwd=feedstock_dir) 126 subprocess.check_call(['git', 'commit', '-am', "Re-render the feedstock after CI registration."], cwd=feedstock_dir) 127 # Capture the output, as it may contain the GH_TOKEN. 128 out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'master'], cwd=feedstock_dir, 129 stderr=subprocess.STDOUT) 130 131 # Remove this recipe from the repo. 
132 removed_recipes.append(name) 133 if is_merged_pr: 134 subprocess.check_call(['git', 'rm', '-r', recipe_dir]) 135 136 # Commit any removed packages. 137 subprocess.check_call(['git', 'status']) 138 if removed_recipes: 139 subprocess.check_call(['git', 'checkout', os.environ.get('TRAVIS_BRANCH')]) 140 msg = ('Removed recipe{s} ({}) after converting into feedstock{s}. ' 141 '[ci skip]'.format(', '.join(removed_recipes), 142 s=('s' if len(removed_recipes) > 1 else ''))) 143 if is_merged_pr: 144 # Capture the output, as it may contain the GH_TOKEN. 145 out = subprocess.check_output(['git', 'remote', 'add', 'upstream_with_token', 146 'https://conda-forge-admin:{}@github.com/conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])], 147 stderr=subprocess.STDOUT) 148 subprocess.check_call(['git', 'commit', '-m', msg]) 149 # Capture the output, as it may contain the GH_TOKEN. 150 out = subprocess.check_output(['git', 'push', 'upstream_with_token', os.environ.get('TRAVIS_BRANCH')], 151 stderr=subprocess.STDOUT) 152 else: 153 print('Would git commit, with the following message: \n {}'.format(msg)) 154 [end of .CI/create_feedstocks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/.CI/create_feedstocks.py b/.CI/create_feedstocks.py --- a/.CI/create_feedstocks.py +++ b/.CI/create_feedstocks.py @@ -90,8 +90,6 @@ os.mkdir(feedstock_dir) print('Making feedstock for {}'.format(name)) - subprocess.check_call(['conda', 'smithy', 'recipe-lint', recipe_dir]) - subprocess.check_call(['conda', 'smithy', 'init', recipe_dir, '--feedstock-directory', feedstock_dir]) if not is_merged_pr:
{"golden_diff": "diff --git a/.CI/create_feedstocks.py b/.CI/create_feedstocks.py\n--- a/.CI/create_feedstocks.py\n+++ b/.CI/create_feedstocks.py\n@@ -90,8 +90,6 @@\n os.mkdir(feedstock_dir)\n print('Making feedstock for {}'.format(name))\n \n- subprocess.check_call(['conda', 'smithy', 'recipe-lint', recipe_dir])\n-\n subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,\n '--feedstock-directory', feedstock_dir])\n if not is_merged_pr:\n", "issue": "Latest conda-smithy is prevent poliastro feedstock creation\n```\nRepository registered at github, now call 'conda smithy register-ci'\nMaking feedstock for poliastro\n/Users/travis/build/conda-forge/staged-recipes/recipes/poliastro has some lint:\n Selectors are suggested to take a \" # [<selector>]\" form.\nTraceback (most recent call last):\n File \".CI/create_feedstocks.py\", line 93, in <module>\n subprocess.check_call(['conda', 'smithy', 'recipe-lint', recipe_dir])\n File \"/Users/travis/miniconda/lib/python3.5/subprocess.py\", line 584, in check_call\n raise CalledProcessError(retcode, cmd)\nsubprocess.CalledProcessError: Command '['conda', 'smithy', 'recipe-lint', '/Users/travis/build/conda-forge/staged-recipes/recipes/poliastro']' returned non-zero exit status 1\n```\n\nI am working on that.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nConvert all recipes into feedstocks.\n\nThis script is to be run in a TravisCI context, with all secret environment variables defined (BINSTAR_TOKEN, GH_TOKEN)\nSuch as:\n\n export GH_TOKEN=$(cat ~/.conda-smithy/github.token)\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom conda_smithy.github import gh_token\nfrom contextlib import contextmanager\nfrom github import Github, GithubException\nimport os.path\nimport shutil\nimport subprocess\nimport tempfile\n\n\n# Enable DEBUG to run the diagnostics, without actually creating new feedstocks.\nDEBUG = False\n\n\ndef list_recipes():\n recipe_directory_name = 'recipes'\n if os.path.isdir(recipe_directory_name):\n recipes = os.listdir(recipe_directory_name)\n else:\n recipes = []\n\n for recipe_dir in recipes:\n # We don't list the \"example\" feedstock. 
It is an example, and is there\n # to be helpful.\n if recipe_dir.startswith('example'):\n continue\n path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir))\n yield path, recipe_dir\n\n\n@contextmanager\ndef tmp_dir(*args, **kwargs):\n temp_dir = tempfile.mkdtemp(*args, **kwargs)\n try:\n yield temp_dir\n finally:\n shutil.rmtree(temp_dir)\n\n\ndef repo_exists(organization, name):\n token = gh_token()\n gh = Github(token)\n # Use the organization provided.\n org = gh.get_organization(organization)\n try:\n org.get_repo(name)\n return True\n except GithubException as e:\n if e.status == 404:\n return False\n raise\n\n\nif __name__ == '__main__':\n is_merged_pr = (os.environ.get('TRAVIS_BRANCH') == 'master' and os.environ.get('TRAVIS_PULL_REQUEST') == 'false')\n\n smithy_conf = os.path.expanduser('~/.conda-smithy')\n if not os.path.exists(smithy_conf):\n os.mkdir(smithy_conf)\n\n def write_token(name, token):\n with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh:\n fh.write(token)\n if 'APPVEYOR_TOKEN' in os.environ:\n write_token('appveyor', os.environ['APPVEYOR_TOKEN'])\n if 'CIRCLE_TOKEN' in os.environ:\n write_token('circle', os.environ['CIRCLE_TOKEN'])\n if 'GH_TOKEN' in os.environ:\n write_token('github', os.environ['GH_TOKEN'])\n\n owner_info = ['--organization', 'conda-forge']\n\n print('Calculating the recipes which need to be turned into feedstocks.')\n removed_recipes = []\n with tmp_dir('__feedstocks') as feedstocks_dir:\n feedstock_dirs = []\n for recipe_dir, name in list_recipes():\n feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')\n os.mkdir(feedstock_dir)\n print('Making feedstock for {}'.format(name))\n\n subprocess.check_call(['conda', 'smithy', 'recipe-lint', recipe_dir])\n\n subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,\n '--feedstock-directory', feedstock_dir])\n if not is_merged_pr:\n # We just want to check that conda-smithy is doing its thing without having any metadata issues.\n continue\n\n feedstock_dirs.append([feedstock_dir, name, recipe_dir])\n\n subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token',\n 'https://conda-forge-admin:{}@github.com/conda-forge/{}'.format(os.environ['GH_TOKEN'],\n os.path.basename(feedstock_dir))],\n cwd=feedstock_dir)\n\n # Sometimes we already have the feedstock created. We need to deal with that case.\n if repo_exists('conda-forge', os.path.basename(feedstock_dir)):\n subprocess.check_call(['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir)\n subprocess.check_call(['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir)\n try:\n subprocess.check_call(['git', 'checkout', '-b', 'master', 'upstream_with_token/master'], cwd=feedstock_dir)\n except subprocess.CalledProcessError:\n # Sometimes, we have a repo, but there are no commits on it! 
Just catch that case.\n subprocess.check_call(['git', 'checkout', '-b' 'master'], cwd=feedstock_dir)\n else:\n subprocess.check_call(['conda', 'smithy', 'register-github', feedstock_dir] + owner_info)\n\n # Break the previous loop to allow the TravisCI registering to take place only once per function call.\n # Without this, intermittent failiures to synch the TravisCI repos ensue.\n for feedstock_dir, name, recipe_dir in feedstock_dirs:\n subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)\n\n subprocess.check_call(['conda', 'smithy', 'rerender'], cwd=feedstock_dir)\n subprocess.check_call(['git', 'commit', '-am', \"Re-render the feedstock after CI registration.\"], cwd=feedstock_dir)\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'master'], cwd=feedstock_dir,\n stderr=subprocess.STDOUT)\n\n # Remove this recipe from the repo.\n removed_recipes.append(name)\n if is_merged_pr:\n subprocess.check_call(['git', 'rm', '-r', recipe_dir])\n\n # Commit any removed packages.\n subprocess.check_call(['git', 'status'])\n if removed_recipes:\n subprocess.check_call(['git', 'checkout', os.environ.get('TRAVIS_BRANCH')])\n msg = ('Removed recipe{s} ({}) after converting into feedstock{s}. '\n '[ci skip]'.format(', '.join(removed_recipes),\n s=('s' if len(removed_recipes) > 1 else '')))\n if is_merged_pr:\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'remote', 'add', 'upstream_with_token',\n 'https://conda-forge-admin:{}@github.com/conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])],\n stderr=subprocess.STDOUT)\n subprocess.check_call(['git', 'commit', '-m', msg])\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'push', 'upstream_with_token', os.environ.get('TRAVIS_BRANCH')],\n stderr=subprocess.STDOUT)\n else:\n print('Would git commit, with the following message: \\n {}'.format(msg))\n", "path": ".CI/create_feedstocks.py"}]}
2,591
122
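Note on the row above: the golden diff deletes the `conda smithy recipe-lint` call outright, so a lint complaint (here, the selector-format warning against the poliastro recipe) can no longer raise `CalledProcessError` and abort feedstock creation. If the goal had instead been to keep linting but make it non-fatal, a sketch along these lines would work — a hypothetical alternative, not what the patch does:

```python
import subprocess

def lint_recipe(recipe_dir):
    """Run conda-smithy's linter but downgrade failures to warnings."""
    # Unlike subprocess.check_call, subprocess.run does not raise on a
    # non-zero exit status, so lint findings cannot kill the whole script.
    result = subprocess.run(
        ["conda", "smithy", "recipe-lint", recipe_dir],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    if result.returncode != 0:
        print("lint warnings for {}:\n{}".format(recipe_dir, result.stdout))
    return result.returncode == 0
```

Removing the call is the simpler choice if lint enforcement already happens elsewhere, e.g. during pull-request review.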
gh_patches_debug_4516
rasdani/github-patches
git_diff
Parsl__parsl-1650
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [PBSPro] max_blocks limit is not obeyed when submitting as array-jobs **Describe the bug** Parsl keeps launching new blocks beyond the `max_blocks` limit when array-jobs mode is enabled using Parsl config `scheduler_options` parameter (e.g. #PBS -J 1-10). **To Reproduce** Enable Parsl monitoring and checkpointing Enable HighThroughputExecutor and PBSProProvider Add `#PBS -J 1-10` option to `scheduler_options` Set `max_blocks` limit to 3 **Expected behavior** No more than 3 blocks should be launched. **Actual behavior** Parsl keeps on launching new blocks. Following polling log can be seen `2020-04-23 14:41:33.575 parsl.dataflow.strategy:205 [DEBUG] Executor htex_array_jobs has 2739 active tasks, 0/4 running/pending blocks, and 60 connected workers` This means Parsl does not consider partially activated blocks (array-jobs with some jobs Running others in Queue status out of total array jobs in that block) when making a launch decision. It seems that conditional check is done here [1] but I couldn't find a place where the `provisioned_blocks` variable is updated. Could you shed some light on how this is updated? **Environment** - OS: RHEL6.1 - Python version: 3.7 (Anaconda 4.8.2) - Parsl version: master branch commit: a30ce173cf8593a34b81d5a9cdd646dcf63fa798 **Distributed Environment** - PBS Pro in NSCC's ASPIRE1 [1] https://github.com/Parsl/parsl/blob/master/parsl/providers/pbspro/pbspro.py#L109 </issue> <code> [start of parsl/providers/torque/torque.py] 1 import logging 2 import os 3 import time 4 5 from parsl.channels import LocalChannel 6 from parsl.launchers import AprunLauncher 7 from parsl.providers.provider_base import JobState, JobStatus 8 from parsl.providers.torque.template import template_string 9 from parsl.providers.cluster_provider import ClusterProvider 10 from parsl.utils import RepresentationMixin 11 12 logger = logging.getLogger(__name__) 13 14 # From the man pages for qstat for PBS/Torque systems 15 translate_table = { 16 'R': JobState.RUNNING, 17 'C': JobState.COMPLETED, # Completed after having run 18 'E': JobState.COMPLETED, # Exiting after having run 19 'H': JobState.HELD, # Held 20 'Q': JobState.PENDING, # Queued, and eligible to run 21 'W': JobState.PENDING, # Job is waiting for it's execution time (-a option) to be reached 22 'S': JobState.HELD 23 } # Suspended 24 25 26 class TorqueProvider(ClusterProvider, RepresentationMixin): 27 """Torque Execution Provider 28 29 This provider uses sbatch to submit, squeue for status, and scancel to cancel 30 jobs. The sbatch script to be used is created from a template file in this 31 same module. 32 33 Parameters 34 ---------- 35 channel : Channel 36 Channel for accessing this provider. Possible channels include 37 :class:`~parsl.channels.LocalChannel` (the default), 38 :class:`~parsl.channels.SSHChannel`, or 39 :class:`~parsl.channels.SSHInteractiveLoginChannel`. 40 account : str 41 Account the job will be charged against. 42 queue : str 43 Torque queue to request blocks from. 44 nodes_per_block : int 45 Nodes to provision per block. 46 init_blocks : int 47 Number of blocks to provision at the start of the run. Default is 1. 48 min_blocks : int 49 Minimum number of blocks to maintain. Default is 0. 50 max_blocks : int 51 Maximum number of blocks to maintain. 52 parallelism : float 53 Ratio of provisioned task slots to active tasks. 
A parallelism value of 1 represents aggressive 54 scaling where as many resources as possible are used; parallelism close to 0 represents 55 the opposite situation in which as few resources as possible (i.e., min_blocks) are used. 56 walltime : str 57 Walltime requested per block in HH:MM:SS. 58 scheduler_options : str 59 String to prepend to the #PBS blocks in the submit script to the scheduler. 60 worker_init : str 61 Command to be run before starting a worker, such as 'module load Anaconda; source activate env'. 62 launcher : Launcher 63 Launcher for this provider. Possible launchers include 64 :class:`~parsl.launchers.AprunLauncher` (the default), or 65 :class:`~parsl.launchers.SingleNodeLauncher`, 66 67 """ 68 def __init__(self, 69 channel=LocalChannel(), 70 account=None, 71 queue=None, 72 scheduler_options='', 73 worker_init='', 74 nodes_per_block=1, 75 init_blocks=1, 76 min_blocks=0, 77 max_blocks=100, 78 parallelism=1, 79 launcher=AprunLauncher(), 80 walltime="00:20:00", 81 cmd_timeout=120): 82 label = 'torque' 83 super().__init__(label, 84 channel, 85 nodes_per_block, 86 init_blocks, 87 min_blocks, 88 max_blocks, 89 parallelism, 90 walltime, 91 launcher, 92 cmd_timeout=cmd_timeout) 93 94 self.account = account 95 self.queue = queue 96 self.scheduler_options = scheduler_options 97 self.worker_init = worker_init 98 self.provisioned_blocks = 0 99 self.template_string = template_string 100 101 # Dictionary that keeps track of jobs, keyed on job_id 102 self.resources = {} 103 104 def _status(self): 105 ''' Internal: Do not call. Returns the status list for a list of job_ids 106 107 Args: 108 self 109 110 Returns: 111 [status...] : Status list of all jobs 112 ''' 113 114 job_ids = list(self.resources.keys()) 115 job_id_list = ' '.join(self.resources.keys()) 116 117 jobs_missing = list(self.resources.keys()) 118 119 retcode, stdout, stderr = self.execute_wait("qstat {0}".format(job_id_list)) 120 for line in stdout.split('\n'): 121 parts = line.split() 122 if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'): 123 continue 124 job_id = parts[0] # likely truncated 125 for long_job_id in job_ids: 126 if long_job_id.startswith(job_id): 127 logger.debug('coerced job_id %s -> %s', job_id, long_job_id) 128 job_id = long_job_id 129 break 130 state = translate_table.get(parts[4], JobState.UNKNOWN) 131 self.resources[job_id]['status'] = JobStatus(state) 132 jobs_missing.remove(job_id) 133 134 # squeue does not report on jobs that are not running. So we are filling in the 135 # blanks for missing jobs, we might lose some information about why the jobs failed. 136 for missing_job in jobs_missing: 137 self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED) 138 139 def submit(self, command, tasks_per_node, job_name="parsl.torque"): 140 ''' Submits the command onto an Local Resource Manager job. 141 Submit returns an ID that corresponds to the task that was just submitted. 142 143 If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer 144 145 If tasks_per_node == 1: 146 A single node is provisioned 147 148 If tasks_per_node > 1 : 149 tasks_per_node number of nodes are provisioned. 150 151 Args: 152 - command :(String) Commandline invocation to be made on the remote side. 
153 - tasks_per_node (int) : command invocations to be launched per node 154 155 Kwargs: 156 - job_name (String): Name for job, must be unique 157 158 Returns: 159 - None: At capacity, cannot provision more 160 - job_id: (string) Identifier for the job 161 162 ''' 163 164 if self.provisioned_blocks >= self.max_blocks: 165 logger.warning("[%s] at capacity, cannot add more blocks now", self.label) 166 return None 167 168 # Set job name 169 job_name = "parsl.{0}.{1}".format(job_name, time.time()) 170 171 # Set script path 172 script_path = "{0}/{1}.submit".format(self.script_dir, job_name) 173 script_path = os.path.abspath(script_path) 174 175 logger.debug("Requesting nodes_per_block:%s tasks_per_node:%s", self.nodes_per_block, 176 tasks_per_node) 177 178 job_config = {} 179 # TODO : script_path might need to change to accommodate script dir set via channels 180 job_config["submit_script_dir"] = self.channel.script_dir 181 job_config["nodes"] = self.nodes_per_block 182 job_config["task_blocks"] = self.nodes_per_block * tasks_per_node 183 job_config["nodes_per_block"] = self.nodes_per_block 184 job_config["tasks_per_node"] = tasks_per_node 185 job_config["walltime"] = self.walltime 186 job_config["scheduler_options"] = self.scheduler_options 187 job_config["worker_init"] = self.worker_init 188 job_config["user_script"] = command 189 190 # Wrap the command 191 job_config["user_script"] = self.launcher(command, 192 tasks_per_node, 193 self.nodes_per_block) 194 195 logger.debug("Writing submit script") 196 self._write_submit_script(self.template_string, script_path, job_name, job_config) 197 198 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir) 199 200 submit_options = '' 201 if self.queue is not None: 202 submit_options = '{0} -q {1}'.format(submit_options, self.queue) 203 if self.account is not None: 204 submit_options = '{0} -A {1}'.format(submit_options, self.account) 205 206 launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path) 207 retcode, stdout, stderr = self.execute_wait(launch_cmd) 208 209 job_id = None 210 if retcode == 0: 211 for line in stdout.split('\n'): 212 if line.strip(): 213 job_id = line.strip() 214 self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)} 215 else: 216 message = "Command '{}' failed with return code {}".format(launch_cmd, retcode) 217 if (stdout is not None) and (stderr is not None): 218 message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip()) 219 logger.error(message) 220 221 return job_id 222 223 def cancel(self, job_ids): 224 ''' Cancels the jobs specified by a list of job ids 225 226 Args: 227 job_ids : [<job_id> ...] 228 229 Returns : 230 [True/False...] : If the cancel operation fails the entire list will be False. 231 ''' 232 233 job_id_list = ' '.join(job_ids) 234 retcode, stdout, stderr = self.execute_wait("qdel {0}".format(job_id_list)) 235 rets = None 236 if retcode == 0: 237 for jid in job_ids: 238 self.resources[jid]['status'] = JobStatus(JobState.COMPLETED) # Setting state to exiting 239 rets = [True for i in job_ids] 240 else: 241 rets = [False for i in job_ids] 242 243 return rets 244 245 @property 246 def status_polling_interval(self): 247 return 60 248 249 250 if __name__ == "__main__": 251 252 print("None") 253 [end of parsl/providers/torque/torque.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parsl/providers/torque/torque.py b/parsl/providers/torque/torque.py --- a/parsl/providers/torque/torque.py +++ b/parsl/providers/torque/torque.py @@ -13,6 +13,7 @@ # From the man pages for qstat for PBS/Torque systems translate_table = { + 'B': JobState.RUNNING, # This state is returned for running array jobs 'R': JobState.RUNNING, 'C': JobState.COMPLETED, # Completed after having run 'E': JobState.COMPLETED, # Exiting after having run
{"golden_diff": "diff --git a/parsl/providers/torque/torque.py b/parsl/providers/torque/torque.py\n--- a/parsl/providers/torque/torque.py\n+++ b/parsl/providers/torque/torque.py\n@@ -13,6 +13,7 @@\n \n # From the man pages for qstat for PBS/Torque systems\n translate_table = {\n+ 'B': JobState.RUNNING, # This state is returned for running array jobs\n 'R': JobState.RUNNING,\n 'C': JobState.COMPLETED, # Completed after having run\n 'E': JobState.COMPLETED, # Exiting after having run\n", "issue": "[PBSPro] max_blocks limit is not obeyed when submitting as array-jobs \n**Describe the bug**\r\nParsl keeps launching new blocks beyond the `max_blocks` limit when array-jobs mode is enabled using Parsl config `scheduler_options` parameter (e.g. #PBS -J 1-10).\r\n\r\n**To Reproduce**\r\nEnable Parsl monitoring and checkpointing\r\nEnable HighThroughputExecutor and PBSProProvider\r\nAdd `#PBS -J 1-10` option to `scheduler_options`\r\nSet `max_blocks` limit to 3\r\n\r\n**Expected behavior**\r\nNo more than 3 blocks should be launched.\r\n\r\n**Actual behavior**\r\nParsl keeps on launching new blocks. Following polling log can be seen\r\n`2020-04-23 14:41:33.575 parsl.dataflow.strategy:205 [DEBUG] Executor htex_array_jobs has 2739 active tasks, 0/4 running/pending blocks, and 60 connected workers`\r\n\r\nThis means Parsl does not consider partially activated blocks (array-jobs with some jobs Running others in Queue status out of total array jobs in that block) when making a launch decision.\r\nIt seems that conditional check is done here [1] but I couldn't find a place where the `provisioned_blocks` variable is updated. Could you shed some light on how this is updated?\r\n\r\n**Environment**\r\n- OS: RHEL6.1\r\n- Python version: 3.7 (Anaconda 4.8.2)\r\n- Parsl version: master branch commit: a30ce173cf8593a34b81d5a9cdd646dcf63fa798\r\n\r\n**Distributed Environment**\r\n- PBS Pro in NSCC's ASPIRE1\r\n\r\n[1] https://github.com/Parsl/parsl/blob/master/parsl/providers/pbspro/pbspro.py#L109\n", "before_files": [{"content": "import logging\nimport os\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.launchers import AprunLauncher\nfrom parsl.providers.provider_base import JobState, JobStatus\nfrom parsl.providers.torque.template import template_string\nfrom parsl.providers.cluster_provider import ClusterProvider\nfrom parsl.utils import RepresentationMixin\n\nlogger = logging.getLogger(__name__)\n\n# From the man pages for qstat for PBS/Torque systems\ntranslate_table = {\n 'R': JobState.RUNNING,\n 'C': JobState.COMPLETED, # Completed after having run\n 'E': JobState.COMPLETED, # Exiting after having run\n 'H': JobState.HELD, # Held\n 'Q': JobState.PENDING, # Queued, and eligible to run\n 'W': JobState.PENDING, # Job is waiting for it's execution time (-a option) to be reached\n 'S': JobState.HELD\n} # Suspended\n\n\nclass TorqueProvider(ClusterProvider, RepresentationMixin):\n \"\"\"Torque Execution Provider\n\n This provider uses sbatch to submit, squeue for status, and scancel to cancel\n jobs. The sbatch script to be used is created from a template file in this\n same module.\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. 
Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n account : str\n Account the job will be charged against.\n queue : str\n Torque queue to request blocks from.\n nodes_per_block : int\n Nodes to provision per block.\n init_blocks : int\n Number of blocks to provision at the start of the run. Default is 1.\n min_blocks : int\n Minimum number of blocks to maintain. Default is 0.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.AprunLauncher` (the default), or\n :class:`~parsl.launchers.SingleNodeLauncher`,\n\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n account=None,\n queue=None,\n scheduler_options='',\n worker_init='',\n nodes_per_block=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=100,\n parallelism=1,\n launcher=AprunLauncher(),\n walltime=\"00:20:00\",\n cmd_timeout=120):\n label = 'torque'\n super().__init__(label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher,\n cmd_timeout=cmd_timeout)\n\n self.account = account\n self.queue = queue\n self.scheduler_options = scheduler_options\n self.worker_init = worker_init\n self.provisioned_blocks = 0\n self.template_string = template_string\n\n # Dictionary that keeps track of jobs, keyed on job_id\n self.resources = {}\n\n def _status(self):\n ''' Internal: Do not call. Returns the status list for a list of job_ids\n\n Args:\n self\n\n Returns:\n [status...] : Status list of all jobs\n '''\n\n job_ids = list(self.resources.keys())\n job_id_list = ' '.join(self.resources.keys())\n\n jobs_missing = list(self.resources.keys())\n\n retcode, stdout, stderr = self.execute_wait(\"qstat {0}\".format(job_id_list))\n for line in stdout.split('\\n'):\n parts = line.split()\n if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):\n continue\n job_id = parts[0] # likely truncated\n for long_job_id in job_ids:\n if long_job_id.startswith(job_id):\n logger.debug('coerced job_id %s -> %s', job_id, long_job_id)\n job_id = long_job_id\n break\n state = translate_table.get(parts[4], JobState.UNKNOWN)\n self.resources[job_id]['status'] = JobStatus(state)\n jobs_missing.remove(job_id)\n\n # squeue does not report on jobs that are not running. So we are filling in the\n # blanks for missing jobs, we might lose some information about why the jobs failed.\n for missing_job in jobs_missing:\n self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)\n\n def submit(self, command, tasks_per_node, job_name=\"parsl.torque\"):\n ''' Submits the command onto an Local Resource Manager job.\n Submit returns an ID that corresponds to the task that was just submitted.\n\n If tasks_per_node < 1 : ! This is illegal. 
tasks_per_node should be integer\n\n If tasks_per_node == 1:\n A single node is provisioned\n\n If tasks_per_node > 1 :\n tasks_per_node number of nodes are provisioned.\n\n Args:\n - command :(String) Commandline invocation to be made on the remote side.\n - tasks_per_node (int) : command invocations to be launched per node\n\n Kwargs:\n - job_name (String): Name for job, must be unique\n\n Returns:\n - None: At capacity, cannot provision more\n - job_id: (string) Identifier for the job\n\n '''\n\n if self.provisioned_blocks >= self.max_blocks:\n logger.warning(\"[%s] at capacity, cannot add more blocks now\", self.label)\n return None\n\n # Set job name\n job_name = \"parsl.{0}.{1}\".format(job_name, time.time())\n\n # Set script path\n script_path = \"{0}/{1}.submit\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n\n logger.debug(\"Requesting nodes_per_block:%s tasks_per_node:%s\", self.nodes_per_block,\n tasks_per_node)\n\n job_config = {}\n # TODO : script_path might need to change to accommodate script dir set via channels\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"nodes\"] = self.nodes_per_block\n job_config[\"task_blocks\"] = self.nodes_per_block * tasks_per_node\n job_config[\"nodes_per_block\"] = self.nodes_per_block\n job_config[\"tasks_per_node\"] = tasks_per_node\n job_config[\"walltime\"] = self.walltime\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n logger.debug(\"Writing submit script\")\n self._write_submit_script(self.template_string, script_path, job_name, job_config)\n\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n submit_options = ''\n if self.queue is not None:\n submit_options = '{0} -q {1}'.format(submit_options, self.queue)\n if self.account is not None:\n submit_options = '{0} -A {1}'.format(submit_options, self.account)\n\n launch_cmd = \"qsub {0} {1}\".format(submit_options, channel_script_path)\n retcode, stdout, stderr = self.execute_wait(launch_cmd)\n\n job_id = None\n if retcode == 0:\n for line in stdout.split('\\n'):\n if line.strip():\n job_id = line.strip()\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n else:\n message = \"Command '{}' failed with return code {}\".format(launch_cmd, retcode)\n if (stdout is not None) and (stderr is not None):\n message += \"\\nstderr:{}\\nstdout{}\".format(stderr.strip(), stdout.strip())\n logger.error(message)\n\n return job_id\n\n def cancel(self, job_ids):\n ''' Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [<job_id> ...]\n\n Returns :\n [True/False...] : If the cancel operation fails the entire list will be False.\n '''\n\n job_id_list = ' '.join(job_ids)\n retcode, stdout, stderr = self.execute_wait(\"qdel {0}\".format(job_id_list))\n rets = None\n if retcode == 0:\n for jid in job_ids:\n self.resources[jid]['status'] = JobStatus(JobState.COMPLETED) # Setting state to exiting\n rets = [True for i in job_ids]\n else:\n rets = [False for i in job_ids]\n\n return rets\n\n @property\n def status_polling_interval(self):\n return 60\n\n\nif __name__ == \"__main__\":\n\n print(\"None\")\n", "path": "parsl/providers/torque/torque.py"}]}
3,740
141
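Note on the row above: the root cause is a missing state mapping rather than `provisioned_blocks` bookkeeping. PBS Pro reports a running array job with state `B` ("begun"), which `translate_table` did not contain, so `translate_table.get(parts[4], JobState.UNKNOWN)` in `_status` classified every active array job as UNKNOWN; the golden diff maps `B` to `JobState.RUNNING` in the Torque provider's table, which the PBS Pro provider inherits. A toy illustration of the lookup before and after the fix — hypothetical, using a stand-in enum rather than parsl's real `JobState`:

```python
from enum import Enum

class JobState(Enum):  # stand-in for parsl.providers.provider_base.JobState
    RUNNING = "running"
    PENDING = "pending"
    UNKNOWN = "unknown"

translate_table = {"R": JobState.RUNNING, "Q": JobState.PENDING}

qstat_state = "B"  # what PBS Pro reports for an array job with active subjobs
print(translate_table.get(qstat_state, JobState.UNKNOWN))  # JobState.UNKNOWN

translate_table["B"] = JobState.RUNNING  # the one-line fix from the golden diff
print(translate_table.get(qstat_state, JobState.UNKNOWN))  # JobState.RUNNING
```

Once `B` maps to RUNNING, the scaling strategy's running/pending block counts include partially started array jobs, so the `max_blocks` limit is honored again.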
gh_patches_debug_2565
rasdani/github-patches
git_diff
ibis-project__ibis-1951
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement Interval arithmetic on one or more backends After subtraction is in from #1489, we'll want to implement this on at least one backend. </issue> <code> [start of ibis/pandas/execution/temporal.py] 1 import datetime 2 3 import numpy as np 4 import pandas as pd 5 from pandas.core.groupby import SeriesGroupBy 6 7 import ibis 8 import ibis.expr.datatypes as dt 9 import ibis.expr.operations as ops 10 from ibis.pandas.core import ( 11 date_types, 12 integer_types, 13 numeric_types, 14 timedelta_types, 15 timestamp_types, 16 ) 17 from ibis.pandas.dispatch import execute_node, pre_execute 18 19 20 @execute_node.register(ops.Strftime, pd.Timestamp, str) 21 def execute_strftime_timestamp_str(op, data, format_string, **kwargs): 22 return data.strftime(format_string) 23 24 25 @execute_node.register(ops.Strftime, pd.Series, str) 26 def execute_strftime_series_str(op, data, format_string, **kwargs): 27 return data.dt.strftime(format_string) 28 29 30 @execute_node.register(ops.ExtractTemporalField, pd.Timestamp) 31 def execute_extract_timestamp_field_timestamp(op, data, **kwargs): 32 field_name = type(op).__name__.lower().replace('extract', '') 33 return getattr(data, field_name) 34 35 36 @execute_node.register(ops.ExtractMillisecond, pd.Timestamp) 37 def execute_extract_millisecond_timestamp(op, data, **kwargs): 38 return int(data.microsecond // 1000.0) 39 40 41 @execute_node.register(ops.ExtractTemporalField, pd.Series) 42 def execute_extract_timestamp_field_series(op, data, **kwargs): 43 field_name = type(op).__name__.lower().replace('extract', '') 44 return getattr(data.dt, field_name).astype(np.int32) 45 46 47 @execute_node.register( 48 ops.BetweenTime, 49 pd.Series, 50 (pd.Series, str, datetime.time), 51 (pd.Series, str, datetime.time), 52 ) 53 def execute_between_time(op, data, lower, upper, **kwargs): 54 indexer = pd.DatetimeIndex(data).indexer_between_time(lower, upper) 55 result = np.zeros(len(data), dtype=np.bool_) 56 result[indexer] = True 57 return pd.Series(result) 58 59 60 @execute_node.register(ops.Date, pd.Series) 61 def execute_timestamp_date(op, data, **kwargs): 62 return data.dt.floor('d') 63 64 65 @execute_node.register((ops.TimestampTruncate, ops.DateTruncate), pd.Series) 66 def execute_timestamp_truncate(op, data, **kwargs): 67 dtype = 'datetime64[{}]'.format(op.unit) 68 array = data.values.astype(dtype) 69 return pd.Series(array, name=data.name) 70 71 72 OFFSET_CLASS = { 73 "Y": pd.offsets.DateOffset, 74 "Q": pd.offsets.DateOffset, 75 "M": pd.offsets.DateOffset, 76 "W": pd.offsets.DateOffset, 77 # all other units are timedelta64s 78 } 79 80 81 @execute_node.register(ops.IntervalFromInteger, pd.Series) 82 def execute_interval_from_integer_series(op, data, **kwargs): 83 unit = op.unit 84 resolution = "{}s".format(op.resolution) 85 cls = OFFSET_CLASS.get(unit, None) 86 87 # fast path for timedelta conversion 88 if cls is None: 89 return data.astype("timedelta64[{}]".format(unit)) 90 return data.apply( 91 lambda n, cls=cls, resolution=resolution: cls(**{resolution: n}) 92 ) 93 94 95 @execute_node.register(ops.IntervalFromInteger, integer_types) 96 def execute_interval_from_integer_integer_types(op, data, **kwargs): 97 unit = op.unit 98 resolution = "{}s".format(op.resolution) 99 cls = OFFSET_CLASS.get(unit, None) 100 101 if cls is None: 102 return pd.Timedelta(data, unit=unit) 103 return cls(**{resolution: data}) 104 105 106 @execute_node.register(ops.Cast, pd.Series, 
dt.Interval) 107 def execute_cast_integer_to_interval_series(op, data, type, **kwargs): 108 to = op.to 109 unit = to.unit 110 resolution = "{}s".format(to.resolution) 111 cls = OFFSET_CLASS.get(unit, None) 112 113 if cls is None: 114 return data.astype("timedelta64[{}]".format(unit)) 115 return data.apply( 116 lambda n, cls=cls, resolution=resolution: cls(**{resolution: n}) 117 ) 118 119 120 @execute_node.register(ops.Cast, integer_types, dt.Interval) 121 def execute_cast_integer_to_interval_integer_types(op, data, type, **kwargs): 122 to = op.to 123 unit = to.unit 124 resolution = "{}s".format(to.resolution) 125 cls = OFFSET_CLASS.get(unit, None) 126 127 if cls is None: 128 return pd.Timedelta(data, unit=unit) 129 return cls(**{resolution: data}) 130 131 132 @execute_node.register(ops.TimestampAdd, timestamp_types, timedelta_types) 133 def execute_timestamp_add_datetime_timedelta(op, left, right, **kwargs): 134 return pd.Timestamp(left) + pd.Timedelta(right) 135 136 137 @execute_node.register(ops.TimestampAdd, timestamp_types, pd.Series) 138 def execute_timestamp_add_datetime_series(op, left, right, **kwargs): 139 return pd.Timestamp(left) + right 140 141 142 @execute_node.register(ops.IntervalAdd, timedelta_types, timedelta_types) 143 def execute_interval_add_delta_delta(op, left, right, **kwargs): 144 return op.op(pd.Timedelta(left), pd.Timedelta(right)) 145 146 147 @execute_node.register(ops.IntervalAdd, timedelta_types, pd.Series) 148 @execute_node.register( 149 ops.IntervalMultiply, timedelta_types, numeric_types + (pd.Series,) 150 ) 151 def execute_interval_add_multiply_delta_series(op, left, right, **kwargs): 152 return op.op(pd.Timedelta(left), right) 153 154 155 @execute_node.register( 156 (ops.TimestampAdd, ops.IntervalAdd), pd.Series, timedelta_types 157 ) 158 def execute_timestamp_interval_add_series_delta(op, left, right, **kwargs): 159 return left + pd.Timedelta(right) 160 161 162 @execute_node.register( 163 (ops.TimestampAdd, ops.IntervalAdd), pd.Series, pd.Series 164 ) 165 def execute_timestamp_interval_add_series_series(op, left, right, **kwargs): 166 return left + right 167 168 169 @execute_node.register(ops.TimestampSub, timestamp_types, timedelta_types) 170 def execute_timestamp_sub_datetime_timedelta(op, left, right, **kwargs): 171 return pd.Timestamp(left) - pd.Timedelta(right) 172 173 174 @execute_node.register( 175 (ops.TimestampDiff, ops.TimestampSub), timestamp_types, pd.Series 176 ) 177 def execute_timestamp_diff_sub_datetime_series(op, left, right, **kwargs): 178 return pd.Timestamp(left) - right 179 180 181 @execute_node.register(ops.TimestampSub, pd.Series, timedelta_types) 182 def execute_timestamp_sub_series_timedelta(op, left, right, **kwargs): 183 return left - pd.Timedelta(right) 184 185 186 @execute_node.register( 187 (ops.TimestampDiff, ops.TimestampSub), pd.Series, pd.Series 188 ) 189 def execute_timestamp_diff_sub_series_series(op, left, right, **kwargs): 190 return left - right 191 192 193 @execute_node.register(ops.TimestampDiff, timestamp_types, timestamp_types) 194 def execute_timestamp_diff_datetime_datetime(op, left, right, **kwargs): 195 return pd.Timestamp(left) - pd.Timestamp(right) 196 197 198 @execute_node.register(ops.TimestampDiff, pd.Series, timestamp_types) 199 def execute_timestamp_diff_series_datetime(op, left, right, **kwargs): 200 return left - pd.Timestamp(right) 201 202 203 @execute_node.register( 204 ops.IntervalMultiply, pd.Series, numeric_types + (pd.Series,) 205 ) 206 @execute_node.register( 207 ops.IntervalFloorDivide, 208 
(pd.Timedelta, pd.Series), 209 numeric_types + (pd.Series,), 210 ) 211 def execute_interval_multiply_fdiv_series_numeric(op, left, right, **kwargs): 212 return op.op(left, right) 213 214 215 @execute_node.register(ops.TimestampFromUNIX, (pd.Series,) + integer_types) 216 def execute_timestamp_from_unix(op, data, **kwargs): 217 return pd.to_datetime(data, unit=op.unit) 218 219 220 @pre_execute.register(ops.TimestampNow) 221 @pre_execute.register(ops.TimestampNow, ibis.client.Client) 222 def pre_execute_timestamp_now(op, *args, **kwargs): 223 return {op: pd.Timestamp('now')} 224 225 226 @execute_node.register(ops.DayOfWeekIndex, (str, datetime.date)) 227 def execute_day_of_week_index_any(op, value, **kwargs): 228 return pd.Timestamp(value).dayofweek 229 230 231 @execute_node.register(ops.DayOfWeekIndex, pd.Series) 232 def execute_day_of_week_index_series(op, data, **kwargs): 233 return data.dt.dayofweek.astype(np.int16) 234 235 236 @execute_node.register(ops.DayOfWeekIndex, SeriesGroupBy) 237 def execute_day_of_week_index_series_group_by(op, data, **kwargs): 238 groupings = data.grouper.groupings 239 return data.obj.dt.dayofweek.astype(np.int16).groupby(groupings) 240 241 242 def day_name(obj): 243 """Backwards compatible name of day getting function. 244 245 Parameters 246 ---------- 247 obj : Union[Series, pd.Timestamp] 248 249 Returns 250 ------- 251 str 252 The name of the day corresponding to `obj` 253 """ 254 try: 255 return obj.day_name() 256 except AttributeError: 257 return obj.weekday_name 258 259 260 @execute_node.register(ops.DayOfWeekName, (str, datetime.date)) 261 def execute_day_of_week_name_any(op, value, **kwargs): 262 return day_name(pd.Timestamp(value)) 263 264 265 @execute_node.register(ops.DayOfWeekName, pd.Series) 266 def execute_day_of_week_name_series(op, data, **kwargs): 267 return day_name(data.dt) 268 269 270 @execute_node.register(ops.DayOfWeekName, SeriesGroupBy) 271 def execute_day_of_week_name_series_group_by(op, data, **kwargs): 272 return day_name(data.obj.dt).groupby(data.grouper.groupings) 273 274 275 @execute_node.register(ops.DateSub, date_types, timedelta_types) 276 @execute_node.register((ops.DateDiff, ops.DateSub), date_types, pd.Series) 277 @execute_node.register(ops.DateSub, pd.Series, timedelta_types) 278 @execute_node.register((ops.DateDiff, ops.DateSub), pd.Series, pd.Series) 279 @execute_node.register(ops.DateDiff, date_types, date_types) 280 @execute_node.register(ops.DateDiff, pd.Series, date_types) 281 def execute_date_sub_diff(op, left, right, **kwargs): 282 return left - right 283 284 285 @execute_node.register(ops.DateAdd, pd.Series, timedelta_types) 286 @execute_node.register(ops.DateAdd, timedelta_types, pd.Series) 287 @execute_node.register(ops.DateAdd, pd.Series, pd.Series) 288 @execute_node.register(ops.DateAdd, date_types, timedelta_types) 289 @execute_node.register(ops.DateAdd, timedelta_types, date_types) 290 @execute_node.register(ops.DateAdd, date_types, pd.Series) 291 @execute_node.register(ops.DateAdd, pd.Series, date_types) 292 def execute_date_add(op, left, right, **kwargs): 293 return left + right 294 [end of ibis/pandas/execution/temporal.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ibis/pandas/execution/temporal.py b/ibis/pandas/execution/temporal.py --- a/ibis/pandas/execution/temporal.py +++ b/ibis/pandas/execution/temporal.py @@ -184,7 +184,9 @@ @execute_node.register( - (ops.TimestampDiff, ops.TimestampSub), pd.Series, pd.Series + (ops.TimestampDiff, ops.TimestampSub, ops.IntervalSubtract), + pd.Series, + pd.Series ) def execute_timestamp_diff_sub_series_series(op, left, right, **kwargs): return left - right
{"golden_diff": "diff --git a/ibis/pandas/execution/temporal.py b/ibis/pandas/execution/temporal.py\n--- a/ibis/pandas/execution/temporal.py\n+++ b/ibis/pandas/execution/temporal.py\n@@ -184,7 +184,9 @@\n \n \n @execute_node.register(\n- (ops.TimestampDiff, ops.TimestampSub), pd.Series, pd.Series\n+ (ops.TimestampDiff, ops.TimestampSub, ops.IntervalSubtract),\n+ pd.Series,\n+ pd.Series\n )\n def execute_timestamp_diff_sub_series_series(op, left, right, **kwargs):\n return left - right\n", "issue": "Implement Interval arithmetic on one or more backends\nAfter subtraction is in from #1489, we'll want to implement this on at least one backend.\n", "before_files": [{"content": "import datetime\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.groupby import SeriesGroupBy\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nfrom ibis.pandas.core import (\n date_types,\n integer_types,\n numeric_types,\n timedelta_types,\n timestamp_types,\n)\nfrom ibis.pandas.dispatch import execute_node, pre_execute\n\n\n@execute_node.register(ops.Strftime, pd.Timestamp, str)\ndef execute_strftime_timestamp_str(op, data, format_string, **kwargs):\n return data.strftime(format_string)\n\n\n@execute_node.register(ops.Strftime, pd.Series, str)\ndef execute_strftime_series_str(op, data, format_string, **kwargs):\n return data.dt.strftime(format_string)\n\n\n@execute_node.register(ops.ExtractTemporalField, pd.Timestamp)\ndef execute_extract_timestamp_field_timestamp(op, data, **kwargs):\n field_name = type(op).__name__.lower().replace('extract', '')\n return getattr(data, field_name)\n\n\n@execute_node.register(ops.ExtractMillisecond, pd.Timestamp)\ndef execute_extract_millisecond_timestamp(op, data, **kwargs):\n return int(data.microsecond // 1000.0)\n\n\n@execute_node.register(ops.ExtractTemporalField, pd.Series)\ndef execute_extract_timestamp_field_series(op, data, **kwargs):\n field_name = type(op).__name__.lower().replace('extract', '')\n return getattr(data.dt, field_name).astype(np.int32)\n\n\n@execute_node.register(\n ops.BetweenTime,\n pd.Series,\n (pd.Series, str, datetime.time),\n (pd.Series, str, datetime.time),\n)\ndef execute_between_time(op, data, lower, upper, **kwargs):\n indexer = pd.DatetimeIndex(data).indexer_between_time(lower, upper)\n result = np.zeros(len(data), dtype=np.bool_)\n result[indexer] = True\n return pd.Series(result)\n\n\n@execute_node.register(ops.Date, pd.Series)\ndef execute_timestamp_date(op, data, **kwargs):\n return data.dt.floor('d')\n\n\n@execute_node.register((ops.TimestampTruncate, ops.DateTruncate), pd.Series)\ndef execute_timestamp_truncate(op, data, **kwargs):\n dtype = 'datetime64[{}]'.format(op.unit)\n array = data.values.astype(dtype)\n return pd.Series(array, name=data.name)\n\n\nOFFSET_CLASS = {\n \"Y\": pd.offsets.DateOffset,\n \"Q\": pd.offsets.DateOffset,\n \"M\": pd.offsets.DateOffset,\n \"W\": pd.offsets.DateOffset,\n # all other units are timedelta64s\n}\n\n\n@execute_node.register(ops.IntervalFromInteger, pd.Series)\ndef execute_interval_from_integer_series(op, data, **kwargs):\n unit = op.unit\n resolution = \"{}s\".format(op.resolution)\n cls = OFFSET_CLASS.get(unit, None)\n\n # fast path for timedelta conversion\n if cls is None:\n return data.astype(\"timedelta64[{}]\".format(unit))\n return data.apply(\n lambda n, cls=cls, resolution=resolution: cls(**{resolution: n})\n )\n\n\n@execute_node.register(ops.IntervalFromInteger, integer_types)\ndef execute_interval_from_integer_integer_types(op, data, 
**kwargs):\n unit = op.unit\n resolution = \"{}s\".format(op.resolution)\n cls = OFFSET_CLASS.get(unit, None)\n\n if cls is None:\n return pd.Timedelta(data, unit=unit)\n return cls(**{resolution: data})\n\n\n@execute_node.register(ops.Cast, pd.Series, dt.Interval)\ndef execute_cast_integer_to_interval_series(op, data, type, **kwargs):\n to = op.to\n unit = to.unit\n resolution = \"{}s\".format(to.resolution)\n cls = OFFSET_CLASS.get(unit, None)\n\n if cls is None:\n return data.astype(\"timedelta64[{}]\".format(unit))\n return data.apply(\n lambda n, cls=cls, resolution=resolution: cls(**{resolution: n})\n )\n\n\n@execute_node.register(ops.Cast, integer_types, dt.Interval)\ndef execute_cast_integer_to_interval_integer_types(op, data, type, **kwargs):\n to = op.to\n unit = to.unit\n resolution = \"{}s\".format(to.resolution)\n cls = OFFSET_CLASS.get(unit, None)\n\n if cls is None:\n return pd.Timedelta(data, unit=unit)\n return cls(**{resolution: data})\n\n\n@execute_node.register(ops.TimestampAdd, timestamp_types, timedelta_types)\ndef execute_timestamp_add_datetime_timedelta(op, left, right, **kwargs):\n return pd.Timestamp(left) + pd.Timedelta(right)\n\n\n@execute_node.register(ops.TimestampAdd, timestamp_types, pd.Series)\ndef execute_timestamp_add_datetime_series(op, left, right, **kwargs):\n return pd.Timestamp(left) + right\n\n\n@execute_node.register(ops.IntervalAdd, timedelta_types, timedelta_types)\ndef execute_interval_add_delta_delta(op, left, right, **kwargs):\n return op.op(pd.Timedelta(left), pd.Timedelta(right))\n\n\n@execute_node.register(ops.IntervalAdd, timedelta_types, pd.Series)\n@execute_node.register(\n ops.IntervalMultiply, timedelta_types, numeric_types + (pd.Series,)\n)\ndef execute_interval_add_multiply_delta_series(op, left, right, **kwargs):\n return op.op(pd.Timedelta(left), right)\n\n\n@execute_node.register(\n (ops.TimestampAdd, ops.IntervalAdd), pd.Series, timedelta_types\n)\ndef execute_timestamp_interval_add_series_delta(op, left, right, **kwargs):\n return left + pd.Timedelta(right)\n\n\n@execute_node.register(\n (ops.TimestampAdd, ops.IntervalAdd), pd.Series, pd.Series\n)\ndef execute_timestamp_interval_add_series_series(op, left, right, **kwargs):\n return left + right\n\n\n@execute_node.register(ops.TimestampSub, timestamp_types, timedelta_types)\ndef execute_timestamp_sub_datetime_timedelta(op, left, right, **kwargs):\n return pd.Timestamp(left) - pd.Timedelta(right)\n\n\n@execute_node.register(\n (ops.TimestampDiff, ops.TimestampSub), timestamp_types, pd.Series\n)\ndef execute_timestamp_diff_sub_datetime_series(op, left, right, **kwargs):\n return pd.Timestamp(left) - right\n\n\n@execute_node.register(ops.TimestampSub, pd.Series, timedelta_types)\ndef execute_timestamp_sub_series_timedelta(op, left, right, **kwargs):\n return left - pd.Timedelta(right)\n\n\n@execute_node.register(\n (ops.TimestampDiff, ops.TimestampSub), pd.Series, pd.Series\n)\ndef execute_timestamp_diff_sub_series_series(op, left, right, **kwargs):\n return left - right\n\n\n@execute_node.register(ops.TimestampDiff, timestamp_types, timestamp_types)\ndef execute_timestamp_diff_datetime_datetime(op, left, right, **kwargs):\n return pd.Timestamp(left) - pd.Timestamp(right)\n\n\n@execute_node.register(ops.TimestampDiff, pd.Series, timestamp_types)\ndef execute_timestamp_diff_series_datetime(op, left, right, **kwargs):\n return left - pd.Timestamp(right)\n\n\n@execute_node.register(\n ops.IntervalMultiply, pd.Series, numeric_types + (pd.Series,)\n)\n@execute_node.register(\n 
ops.IntervalFloorDivide,\n (pd.Timedelta, pd.Series),\n numeric_types + (pd.Series,),\n)\ndef execute_interval_multiply_fdiv_series_numeric(op, left, right, **kwargs):\n return op.op(left, right)\n\n\n@execute_node.register(ops.TimestampFromUNIX, (pd.Series,) + integer_types)\ndef execute_timestamp_from_unix(op, data, **kwargs):\n return pd.to_datetime(data, unit=op.unit)\n\n\n@pre_execute.register(ops.TimestampNow)\n@pre_execute.register(ops.TimestampNow, ibis.client.Client)\ndef pre_execute_timestamp_now(op, *args, **kwargs):\n return {op: pd.Timestamp('now')}\n\n\n@execute_node.register(ops.DayOfWeekIndex, (str, datetime.date))\ndef execute_day_of_week_index_any(op, value, **kwargs):\n return pd.Timestamp(value).dayofweek\n\n\n@execute_node.register(ops.DayOfWeekIndex, pd.Series)\ndef execute_day_of_week_index_series(op, data, **kwargs):\n return data.dt.dayofweek.astype(np.int16)\n\n\n@execute_node.register(ops.DayOfWeekIndex, SeriesGroupBy)\ndef execute_day_of_week_index_series_group_by(op, data, **kwargs):\n groupings = data.grouper.groupings\n return data.obj.dt.dayofweek.astype(np.int16).groupby(groupings)\n\n\ndef day_name(obj):\n \"\"\"Backwards compatible name of day getting function.\n\n Parameters\n ----------\n obj : Union[Series, pd.Timestamp]\n\n Returns\n -------\n str\n The name of the day corresponding to `obj`\n \"\"\"\n try:\n return obj.day_name()\n except AttributeError:\n return obj.weekday_name\n\n\n@execute_node.register(ops.DayOfWeekName, (str, datetime.date))\ndef execute_day_of_week_name_any(op, value, **kwargs):\n return day_name(pd.Timestamp(value))\n\n\n@execute_node.register(ops.DayOfWeekName, pd.Series)\ndef execute_day_of_week_name_series(op, data, **kwargs):\n return day_name(data.dt)\n\n\n@execute_node.register(ops.DayOfWeekName, SeriesGroupBy)\ndef execute_day_of_week_name_series_group_by(op, data, **kwargs):\n return day_name(data.obj.dt).groupby(data.grouper.groupings)\n\n\n@execute_node.register(ops.DateSub, date_types, timedelta_types)\n@execute_node.register((ops.DateDiff, ops.DateSub), date_types, pd.Series)\n@execute_node.register(ops.DateSub, pd.Series, timedelta_types)\n@execute_node.register((ops.DateDiff, ops.DateSub), pd.Series, pd.Series)\n@execute_node.register(ops.DateDiff, date_types, date_types)\n@execute_node.register(ops.DateDiff, pd.Series, date_types)\ndef execute_date_sub_diff(op, left, right, **kwargs):\n return left - right\n\n\n@execute_node.register(ops.DateAdd, pd.Series, timedelta_types)\n@execute_node.register(ops.DateAdd, timedelta_types, pd.Series)\n@execute_node.register(ops.DateAdd, pd.Series, pd.Series)\n@execute_node.register(ops.DateAdd, date_types, timedelta_types)\n@execute_node.register(ops.DateAdd, timedelta_types, date_types)\n@execute_node.register(ops.DateAdd, date_types, pd.Series)\n@execute_node.register(ops.DateAdd, pd.Series, date_types)\ndef execute_date_add(op, left, right, **kwargs):\n return left + right\n", "path": "ibis/pandas/execution/temporal.py"}]}
3,649
138
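A note on the record above: the golden diff is a pure dispatch-registration change. Pandas already subtracts timedelta64 Series elementwise, so the existing `left - right` handler only needed `ops.IntervalSubtract` added to its registered operation types. A minimal sketch of the pandas behaviour the backend leans on (the sample values are illustrative):

```python
import pandas as pd

# Elementwise interval subtraction is native pandas behaviour; the ibis
# handler body for Series/Series operands is literally `left - right`.
left = pd.Series(pd.to_timedelta(["2 days", "3 hours"]))
right = pd.Series(pd.to_timedelta(["1 days", "1 hours"]))

print(left - right)
# 0   1 days 00:00:00
# 1   0 days 02:00:00
# dtype: timedelta64[ns]
```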
gh_patches_debug_41854
rasdani/github-patches
git_diff
iterative__dvc-3020
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> import: Handle non-DVC Git repositories After https://github.com/iterative/dvc/pull/2889, `dvc import` can also import files that are tracked by Git but not DVC. DVC still requires that they come from a DVC repository rather than any Git repository, although there is no longer need for that. </issue> <code> [start of dvc/external_repo.py] 1 import os 2 import tempfile 3 from contextlib import contextmanager 4 from distutils.dir_util import copy_tree 5 6 from funcy import retry 7 8 from dvc.config import NoRemoteError, ConfigError 9 from dvc.exceptions import NoRemoteInExternalRepoError 10 from dvc.remote import RemoteConfig 11 from dvc.exceptions import NoOutputInExternalRepoError 12 from dvc.exceptions import OutputNotFoundError 13 from dvc.utils.fs import remove 14 15 16 REPO_CACHE = {} 17 18 19 @contextmanager 20 def external_repo(url=None, rev=None, rev_lock=None, cache_dir=None): 21 from dvc.repo import Repo 22 23 path = _external_repo(url=url, rev=rev_lock or rev, cache_dir=cache_dir) 24 repo = Repo(path) 25 try: 26 yield repo 27 except NoRemoteError: 28 raise NoRemoteInExternalRepoError(url) 29 except OutputNotFoundError as exc: 30 if exc.repo is repo: 31 raise NoOutputInExternalRepoError(exc.output, repo.root_dir, url) 32 raise 33 repo.close() 34 35 36 def _external_repo(url=None, rev=None, cache_dir=None): 37 from dvc.config import Config 38 from dvc.cache import CacheConfig 39 from dvc.repo import Repo 40 41 key = (url, rev, cache_dir) 42 if key in REPO_CACHE: 43 return REPO_CACHE[key] 44 45 new_path = tempfile.mkdtemp("dvc-erepo") 46 47 # Copy and adjust existing clone 48 if (url, None, None) in REPO_CACHE: 49 old_path = REPO_CACHE[url, None, None] 50 51 # This one unlike shutil.copytree() works with an existing dir 52 copy_tree(old_path, new_path) 53 else: 54 # Create a new clone 55 _clone_repo(url, new_path) 56 57 # Save clean clone dir so that we will have access to a default branch 58 clean_clone_path = tempfile.mkdtemp("dvc-erepo") 59 copy_tree(new_path, clean_clone_path) 60 REPO_CACHE[url, None, None] = clean_clone_path 61 62 # Adjust new clone/copy to fit rev and cache_dir 63 64 # Checkout needs to be done first because current branch might not be 65 # DVC repository 66 if rev is not None: 67 _git_checkout(new_path, rev) 68 69 repo = Repo(new_path) 70 try: 71 # check if the URL is local and no default remote is present 72 # add default remote pointing to the original repo's cache location 73 if os.path.isdir(url): 74 rconfig = RemoteConfig(repo.config) 75 if not _default_remote_set(rconfig): 76 original_repo = Repo(url) 77 try: 78 rconfig.add( 79 "auto-generated-upstream", 80 original_repo.cache.local.cache_dir, 81 default=True, 82 level=Config.LEVEL_LOCAL, 83 ) 84 finally: 85 original_repo.close() 86 87 if cache_dir is not None: 88 cache_config = CacheConfig(repo.config) 89 cache_config.set_dir(cache_dir, level=Config.LEVEL_LOCAL) 90 finally: 91 # Need to close/reopen repo to force config reread 92 repo.close() 93 94 REPO_CACHE[key] = new_path 95 return new_path 96 97 98 def _git_checkout(repo_path, revision): 99 from dvc.scm import Git 100 101 git = Git(repo_path) 102 try: 103 git.checkout(revision) 104 finally: 105 git.close() 106 107 108 def clean_repos(): 109 # Outside code should not see cache while we are removing 110 repo_paths = list(REPO_CACHE.values()) 111 REPO_CACHE.clear() 112 113 for path in repo_paths: 114 _remove(path) 115 116 117 def 
_remove(path): 118 if os.name == "nt": 119 # git.exe may hang for a while not permitting to remove temp dir 120 os_retry = retry(5, errors=OSError, timeout=0.1) 121 os_retry(remove)(path) 122 else: 123 remove(path) 124 125 126 def _clone_repo(url, path): 127 from dvc.scm.git import Git 128 129 git = Git.clone(url, path) 130 git.close() 131 132 133 def _default_remote_set(rconfig): 134 """ 135 Checks if default remote config is present. 136 Args: 137 rconfig: a remote config 138 139 Returns: 140 True if the default remote config is set, else False 141 """ 142 try: 143 rconfig.get_default() 144 return True 145 except ConfigError: 146 return False 147 [end of dvc/external_repo.py] [start of dvc/dependency/repo.py] 1 import copy 2 import os 3 from contextlib import contextmanager 4 from dvc.utils.compat import FileNotFoundError 5 6 from funcy import merge 7 8 from .local import DependencyLOCAL 9 from dvc.external_repo import external_repo 10 from dvc.exceptions import OutputNotFoundError 11 from dvc.exceptions import PathMissingError 12 from dvc.utils.fs import fs_copy 13 14 15 class DependencyREPO(DependencyLOCAL): 16 PARAM_REPO = "repo" 17 PARAM_URL = "url" 18 PARAM_REV = "rev" 19 PARAM_REV_LOCK = "rev_lock" 20 21 REPO_SCHEMA = {PARAM_URL: str, PARAM_REV: str, PARAM_REV_LOCK: str} 22 23 def __init__(self, def_repo, stage, *args, **kwargs): 24 self.def_repo = def_repo 25 super(DependencyREPO, self).__init__(stage, *args, **kwargs) 26 27 def _parse_path(self, remote, path): 28 return None 29 30 @property 31 def is_in_repo(self): 32 return False 33 34 @property 35 def repo_pair(self): 36 d = self.def_repo 37 return d[self.PARAM_URL], d[self.PARAM_REV_LOCK] or d[self.PARAM_REV] 38 39 def __str__(self): 40 return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL]) 41 42 @contextmanager 43 def _make_repo(self, **overrides): 44 with external_repo(**merge(self.def_repo, overrides)) as repo: 45 yield repo 46 47 def status(self): 48 with self._make_repo() as repo: 49 current = repo.find_out_by_relpath(self.def_path).info 50 51 with self._make_repo(rev_lock=None) as repo: 52 updated = repo.find_out_by_relpath(self.def_path).info 53 54 if current != updated: 55 return {str(self): "update available"} 56 57 return {} 58 59 def save(self): 60 pass 61 62 def dumpd(self): 63 return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo} 64 65 def fetch(self): 66 with self._make_repo( 67 cache_dir=self.repo.cache.local.cache_dir 68 ) as repo: 69 self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev() 70 71 out = repo.find_out_by_relpath(self.def_path) 72 with repo.state: 73 repo.cloud.pull(out.get_used_cache()) 74 75 return out 76 77 @staticmethod 78 def _is_git_file(repo, path): 79 if not os.path.isabs(path): 80 try: 81 output = repo.find_out_by_relpath(path) 82 if not output.use_cache: 83 return True 84 except OutputNotFoundError: 85 return True 86 return False 87 88 def _copy_if_git_file(self, to_path): 89 src_path = self.def_path 90 with self._make_repo( 91 cache_dir=self.repo.cache.local.cache_dir 92 ) as repo: 93 if not self._is_git_file(repo, src_path): 94 return False 95 96 src_full_path = os.path.join(repo.root_dir, src_path) 97 dst_full_path = os.path.abspath(to_path) 98 fs_copy(src_full_path, dst_full_path) 99 return True 100 101 def download(self, to): 102 try: 103 if self._copy_if_git_file(to.fspath): 104 return 105 106 out = self.fetch() 107 to.info = copy.copy(out.info) 108 to.checkout() 109 except (FileNotFoundError): 110 raise PathMissingError( 111 self.def_path, 
self.def_repo[self.PARAM_URL] 112 ) 113 114 def update(self): 115 with self._make_repo(rev_lock=None) as repo: 116 self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev() 117 [end of dvc/dependency/repo.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/dependency/repo.py b/dvc/dependency/repo.py --- a/dvc/dependency/repo.py +++ b/dvc/dependency/repo.py @@ -6,7 +6,9 @@ from funcy import merge from .local import DependencyLOCAL +from dvc.external_repo import cached_clone from dvc.external_repo import external_repo +from dvc.exceptions import NotDvcRepoError from dvc.exceptions import OutputNotFoundError from dvc.exceptions import PathMissingError from dvc.utils.fs import fs_copy @@ -75,27 +77,35 @@ return out @staticmethod - def _is_git_file(repo, path): - if not os.path.isabs(path): - try: - output = repo.find_out_by_relpath(path) - if not output.use_cache: - return True - except OutputNotFoundError: - return True - return False + def _is_git_file(repo_dir, path): + from dvc.repo import Repo + + if os.path.isabs(path): + return False + + try: + repo = Repo(repo_dir) + except NotDvcRepoError: + return True + + try: + output = repo.find_out_by_relpath(path) + return not output.use_cache + except OutputNotFoundError: + return True + finally: + repo.close() def _copy_if_git_file(self, to_path): src_path = self.def_path - with self._make_repo( - cache_dir=self.repo.cache.local.cache_dir - ) as repo: - if not self._is_git_file(repo, src_path): - return False + repo_dir = cached_clone(**self.def_repo) + + if not self._is_git_file(repo_dir, src_path): + return False - src_full_path = os.path.join(repo.root_dir, src_path) - dst_full_path = os.path.abspath(to_path) - fs_copy(src_full_path, dst_full_path) + src_full_path = os.path.join(repo_dir, src_path) + dst_full_path = os.path.abspath(to_path) + fs_copy(src_full_path, dst_full_path) return True def download(self, to): diff --git a/dvc/external_repo.py b/dvc/external_repo.py --- a/dvc/external_repo.py +++ b/dvc/external_repo.py @@ -33,18 +33,20 @@ repo.close() -def _external_repo(url=None, rev=None, cache_dir=None): - from dvc.config import Config - from dvc.cache import CacheConfig - from dvc.repo import Repo +def cached_clone(url, rev=None, **_ignored_kwargs): + """Clone an external git repo to a temporary directory. - key = (url, rev, cache_dir) - if key in REPO_CACHE: - return REPO_CACHE[key] + Returns the path to a local temporary directory with the specified + revision checked out. + + Uses the REPO_CACHE to avoid accessing the remote server again if + cloning from the same URL twice in the same session. + + """ new_path = tempfile.mkdtemp("dvc-erepo") - # Copy and adjust existing clone + # Copy and adjust existing clean clone if (url, None, None) in REPO_CACHE: old_path = REPO_CACHE[url, None, None] @@ -59,13 +61,24 @@ copy_tree(new_path, clean_clone_path) REPO_CACHE[url, None, None] = clean_clone_path - # Adjust new clone/copy to fit rev and cache_dir - - # Checkout needs to be done first because current branch might not be - # DVC repository + # Check out the specified revision if rev is not None: _git_checkout(new_path, rev) + return new_path + + +def _external_repo(url=None, rev=None, cache_dir=None): + from dvc.config import Config + from dvc.cache import CacheConfig + from dvc.repo import Repo + + key = (url, rev, cache_dir) + if key in REPO_CACHE: + return REPO_CACHE[key] + + new_path = cached_clone(url, rev=rev) + repo = Repo(new_path) try: # check if the URL is local and no default remote is present
{"golden_diff": "diff --git a/dvc/dependency/repo.py b/dvc/dependency/repo.py\n--- a/dvc/dependency/repo.py\n+++ b/dvc/dependency/repo.py\n@@ -6,7 +6,9 @@\n from funcy import merge\n \n from .local import DependencyLOCAL\n+from dvc.external_repo import cached_clone\n from dvc.external_repo import external_repo\n+from dvc.exceptions import NotDvcRepoError\n from dvc.exceptions import OutputNotFoundError\n from dvc.exceptions import PathMissingError\n from dvc.utils.fs import fs_copy\n@@ -75,27 +77,35 @@\n return out\n \n @staticmethod\n- def _is_git_file(repo, path):\n- if not os.path.isabs(path):\n- try:\n- output = repo.find_out_by_relpath(path)\n- if not output.use_cache:\n- return True\n- except OutputNotFoundError:\n- return True\n- return False\n+ def _is_git_file(repo_dir, path):\n+ from dvc.repo import Repo\n+\n+ if os.path.isabs(path):\n+ return False\n+\n+ try:\n+ repo = Repo(repo_dir)\n+ except NotDvcRepoError:\n+ return True\n+\n+ try:\n+ output = repo.find_out_by_relpath(path)\n+ return not output.use_cache\n+ except OutputNotFoundError:\n+ return True\n+ finally:\n+ repo.close()\n \n def _copy_if_git_file(self, to_path):\n src_path = self.def_path\n- with self._make_repo(\n- cache_dir=self.repo.cache.local.cache_dir\n- ) as repo:\n- if not self._is_git_file(repo, src_path):\n- return False\n+ repo_dir = cached_clone(**self.def_repo)\n+\n+ if not self._is_git_file(repo_dir, src_path):\n+ return False\n \n- src_full_path = os.path.join(repo.root_dir, src_path)\n- dst_full_path = os.path.abspath(to_path)\n- fs_copy(src_full_path, dst_full_path)\n+ src_full_path = os.path.join(repo_dir, src_path)\n+ dst_full_path = os.path.abspath(to_path)\n+ fs_copy(src_full_path, dst_full_path)\n return True\n \n def download(self, to):\ndiff --git a/dvc/external_repo.py b/dvc/external_repo.py\n--- a/dvc/external_repo.py\n+++ b/dvc/external_repo.py\n@@ -33,18 +33,20 @@\n repo.close()\n \n \n-def _external_repo(url=None, rev=None, cache_dir=None):\n- from dvc.config import Config\n- from dvc.cache import CacheConfig\n- from dvc.repo import Repo\n+def cached_clone(url, rev=None, **_ignored_kwargs):\n+ \"\"\"Clone an external git repo to a temporary directory.\n \n- key = (url, rev, cache_dir)\n- if key in REPO_CACHE:\n- return REPO_CACHE[key]\n+ Returns the path to a local temporary directory with the specified\n+ revision checked out.\n+\n+ Uses the REPO_CACHE to avoid accessing the remote server again if\n+ cloning from the same URL twice in the same session.\n+\n+ \"\"\"\n \n new_path = tempfile.mkdtemp(\"dvc-erepo\")\n \n- # Copy and adjust existing clone\n+ # Copy and adjust existing clean clone\n if (url, None, None) in REPO_CACHE:\n old_path = REPO_CACHE[url, None, None]\n \n@@ -59,13 +61,24 @@\n copy_tree(new_path, clean_clone_path)\n REPO_CACHE[url, None, None] = clean_clone_path\n \n- # Adjust new clone/copy to fit rev and cache_dir\n-\n- # Checkout needs to be done first because current branch might not be\n- # DVC repository\n+ # Check out the specified revision\n if rev is not None:\n _git_checkout(new_path, rev)\n \n+ return new_path\n+\n+\n+def _external_repo(url=None, rev=None, cache_dir=None):\n+ from dvc.config import Config\n+ from dvc.cache import CacheConfig\n+ from dvc.repo import Repo\n+\n+ key = (url, rev, cache_dir)\n+ if key in REPO_CACHE:\n+ return REPO_CACHE[key]\n+\n+ new_path = cached_clone(url, rev=rev)\n+\n repo = Repo(new_path)\n try:\n # check if the URL is local and no default remote is present\n", "issue": "import: Handle non-DVC Git repositories \nAfter 
https://github.com/iterative/dvc/pull/2889, `dvc import` can also import files that are tracked by Git but not DVC. DVC still requires that they come from a DVC repository rather than any Git repository, although there is no longer need for that.\n", "before_files": [{"content": "import os\nimport tempfile\nfrom contextlib import contextmanager\nfrom distutils.dir_util import copy_tree\n\nfrom funcy import retry\n\nfrom dvc.config import NoRemoteError, ConfigError\nfrom dvc.exceptions import NoRemoteInExternalRepoError\nfrom dvc.remote import RemoteConfig\nfrom dvc.exceptions import NoOutputInExternalRepoError\nfrom dvc.exceptions import OutputNotFoundError\nfrom dvc.utils.fs import remove\n\n\nREPO_CACHE = {}\n\n\n@contextmanager\ndef external_repo(url=None, rev=None, rev_lock=None, cache_dir=None):\n from dvc.repo import Repo\n\n path = _external_repo(url=url, rev=rev_lock or rev, cache_dir=cache_dir)\n repo = Repo(path)\n try:\n yield repo\n except NoRemoteError:\n raise NoRemoteInExternalRepoError(url)\n except OutputNotFoundError as exc:\n if exc.repo is repo:\n raise NoOutputInExternalRepoError(exc.output, repo.root_dir, url)\n raise\n repo.close()\n\n\ndef _external_repo(url=None, rev=None, cache_dir=None):\n from dvc.config import Config\n from dvc.cache import CacheConfig\n from dvc.repo import Repo\n\n key = (url, rev, cache_dir)\n if key in REPO_CACHE:\n return REPO_CACHE[key]\n\n new_path = tempfile.mkdtemp(\"dvc-erepo\")\n\n # Copy and adjust existing clone\n if (url, None, None) in REPO_CACHE:\n old_path = REPO_CACHE[url, None, None]\n\n # This one unlike shutil.copytree() works with an existing dir\n copy_tree(old_path, new_path)\n else:\n # Create a new clone\n _clone_repo(url, new_path)\n\n # Save clean clone dir so that we will have access to a default branch\n clean_clone_path = tempfile.mkdtemp(\"dvc-erepo\")\n copy_tree(new_path, clean_clone_path)\n REPO_CACHE[url, None, None] = clean_clone_path\n\n # Adjust new clone/copy to fit rev and cache_dir\n\n # Checkout needs to be done first because current branch might not be\n # DVC repository\n if rev is not None:\n _git_checkout(new_path, rev)\n\n repo = Repo(new_path)\n try:\n # check if the URL is local and no default remote is present\n # add default remote pointing to the original repo's cache location\n if os.path.isdir(url):\n rconfig = RemoteConfig(repo.config)\n if not _default_remote_set(rconfig):\n original_repo = Repo(url)\n try:\n rconfig.add(\n \"auto-generated-upstream\",\n original_repo.cache.local.cache_dir,\n default=True,\n level=Config.LEVEL_LOCAL,\n )\n finally:\n original_repo.close()\n\n if cache_dir is not None:\n cache_config = CacheConfig(repo.config)\n cache_config.set_dir(cache_dir, level=Config.LEVEL_LOCAL)\n finally:\n # Need to close/reopen repo to force config reread\n repo.close()\n\n REPO_CACHE[key] = new_path\n return new_path\n\n\ndef _git_checkout(repo_path, revision):\n from dvc.scm import Git\n\n git = Git(repo_path)\n try:\n git.checkout(revision)\n finally:\n git.close()\n\n\ndef clean_repos():\n # Outside code should not see cache while we are removing\n repo_paths = list(REPO_CACHE.values())\n REPO_CACHE.clear()\n\n for path in repo_paths:\n _remove(path)\n\n\ndef _remove(path):\n if os.name == \"nt\":\n # git.exe may hang for a while not permitting to remove temp dir\n os_retry = retry(5, errors=OSError, timeout=0.1)\n os_retry(remove)(path)\n else:\n remove(path)\n\n\ndef _clone_repo(url, path):\n from dvc.scm.git import Git\n\n git = Git.clone(url, path)\n git.close()\n\n\ndef 
_default_remote_set(rconfig):\n \"\"\"\n Checks if default remote config is present.\n Args:\n rconfig: a remote config\n\n Returns:\n True if the default remote config is set, else False\n \"\"\"\n try:\n rconfig.get_default()\n return True\n except ConfigError:\n return False\n", "path": "dvc/external_repo.py"}, {"content": "import copy\nimport os\nfrom contextlib import contextmanager\nfrom dvc.utils.compat import FileNotFoundError\n\nfrom funcy import merge\n\nfrom .local import DependencyLOCAL\nfrom dvc.external_repo import external_repo\nfrom dvc.exceptions import OutputNotFoundError\nfrom dvc.exceptions import PathMissingError\nfrom dvc.utils.fs import fs_copy\n\n\nclass DependencyREPO(DependencyLOCAL):\n PARAM_REPO = \"repo\"\n PARAM_URL = \"url\"\n PARAM_REV = \"rev\"\n PARAM_REV_LOCK = \"rev_lock\"\n\n REPO_SCHEMA = {PARAM_URL: str, PARAM_REV: str, PARAM_REV_LOCK: str}\n\n def __init__(self, def_repo, stage, *args, **kwargs):\n self.def_repo = def_repo\n super(DependencyREPO, self).__init__(stage, *args, **kwargs)\n\n def _parse_path(self, remote, path):\n return None\n\n @property\n def is_in_repo(self):\n return False\n\n @property\n def repo_pair(self):\n d = self.def_repo\n return d[self.PARAM_URL], d[self.PARAM_REV_LOCK] or d[self.PARAM_REV]\n\n def __str__(self):\n return \"{} ({})\".format(self.def_path, self.def_repo[self.PARAM_URL])\n\n @contextmanager\n def _make_repo(self, **overrides):\n with external_repo(**merge(self.def_repo, overrides)) as repo:\n yield repo\n\n def status(self):\n with self._make_repo() as repo:\n current = repo.find_out_by_relpath(self.def_path).info\n\n with self._make_repo(rev_lock=None) as repo:\n updated = repo.find_out_by_relpath(self.def_path).info\n\n if current != updated:\n return {str(self): \"update available\"}\n\n return {}\n\n def save(self):\n pass\n\n def dumpd(self):\n return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}\n\n def fetch(self):\n with self._make_repo(\n cache_dir=self.repo.cache.local.cache_dir\n ) as repo:\n self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()\n\n out = repo.find_out_by_relpath(self.def_path)\n with repo.state:\n repo.cloud.pull(out.get_used_cache())\n\n return out\n\n @staticmethod\n def _is_git_file(repo, path):\n if not os.path.isabs(path):\n try:\n output = repo.find_out_by_relpath(path)\n if not output.use_cache:\n return True\n except OutputNotFoundError:\n return True\n return False\n\n def _copy_if_git_file(self, to_path):\n src_path = self.def_path\n with self._make_repo(\n cache_dir=self.repo.cache.local.cache_dir\n ) as repo:\n if not self._is_git_file(repo, src_path):\n return False\n\n src_full_path = os.path.join(repo.root_dir, src_path)\n dst_full_path = os.path.abspath(to_path)\n fs_copy(src_full_path, dst_full_path)\n return True\n\n def download(self, to):\n try:\n if self._copy_if_git_file(to.fspath):\n return\n\n out = self.fetch()\n to.info = copy.copy(out.info)\n to.checkout()\n except (FileNotFoundError):\n raise PathMissingError(\n self.def_path, self.def_repo[self.PARAM_URL]\n )\n\n def update(self):\n with self._make_repo(rev_lock=None) as repo:\n self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()\n", "path": "dvc/dependency/repo.py"}]}
2,932
1,001
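For the record above, the heart of the patch is the fallback in `_is_git_file`: try to open the cached clone as a DVC repository, and when `NotDvcRepoError` is raised, treat every relative path as a plain Git-tracked file. Restated as a standalone helper with the imports it needs (the `Repo` and exception names come straight from the diff):

```python
import os


def is_git_file(repo_dir, path):
    # These names mirror dvc's internal API as used in the patch.
    from dvc.repo import Repo
    from dvc.exceptions import NotDvcRepoError, OutputNotFoundError

    if os.path.isabs(path):
        return False
    try:
        repo = Repo(repo_dir)
    except NotDvcRepoError:
        # Not a DVC repo at all: anything Git-tracked is a plain file.
        return True
    try:
        output = repo.find_out_by_relpath(path)
        # Outputs that bypass the cache are also copied like Git files.
        return not output.use_cache
    except OutputNotFoundError:
        return True
    finally:
        repo.close()
```

The companion `cached_clone` refactor exists so this check can run on a clone of any Git URL before a full `Repo` object is ever constructed.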
gh_patches_debug_607
rasdani/github-patches
git_diff
pex-tool__pex-1446
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.49 On the docket: + [ ] Avoid re-using old ~/.pex/code/ caches. #1444 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.48" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.48" +__version__ = "2.1.49"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.48\"\n+__version__ = \"2.1.49\"\n", "issue": "Release 2.1.49\nOn the docket:\r\n+ [ ] Avoid re-using old ~/.pex/code/ caches. #1444 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.48\"\n", "path": "pex/version.py"}]}
617
96
gh_patches_debug_20339
rasdani/github-patches
git_diff
docker__docker-py-1581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> images.build fails when using tag argument. When using the `tag` kwarg on `client.images.build`, it will always throw a Build Error. The api.build output looks like this ``` ['{"stream":"Step 1/4 : FROM scratch\\n"}\r\n', '{"stream":" ---\\u003e \\n"}\r\n', '{"stream":"Step 2/4 : LABEL com.nvidia.volumes.needed \\"nvidia_driver\\"\\n"}\r\n', '{"stream":" ---\\u003e Using cache\\n"}\r\n', '{"stream":" ---\\u003e 36ec3942c5f5\\n"}\r\n', '{"stream":"Step 3/4 : SHELL /usr/local/nvidia/bin/nvidia-smi\\n"}\r\n', '{"stream":" ---\\u003e Running in f875d54529eb\\n"}\r\n', '{"stream":" ---\\u003e b750cf87aed6\\n"}\r\n', '{"stream":"Step 4/4 : CMD -L\\n"}\r\n', '{"stream":" ---\\u003e Running in f3f9e21b8171\\n"}\r\n', '{"stream":" ---\\u003e 61c46a80da73\\n"}\r\n', '{"stream":"Successfully built 61c46a80da73\\n"}\r\n', '{"stream":"Successfully tagged 5b3bc129-d296-4b7d-872c-bc7117d4f327:latest\\n"}\r\n'] ``` The problem can be tracked down to [here](https://github.com/docker/docker-py/blob/2.2.1-release/docker/models/images.py#L164-L167). The code is assuming the "Successfully built" comes last, however in the tag case, this is not true. Two ideas are to either 1. Search the last two lines 2. Or in case that is not enough, search all the lines for "Successfully built" I'm using docker server 17.05.0-ce-rc1 </issue> <code> [start of docker/models/images.py] 1 import re 2 3 import six 4 5 from ..api import APIClient 6 from ..errors import BuildError 7 from ..utils.json_stream import json_stream 8 from .resource import Collection, Model 9 10 11 class Image(Model): 12 """ 13 An image on the server. 14 """ 15 def __repr__(self): 16 return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags)) 17 18 @property 19 def labels(self): 20 """ 21 The labels of an image as dictionary. 22 """ 23 result = self.attrs['Config'].get('Labels') 24 return result or {} 25 26 @property 27 def short_id(self): 28 """ 29 The ID of the image truncated to 10 characters, plus the ``sha256:`` 30 prefix. 31 """ 32 if self.id.startswith('sha256:'): 33 return self.id[:17] 34 return self.id[:10] 35 36 @property 37 def tags(self): 38 """ 39 The image's tags. 40 """ 41 tags = self.attrs.get('RepoTags') 42 if tags is None: 43 tags = [] 44 return [tag for tag in tags if tag != '<none>:<none>'] 45 46 def history(self): 47 """ 48 Show the history of an image. 49 50 Returns: 51 (str): The history of the image. 52 53 Raises: 54 :py:class:`docker.errors.APIError` 55 If the server returns an error. 56 """ 57 return self.client.api.history(self.id) 58 59 def save(self): 60 """ 61 Get a tarball of an image. Similar to the ``docker save`` command. 62 63 Returns: 64 (urllib3.response.HTTPResponse object): The response from the 65 daemon. 66 67 Raises: 68 :py:class:`docker.errors.APIError` 69 If the server returns an error. 70 71 Example: 72 73 >>> image = cli.images.get("fedora:latest") 74 >>> resp = image.save() 75 >>> f = open('/tmp/fedora-latest.tar', 'w') 76 >>> for chunk in resp.stream(): 77 >>> f.write(chunk) 78 >>> f.close() 79 """ 80 return self.client.api.get_image(self.id) 81 82 def tag(self, repository, tag=None, **kwargs): 83 """ 84 Tag this image into a repository. Similar to the ``docker tag`` 85 command. 86 87 Args: 88 repository (str): The repository to set for the tag 89 tag (str): The tag name 90 force (bool): Force 91 92 Raises: 93 :py:class:`docker.errors.APIError` 94 If the server returns an error. 
95 96 Returns: 97 (bool): ``True`` if successful 98 """ 99 self.client.api.tag(self.id, repository, tag=tag, **kwargs) 100 101 102 class ImageCollection(Collection): 103 model = Image 104 105 def build(self, **kwargs): 106 """ 107 Build an image and return it. Similar to the ``docker build`` 108 command. Either ``path`` or ``fileobj`` must be set. 109 110 If you have a tar file for the Docker build context (including a 111 Dockerfile) already, pass a readable file-like object to ``fileobj`` 112 and also pass ``custom_context=True``. If the stream is compressed 113 also, set ``encoding`` to the correct value (e.g ``gzip``). 114 115 If you want to get the raw output of the build, use the 116 :py:meth:`~docker.api.build.BuildApiMixin.build` method in the 117 low-level API. 118 119 Args: 120 path (str): Path to the directory containing the Dockerfile 121 fileobj: A file object to use as the Dockerfile. (Or a file-like 122 object) 123 tag (str): A tag to add to the final image 124 quiet (bool): Whether to return the status 125 nocache (bool): Don't use the cache when set to ``True`` 126 rm (bool): Remove intermediate containers. The ``docker build`` 127 command now defaults to ``--rm=true``, but we have kept the old 128 default of `False` to preserve backward compatibility 129 stream (bool): *Deprecated for API version > 1.8 (always True)*. 130 Return a blocking generator you can iterate over to retrieve 131 build output as it happens 132 timeout (int): HTTP timeout 133 custom_context (bool): Optional if using ``fileobj`` 134 encoding (str): The encoding for a stream. Set to ``gzip`` for 135 compressing 136 pull (bool): Downloads any updates to the FROM image in Dockerfiles 137 forcerm (bool): Always remove intermediate containers, even after 138 unsuccessful builds 139 dockerfile (str): path within the build context to the Dockerfile 140 buildargs (dict): A dictionary of build arguments 141 container_limits (dict): A dictionary of limits applied to each 142 container created by the build process. Valid keys: 143 144 - memory (int): set memory limit for build 145 - memswap (int): Total memory (memory + swap), -1 to disable 146 swap 147 - cpushares (int): CPU shares (relative weight) 148 - cpusetcpus (str): CPUs in which to allow execution, e.g., 149 ``"0-3"``, ``"0,1"`` 150 decode (bool): If set to ``True``, the returned stream will be 151 decoded into dicts on the fly. Default ``False``. 152 cache_from (list): A list of images used for build cache 153 resolution. 154 155 Returns: 156 (:py:class:`Image`): The built image. 157 158 Raises: 159 :py:class:`docker.errors.BuildError` 160 If there is an error during the build. 161 :py:class:`docker.errors.APIError` 162 If the server returns any other error. 163 ``TypeError`` 164 If neither ``path`` nor ``fileobj`` is specified. 165 """ 166 resp = self.client.api.build(**kwargs) 167 if isinstance(resp, six.string_types): 168 return self.get(resp) 169 events = list(json_stream(resp)) 170 if not events: 171 return BuildError('Unknown') 172 event = events[-1] 173 if 'stream' in event: 174 match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)', 175 event.get('stream', '')) 176 if match: 177 image_id = match.group(2) 178 return self.get(image_id) 179 180 raise BuildError(event.get('error') or event) 181 182 def get(self, name): 183 """ 184 Gets an image. 185 186 Args: 187 name (str): The name of the image. 188 189 Returns: 190 (:py:class:`Image`): The image. 191 192 Raises: 193 :py:class:`docker.errors.ImageNotFound` If the image does not 194 exist. 
195 :py:class:`docker.errors.APIError` 196 If the server returns an error. 197 """ 198 return self.prepare_model(self.client.api.inspect_image(name)) 199 200 def list(self, name=None, all=False, filters=None): 201 """ 202 List images on the server. 203 204 Args: 205 name (str): Only show images belonging to the repository ``name`` 206 all (bool): Show intermediate image layers. By default, these are 207 filtered out. 208 filters (dict): Filters to be processed on the image list. 209 Available filters: 210 - ``dangling`` (bool) 211 - ``label`` (str): format either ``key`` or ``key=value`` 212 213 Returns: 214 (list of :py:class:`Image`): The images. 215 216 Raises: 217 :py:class:`docker.errors.APIError` 218 If the server returns an error. 219 """ 220 resp = self.client.api.images(name=name, all=all, filters=filters) 221 return [self.prepare_model(r) for r in resp] 222 223 def load(self, data): 224 """ 225 Load an image that was previously saved using 226 :py:meth:`~docker.models.images.Image.save` (or ``docker save``). 227 Similar to ``docker load``. 228 229 Args: 230 data (binary): Image data to be loaded. 231 232 Raises: 233 :py:class:`docker.errors.APIError` 234 If the server returns an error. 235 """ 236 return self.client.api.load_image(data) 237 238 def pull(self, name, **kwargs): 239 """ 240 Pull an image of the given name and return it. Similar to the 241 ``docker pull`` command. 242 243 If you want to get the raw pull output, use the 244 :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the 245 low-level API. 246 247 Args: 248 repository (str): The repository to pull 249 tag (str): The tag to pull 250 insecure_registry (bool): Use an insecure registry 251 auth_config (dict): Override the credentials that 252 :py:meth:`~docker.client.DockerClient.login` has set for 253 this request. ``auth_config`` should contain the ``username`` 254 and ``password`` keys to be valid. 255 256 Returns: 257 (:py:class:`Image`): The image that has been pulled. 258 259 Raises: 260 :py:class:`docker.errors.APIError` 261 If the server returns an error. 262 263 Example: 264 265 >>> image = client.images.pull('busybox') 266 """ 267 self.client.api.pull(name, **kwargs) 268 return self.get(name) 269 270 def push(self, repository, tag=None, **kwargs): 271 return self.client.api.push(repository, tag=tag, **kwargs) 272 push.__doc__ = APIClient.push.__doc__ 273 274 def remove(self, *args, **kwargs): 275 self.client.api.remove_image(*args, **kwargs) 276 remove.__doc__ = APIClient.remove_image.__doc__ 277 278 def search(self, *args, **kwargs): 279 return self.client.api.search(*args, **kwargs) 280 search.__doc__ = APIClient.search.__doc__ 281 282 def prune(self, filters=None): 283 return self.client.api.prune_images(filters=filters) 284 prune.__doc__ = APIClient.prune_images.__doc__ 285 [end of docker/models/images.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/models/images.py b/docker/models/images.py --- a/docker/models/images.py +++ b/docker/models/images.py @@ -166,18 +166,18 @@ resp = self.client.api.build(**kwargs) if isinstance(resp, six.string_types): return self.get(resp) - events = list(json_stream(resp)) - if not events: - return BuildError('Unknown') - event = events[-1] - if 'stream' in event: - match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)', - event.get('stream', '')) - if match: - image_id = match.group(2) - return self.get(image_id) - - raise BuildError(event.get('error') or event) + for chunk in json_stream(resp): + if 'error' in chunk: + raise BuildError(chunk['error']) + break + if 'stream' in chunk: + match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)', + chunk['stream']) + if match: + image_id = match.group(2) + return self.get(image_id) + + return BuildError('Unknown') def get(self, name): """
{"golden_diff": "diff --git a/docker/models/images.py b/docker/models/images.py\n--- a/docker/models/images.py\n+++ b/docker/models/images.py\n@@ -166,18 +166,18 @@\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n- events = list(json_stream(resp))\n- if not events:\n- return BuildError('Unknown')\n- event = events[-1]\n- if 'stream' in event:\n- match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)',\n- event.get('stream', ''))\n- if match:\n- image_id = match.group(2)\n- return self.get(image_id)\n-\n- raise BuildError(event.get('error') or event)\n+ for chunk in json_stream(resp):\n+ if 'error' in chunk:\n+ raise BuildError(chunk['error'])\n+ break\n+ if 'stream' in chunk:\n+ match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)',\n+ chunk['stream'])\n+ if match:\n+ image_id = match.group(2)\n+ return self.get(image_id)\n+\n+ return BuildError('Unknown')\n \n def get(self, name):\n \"\"\"\n", "issue": "images.build fails when using tag argument.\nWhen using the `tag` kwarg on `client.images.build`, it will always throw a Build Error.\r\n\r\nThe api.build output looks like this\r\n\r\n```\r\n['{\"stream\":\"Step 1/4 : FROM scratch\\\\n\"}\\r\\n',\r\n '{\"stream\":\" ---\\\\u003e \\\\n\"}\\r\\n',\r\n '{\"stream\":\"Step 2/4 : LABEL com.nvidia.volumes.needed \\\\\"nvidia_driver\\\\\"\\\\n\"}\\r\\n',\r\n '{\"stream\":\" ---\\\\u003e Using cache\\\\n\"}\\r\\n',\r\n '{\"stream\":\" ---\\\\u003e 36ec3942c5f5\\\\n\"}\\r\\n',\r\n '{\"stream\":\"Step 3/4 : SHELL /usr/local/nvidia/bin/nvidia-smi\\\\n\"}\\r\\n',\r\n '{\"stream\":\" ---\\\\u003e Running in f875d54529eb\\\\n\"}\\r\\n',\r\n '{\"stream\":\" ---\\\\u003e b750cf87aed6\\\\n\"}\\r\\n',\r\n '{\"stream\":\"Step 4/4 : CMD -L\\\\n\"}\\r\\n',\r\n '{\"stream\":\" ---\\\\u003e Running in f3f9e21b8171\\\\n\"}\\r\\n',\r\n '{\"stream\":\" ---\\\\u003e 61c46a80da73\\\\n\"}\\r\\n',\r\n '{\"stream\":\"Successfully built 61c46a80da73\\\\n\"}\\r\\n',\r\n '{\"stream\":\"Successfully tagged 5b3bc129-d296-4b7d-872c-bc7117d4f327:latest\\\\n\"}\\r\\n']\r\n```\r\n\r\nThe problem can be tracked down to [here](https://github.com/docker/docker-py/blob/2.2.1-release/docker/models/images.py#L164-L167). The code is assuming the \"Successfully built\" comes last, however in the tag case, this is not true.\r\n\r\nTwo ideas are to either \r\n\r\n1. Search the last two lines\r\n2. 
Or in case that is not enough, search all the lines for \"Successfully built\"\r\n\r\n\r\nI'm using docker server 17.05.0-ce-rc1\r\n\n", "before_files": [{"content": "import re\n\nimport six\n\nfrom ..api import APIClient\nfrom ..errors import BuildError\nfrom ..utils.json_stream import json_stream\nfrom .resource import Collection, Model\n\n\nclass Image(Model):\n \"\"\"\n An image on the server.\n \"\"\"\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\n\n @property\n def labels(self):\n \"\"\"\n The labels of an image as dictionary.\n \"\"\"\n result = self.attrs['Config'].get('Labels')\n return result or {}\n\n @property\n def short_id(self):\n \"\"\"\n The ID of the image truncated to 10 characters, plus the ``sha256:``\n prefix.\n \"\"\"\n if self.id.startswith('sha256:'):\n return self.id[:17]\n return self.id[:10]\n\n @property\n def tags(self):\n \"\"\"\n The image's tags.\n \"\"\"\n tags = self.attrs.get('RepoTags')\n if tags is None:\n tags = []\n return [tag for tag in tags if tag != '<none>:<none>']\n\n def history(self):\n \"\"\"\n Show the history of an image.\n\n Returns:\n (str): The history of the image.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.history(self.id)\n\n def save(self):\n \"\"\"\n Get a tarball of an image. Similar to the ``docker save`` command.\n\n Returns:\n (urllib3.response.HTTPResponse object): The response from the\n daemon.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = cli.images.get(\"fedora:latest\")\n >>> resp = image.save()\n >>> f = open('/tmp/fedora-latest.tar', 'w')\n >>> for chunk in resp.stream():\n >>> f.write(chunk)\n >>> f.close()\n \"\"\"\n return self.client.api.get_image(self.id)\n\n def tag(self, repository, tag=None, **kwargs):\n \"\"\"\n Tag this image into a repository. Similar to the ``docker tag``\n command.\n\n Args:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Returns:\n (bool): ``True`` if successful\n \"\"\"\n self.client.api.tag(self.id, repository, tag=tag, **kwargs)\n\n\nclass ImageCollection(Collection):\n model = Image\n\n def build(self, **kwargs):\n \"\"\"\n Build an image and return it. Similar to the ``docker build``\n command. Either ``path`` or ``fileobj`` must be set.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n If you want to get the raw output of the build, use the\n :py:meth:`~docker.api.build.BuildApiMixin.build` method in the\n low-level API.\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. 
The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. Default ``False``.\n cache_from (list): A list of images used for build cache\n resolution.\n\n Returns:\n (:py:class:`Image`): The built image.\n\n Raises:\n :py:class:`docker.errors.BuildError`\n If there is an error during the build.\n :py:class:`docker.errors.APIError`\n If the server returns any other error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n events = list(json_stream(resp))\n if not events:\n return BuildError('Unknown')\n event = events[-1]\n if 'stream' in event:\n match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)',\n event.get('stream', ''))\n if match:\n image_id = match.group(2)\n return self.get(image_id)\n\n raise BuildError(event.get('error') or event)\n\n def get(self, name):\n \"\"\"\n Gets an image.\n\n Args:\n name (str): The name of the image.\n\n Returns:\n (:py:class:`Image`): The image.\n\n Raises:\n :py:class:`docker.errors.ImageNotFound` If the image does not\n exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_image(name))\n\n def list(self, name=None, all=False, filters=None):\n \"\"\"\n List images on the server.\n\n Args:\n name (str): Only show images belonging to the repository ``name``\n all (bool): Show intermediate image layers. 
By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\n Returns:\n (list of :py:class:`Image`): The images.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.images(name=name, all=all, filters=filters)\n return [self.prepare_model(r) for r in resp]\n\n def load(self, data):\n \"\"\"\n Load an image that was previously saved using\n :py:meth:`~docker.models.images.Image.save` (or ``docker save``).\n Similar to ``docker load``.\n\n Args:\n data (binary): Image data to be loaded.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.load_image(data)\n\n def pull(self, name, **kwargs):\n \"\"\"\n Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\n Args:\n repository (str): The repository to pull\n tag (str): The tag to pull\n insecure_registry (bool): Use an insecure registry\n auth_config (dict): Override the credentials that\n :py:meth:`~docker.client.DockerClient.login` has set for\n this request. ``auth_config`` should contain the ``username``\n and ``password`` keys to be valid.\n\n Returns:\n (:py:class:`Image`): The image that has been pulled.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = client.images.pull('busybox')\n \"\"\"\n self.client.api.pull(name, **kwargs)\n return self.get(name)\n\n def push(self, repository, tag=None, **kwargs):\n return self.client.api.push(repository, tag=tag, **kwargs)\n push.__doc__ = APIClient.push.__doc__\n\n def remove(self, *args, **kwargs):\n self.client.api.remove_image(*args, **kwargs)\n remove.__doc__ = APIClient.remove_image.__doc__\n\n def search(self, *args, **kwargs):\n return self.client.api.search(*args, **kwargs)\n search.__doc__ = APIClient.search.__doc__\n\n def prune(self, filters=None):\n return self.client.api.prune_images(filters=filters)\n prune.__doc__ = APIClient.prune_images.__doc__\n", "path": "docker/models/images.py"}]}
3,919
293
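For reference, the fix recorded in the golden diff above boils down to scanning the build output stream chunk by chunk instead of inspecting only the last event (which, when a `tag` is passed, is the "Successfully tagged" line rather than "Successfully built"). A minimal sketch of that pattern follows; the helper name `extract_image_id` is illustrative and not part of docker-py's API.

```python
import re

from docker.errors import BuildError
from docker.utils.json_stream import json_stream


def extract_image_id(resp):
    # Walk the decoded JSON chunks as they arrive; fail fast on the first
    # 'error' chunk and return as soon as a "Successfully built" (or
    # sha256) line appears, wherever it falls in the stream.
    for chunk in json_stream(resp):
        if 'error' in chunk:
            raise BuildError(chunk['error'])
        if 'stream' in chunk:
            match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)',
                              chunk['stream'])
            if match:
                return match.group(2)
    raise BuildError('Unknown')
```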
gh_patches_debug_6873
rasdani/github-patches
git_diff
DDMAL__CantusDB-454
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> required fields On OldCantus, to create a source you need both a manuscript ID and a siglum (fields marked with asterisk) otherwise it won't create the source. NewCantus has no asterisks on these fields, and was quite happy to let me make sources with no siglum (though it does tell me to fill out an ID field if I try to submit without it.) On the chant level, Folio and Sequence seem to be required fields (they are not on OldCantus!) but are not marked as such with asterisks, either. </issue> <code> [start of django/cantusdb_project/main_app/models/source.py] 1 from django.db import models 2 from main_app.models import BaseModel, Segment 3 from django.contrib.auth import get_user_model 4 5 6 class Source(BaseModel): 7 cursus_choices = [("Monastic", "Monastic"), ("Secular", "Secular")] 8 source_status_choices = [ 9 ( 10 "Editing process (not all the fields have been proofread)", 11 "Editing process (not all the fields have been proofread)", 12 ), 13 ("Published / Complete", "Published / Complete"), 14 ("Published / Proofread pending", "Published / Proofread pending"), 15 ("Unpublished / Editing process", "Unpublished / Editing process"), 16 ("Unpublished / Indexing process", "Unpublished / Indexing process"), 17 ("Unpublished / Proofread pending", "Unpublished / Proofread pending"), 18 ("Unpublished / Proofreading process", "Unpublished / Proofreading process"), 19 ("Unpublished / No indexing activity", "Unpublished / No indexing activity"), 20 ] 21 22 # The old Cantus uses two fields to jointly control the access to sources. 23 # Here in the new Cantus, we only use one field, and there are two levels: published and unpublished. 24 # Published sources are available to the public. 25 # Unpublished sources are hidden from the list and cannot be accessed by URL until the user logs in. 26 published = models.BooleanField(blank=False, null=False, default=False) 27 28 title = models.CharField( 29 max_length=255, 30 help_text="Full Manuscript Identification (City, Archive, Shelf-mark)", 31 ) 32 # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark 33 # it is a human-readable ID for a source 34 siglum = models.CharField( 35 max_length=63, 36 null=True, 37 blank=True, 38 help_text="RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).", 39 ) 40 # the RISM siglum uniquely identifies a library or holding institution 41 rism_siglum = models.ForeignKey( 42 "RismSiglum", on_delete=models.PROTECT, null=True, blank=True, 43 ) 44 provenance = models.ForeignKey( 45 "Provenance", 46 on_delete=models.PROTECT, 47 help_text="If the origin is unknown, select a location where the source was " 48 "used later in its lifetime and provide details in the " 49 '"Provenance notes" field.', 50 null=True, 51 blank=True, 52 related_name="sources", 53 ) 54 provenance_notes = models.TextField( 55 blank=True, 56 null=True, 57 help_text="More exact indication of the provenance (if necessary)", 58 ) 59 full_source = models.BooleanField(blank=True, null=True) 60 date = models.CharField( 61 blank=True, 62 null=True, 63 max_length=63, 64 help_text='Date of the manuscript (e.g. 
"1200s", "1300-1350", etc.)', 65 ) 66 century = models.ManyToManyField("Century", related_name="sources", blank=True) 67 notation = models.ManyToManyField("Notation", related_name="sources", blank=True) 68 cursus = models.CharField( 69 blank=True, null=True, choices=cursus_choices, max_length=63 70 ) 71 current_editors = models.ManyToManyField(get_user_model(), related_name="sources_user_can_edit", blank=True) 72 73 inventoried_by = models.ManyToManyField( 74 get_user_model(), related_name="inventoried_sources", blank=True 75 ) 76 full_text_entered_by = models.ManyToManyField( 77 get_user_model(), related_name="entered_full_text_for_sources", blank=True 78 ) 79 melodies_entered_by = models.ManyToManyField( 80 get_user_model(), related_name="entered_melody_for_sources", blank=True 81 ) 82 proofreaders = models.ManyToManyField(get_user_model(), related_name="proofread_sources", blank=True) 83 other_editors = models.ManyToManyField(get_user_model(), related_name="edited_sources", blank=True) 84 85 86 segment = models.ForeignKey( 87 "Segment", on_delete=models.PROTECT, blank=True, null=True 88 ) 89 source_status = models.CharField(blank=True, null=True, choices=source_status_choices, max_length=255) 90 complete_inventory = models.BooleanField(blank=True, null=True) 91 summary = models.TextField(blank=True, null=True) 92 liturgical_occasions = models.TextField(blank=True, null=True) 93 description = models.TextField(blank=True, null=True) 94 selected_bibliography = models.TextField(blank=True, null=True) 95 image_link = models.URLField( 96 blank=True, 97 null=True, 98 help_text='HTTP link to the image gallery of the source.', 99 ) 100 indexing_notes = models.TextField(blank=True, null=True) 101 indexing_date = models.TextField(blank=True, null=True) 102 json_info = models.JSONField(blank=True, null=True) 103 fragmentarium_id = models.CharField(max_length=15, blank=True, null=True) 104 dact_id = models.CharField(max_length=15, blank=True, null=True) 105 106 # number_of_chants and number_of_melodies are used for rendering the source-list page (perhaps among other places) 107 # they are automatically recalculated in main_app.signals.update_source_chant_count and 108 # main_app.signals.update_source_melody_count every time a chant or sequence is saved or deleted 109 number_of_chants = models.IntegerField(blank=True, null=True) 110 number_of_melodies = models.IntegerField(blank=True, null=True) 111 112 def __str__(self): 113 string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id) 114 return string 115 116 def save(self, *args, **kwargs): 117 # when creating a source, assign it to "CANTUS Database" segment by default 118 if not self.segment: 119 cantus_db_segment = Segment.objects.get(name="CANTUS Database") 120 self.segment = cantus_db_segment 121 super().save(*args, **kwargs) 122 [end of django/cantusdb_project/main_app/models/source.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py --- a/django/cantusdb_project/main_app/models/source.py +++ b/django/cantusdb_project/main_app/models/source.py @@ -33,8 +33,8 @@ # it is a human-readable ID for a source siglum = models.CharField( max_length=63, - null=True, - blank=True, + null=False, + blank=False, help_text="RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).", ) # the RISM siglum uniquely identifies a library or holding institution
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py\n--- a/django/cantusdb_project/main_app/models/source.py\n+++ b/django/cantusdb_project/main_app/models/source.py\n@@ -33,8 +33,8 @@\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n- null=True, \n- blank=True,\n+ null=False, \n+ blank=False,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n", "issue": "required fields \nOn OldCantus, to create a source you need both a manuscript ID and a siglum (fields marked with asterisk) otherwise it won't create the source. \r\nNewCantus has no asterisks on these fields, and was quite happy to let me make sources with no siglum (though it does tell me to fill out an ID field if I try to submit without it.)\r\n\r\nOn the chant level, Folio and Sequence seem to be required fields (they are not on OldCantus!) but are not marked as such with asterisks, either. \n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel, Segment\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n \"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n (\"Unpublished / No indexing activity\", \"Unpublished / No indexing activity\"),\n ]\n\n # The old Cantus uses two fields to jointly control the access to sources. \n # Here in the new Cantus, we only use one field, and there are two levels: published and unpublished.\n # Published sources are available to the public. \n # Unpublished sources are hidden from the list and cannot be accessed by URL until the user logs in.\n published = models.BooleanField(blank=False, null=False, default=False)\n\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. 
GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n related_name=\"sources\",\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. \"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\", blank=True)\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\", blank=True)\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\", blank=True)\n \n inventoried_by = models.ManyToManyField(\n get_user_model(), related_name=\"inventoried_sources\", blank=True\n )\n full_text_entered_by = models.ManyToManyField(\n get_user_model(), related_name=\"entered_full_text_for_sources\", blank=True\n )\n melodies_entered_by = models.ManyToManyField(\n get_user_model(), related_name=\"entered_melody_for_sources\", blank=True\n )\n proofreaders = models.ManyToManyField(get_user_model(), related_name=\"proofread_sources\", blank=True)\n other_editors = models.ManyToManyField(get_user_model(), related_name=\"edited_sources\", blank=True)\n \n\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, choices=source_status_choices, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n # number_of_chants and number_of_melodies are used for rendering the source-list page (perhaps among other places)\n # they are automatically recalculated in main_app.signals.update_source_chant_count and\n # main_app.signals.update_source_melody_count every time a chant or sequence is saved or deleted\n number_of_chants = models.IntegerField(blank=True, null=True)\n number_of_melodies = models.IntegerField(blank=True, null=True)\n\n def __str__(self):\n string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)\n return string\n\n def save(self, *args, **kwargs):\n # when creating a source, assign it to \"CANTUS Database\" segment by default\n if not self.segment:\n cantus_db_segment = Segment.objects.get(name=\"CANTUS 
Database\")\n self.segment = cantus_db_segment\n super().save(*args, **kwargs)\n", "path": "django/cantusdb_project/main_app/models/source.py"}]}
2,216
166
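The one-field sketch below isolates the Django pattern the diff above applies, assuming the usual app and migration setup around it: `blank=False` makes form validation reject empty submissions (which is what form renderers mark with an asterisk), while `null=False` enforces NOT NULL at the database level. Note that flipping `null` on an existing column also requires a schema migration, plus a default or data fix for rows that are currently NULL.

```python
from django.db import models


class Source(models.Model):
    # blank=False: required in forms; null=False: NOT NULL in the database.
    siglum = models.CharField(
        max_length=63,
        null=False,
        blank=False,
        help_text="RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).",
    )
```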
gh_patches_debug_26278
rasdani/github-patches
git_diff
apache__airflow-13012
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> from airflow.operators.python import PythonOperator does not work This is not necessarily a bug in core Airflow, but the upgrade-check scripts recommend this as a solution when the old 1.10.x version of importing the python operator is used. So, there is a mismatch between the core Airflow code and the recommendations given in the upgrade check. <!-- Welcome to Apache Airflow! For a smooth issue process, try to answer the following questions. Don't worry if they're not all applicable; just try to include what you can :-) If you need to include code snippets or logs, please put them in fenced code blocks. If they're super-long, please use the details tag like <details><summary>super-long log</summary> lots of stuff </details> Please delete these comment blocks before submitting the issue. --> <!-- IMPORTANT!!! PLEASE CHECK "SIMILAR TO X EXISTING ISSUES" OPTION IF VISIBLE NEXT TO "SUBMIT NEW ISSUE" BUTTON!!! PLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!! Please complete the next sections or the issue will be closed. These questions are the first thing we need to know to understand the context. --> **Apache Airflow version**: **Kubernetes version (if you are using kubernetes)** (use `kubectl version`): **Environment**: - **Cloud provider or hardware configuration**: - **OS** (e.g. from /etc/os-release): - **Kernel** (e.g. `uname -a`): - **Install tools**: - **Others**: **What happened**: <!-- (please include exact error messages if you can) --> **What you expected to happen**: <!-- What do you think went wrong? --> **How to reproduce it**: <!--- As minimally and precisely as possible. Keep in mind we do not have access to your cluster or dags. If you are using kubernetes, please attempt to recreate the issue using minikube or kind. ## Install minikube/kind - Minikube https://minikube.sigs.k8s.io/docs/start/ - Kind https://kind.sigs.k8s.io/docs/user/quick-start/ If this is a UI bug, please provide a screenshot of the bug or a link to a youtube video of the bug in action You can include images using the .md style of ![alt text](http://url/to/img.png) To record a screencast, mac users can use QuickTime and then create an unlisted youtube video with the resulting .mov file. ---> **Anything else we need to know**: <!-- How often does this problem occur? Once? Every time etc? Any relevant logs to include? Put them here in side a detail tag: <details><summary>x.log</summary> lots of stuff </details> --> </issue> <code> [start of airflow/upgrade/rules/import_changes.py] 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 
17 18 import itertools 19 from typing import NamedTuple, Optional, List 20 21 from cached_property import cached_property 22 from packaging.version import Version 23 24 from airflow import conf 25 from airflow.upgrade.rules.base_rule import BaseRule 26 from airflow.upgrade.rules.renamed_classes import ALL 27 from airflow.utils.dag_processing import list_py_file_paths 28 29 try: 30 from importlib_metadata import PackageNotFoundError, distribution 31 except ImportError: 32 from importlib.metadata import PackageNotFoundError, distribution 33 34 35 class ImportChange( 36 NamedTuple( 37 "ImportChange", 38 [("old_path", str), ("new_path", str), ("providers_package", Optional[str])], 39 ) 40 ): 41 def info(self, file_path=None): 42 msg = "Using `{}` should be replaced by `{}`".format(self.old_path, self.new_path) 43 if file_path: 44 msg += ". Affected file: {}".format(file_path) 45 return msg 46 47 @cached_property 48 def old_class(self): 49 return self.old_path.split(".")[-1] 50 51 @cached_property 52 def new_class(self): 53 return self.new_path.split(".")[-1] 54 55 @classmethod 56 def provider_stub_from_module(cls, module): 57 if "providers" not in module: 58 return None 59 60 # [2:] strips off the airflow.providers. part 61 parts = module.split(".")[2:] 62 if parts[0] in ('apache', 'cncf', 'microsoft'): 63 return '-'.join(parts[:2]) 64 return parts[0] 65 66 @classmethod 67 def from_new_old_paths(cls, new_path, old_path): 68 providers_package = cls.provider_stub_from_module(new_path) 69 return cls( 70 old_path=old_path, new_path=new_path, providers_package=providers_package 71 ) 72 73 74 class ImportChangesRule(BaseRule): 75 title = "Changes in import paths of hooks, operators, sensors and others" 76 description = ( 77 "Many hooks, operators and other classes has been renamed and moved. 
Those changes were part of " 78 "unifying names and imports paths as described in AIP-21.\nThe `contrib` folder has been replaced " 79 "by `providers` directory and packages:\n" 80 "https://github.com/apache/airflow#backport-packages" 81 ) 82 83 ALL_CHANGES = [ 84 ImportChange.from_new_old_paths(*args) for args in ALL 85 ] # type: List[ImportChange] 86 87 @staticmethod 88 def _check_file(file_path): 89 problems = [] 90 providers = set() 91 with open(file_path, "r") as file: 92 content = file.read() 93 for change in ImportChangesRule.ALL_CHANGES: 94 if change.old_class in content: 95 problems.append(change.info(file_path)) 96 if change.providers_package: 97 providers.add(change.providers_package) 98 return problems, providers 99 100 @staticmethod 101 def _check_missing_providers(providers): 102 103 current_airflow_version = Version(__import__("airflow").__version__) 104 if current_airflow_version.major >= 2: 105 prefix = "apache-airflow-providers-" 106 else: 107 prefix = "apache-airflow-backport-providers-" 108 109 for provider in providers: 110 dist_name = prefix + provider 111 try: 112 distribution(dist_name) 113 except PackageNotFoundError: 114 yield "Please install `{}`".format(dist_name) 115 116 def check(self): 117 dag_folder = conf.get("core", "dags_folder") 118 files = list_py_file_paths(directory=dag_folder, include_examples=False) 119 problems = [] 120 providers = set() 121 # Split in to two groups - install backports first, then make changes 122 for file in files: 123 new_problems, new_providers = self._check_file(file) 124 problems.extend(new_problems) 125 providers |= new_providers 126 127 return itertools.chain( 128 self._check_missing_providers(sorted(providers)), 129 problems, 130 ) 131 [end of airflow/upgrade/rules/import_changes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/upgrade/rules/import_changes.py b/airflow/upgrade/rules/import_changes.py --- a/airflow/upgrade/rules/import_changes.py +++ b/airflow/upgrade/rules/import_changes.py @@ -39,7 +39,9 @@ ) ): def info(self, file_path=None): - msg = "Using `{}` should be replaced by `{}`".format(self.old_path, self.new_path) + msg = "Using `{}` should be replaced by `{}`".format( + self.old_path, self.new_path + ) if file_path: msg += ". Affected file: {}".format(file_path) return msg @@ -80,10 +82,30 @@ "https://github.com/apache/airflow#backport-packages" ) + current_airflow_version = Version(__import__("airflow").__version__) + + if current_airflow_version < Version("2.0.0"): + + def _filter_incompatible_renames(arg): + new_path = arg[1] + return ( + not new_path.startswith("airflow.operators") + and not new_path.startswith("airflow.sensors") + and not new_path.startswith("airflow.hooks") + ) + + else: + # Everything allowed on 2.0.0+ + def _filter_incompatible_renames(arg): + return True + ALL_CHANGES = [ - ImportChange.from_new_old_paths(*args) for args in ALL + ImportChange.from_new_old_paths(*args) + for args in filter(_filter_incompatible_renames, ALL) ] # type: List[ImportChange] + del _filter_incompatible_renames + @staticmethod def _check_file(file_path): problems = []
{"golden_diff": "diff --git a/airflow/upgrade/rules/import_changes.py b/airflow/upgrade/rules/import_changes.py\n--- a/airflow/upgrade/rules/import_changes.py\n+++ b/airflow/upgrade/rules/import_changes.py\n@@ -39,7 +39,9 @@\n )\n ):\n def info(self, file_path=None):\n- msg = \"Using `{}` should be replaced by `{}`\".format(self.old_path, self.new_path)\n+ msg = \"Using `{}` should be replaced by `{}`\".format(\n+ self.old_path, self.new_path\n+ )\n if file_path:\n msg += \". Affected file: {}\".format(file_path)\n return msg\n@@ -80,10 +82,30 @@\n \"https://github.com/apache/airflow#backport-packages\"\n )\n \n+ current_airflow_version = Version(__import__(\"airflow\").__version__)\n+\n+ if current_airflow_version < Version(\"2.0.0\"):\n+\n+ def _filter_incompatible_renames(arg):\n+ new_path = arg[1]\n+ return (\n+ not new_path.startswith(\"airflow.operators\")\n+ and not new_path.startswith(\"airflow.sensors\")\n+ and not new_path.startswith(\"airflow.hooks\")\n+ )\n+\n+ else:\n+ # Everything allowed on 2.0.0+\n+ def _filter_incompatible_renames(arg):\n+ return True\n+\n ALL_CHANGES = [\n- ImportChange.from_new_old_paths(*args) for args in ALL\n+ ImportChange.from_new_old_paths(*args)\n+ for args in filter(_filter_incompatible_renames, ALL)\n ] # type: List[ImportChange]\n \n+ del _filter_incompatible_renames\n+\n @staticmethod\n def _check_file(file_path):\n problems = []\n", "issue": "from airflow.operators.python import PythonOperator does not work\nThis is not necessarily a bug in core Airflow, but the upgrade-check scripts recommend this as a solution when the old 1.10.x version of importing the python operator is used. \r\n\r\nSo, there is a mismatch between the core Airflow code and the recommendations given in the upgrade check. \r\n\r\n<!--\r\n\r\nWelcome to Apache Airflow! For a smooth issue process, try to answer the following questions.\r\nDon't worry if they're not all applicable; just try to include what you can :-)\r\n\r\nIf you need to include code snippets or logs, please put them in fenced code\r\nblocks. If they're super-long, please use the details tag like\r\n<details><summary>super-long log</summary> lots of stuff </details>\r\n\r\nPlease delete these comment blocks before submitting the issue.\r\n\r\n-->\r\n\r\n<!--\r\n\r\nIMPORTANT!!!\r\n\r\nPLEASE CHECK \"SIMILAR TO X EXISTING ISSUES\" OPTION IF VISIBLE\r\nNEXT TO \"SUBMIT NEW ISSUE\" BUTTON!!!\r\n\r\nPLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!!\r\n\r\nPlease complete the next sections or the issue will be closed.\r\nThese questions are the first thing we need to know to understand the context.\r\n\r\n-->\r\n\r\n**Apache Airflow version**:\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):\r\n\r\n**Environment**:\r\n\r\n- **Cloud provider or hardware configuration**:\r\n- **OS** (e.g. from /etc/os-release):\r\n- **Kernel** (e.g. `uname -a`):\r\n- **Install tools**:\r\n- **Others**:\r\n\r\n**What happened**:\r\n\r\n<!-- (please include exact error messages if you can) -->\r\n\r\n**What you expected to happen**:\r\n\r\n<!-- What do you think went wrong? -->\r\n\r\n**How to reproduce it**:\r\n<!---\r\n\r\nAs minimally and precisely as possible. 
Keep in mind we do not have access to your cluster or dags.\r\n\r\nIf you are using kubernetes, please attempt to recreate the issue using minikube or kind.\r\n\r\n## Install minikube/kind\r\n\r\n- Minikube https://minikube.sigs.k8s.io/docs/start/\r\n- Kind https://kind.sigs.k8s.io/docs/user/quick-start/\r\n\r\nIf this is a UI bug, please provide a screenshot of the bug or a link to a youtube video of the bug in action\r\n\r\nYou can include images using the .md style of\r\n![alt text](http://url/to/img.png)\r\n\r\nTo record a screencast, mac users can use QuickTime and then create an unlisted youtube video with the resulting .mov file.\r\n\r\n--->\r\n\r\n\r\n**Anything else we need to know**:\r\n\r\n<!--\r\n\r\nHow often does this problem occur? Once? Every time etc?\r\n\r\nAny relevant logs to include? Put them here in side a detail tag:\r\n<details><summary>x.log</summary> lots of stuff </details>\r\n\r\n-->\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport itertools\nfrom typing import NamedTuple, Optional, List\n\nfrom cached_property import cached_property\nfrom packaging.version import Version\n\nfrom airflow import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.upgrade.rules.renamed_classes import ALL\nfrom airflow.utils.dag_processing import list_py_file_paths\n\ntry:\n from importlib_metadata import PackageNotFoundError, distribution\nexcept ImportError:\n from importlib.metadata import PackageNotFoundError, distribution\n\n\nclass ImportChange(\n NamedTuple(\n \"ImportChange\",\n [(\"old_path\", str), (\"new_path\", str), (\"providers_package\", Optional[str])],\n )\n):\n def info(self, file_path=None):\n msg = \"Using `{}` should be replaced by `{}`\".format(self.old_path, self.new_path)\n if file_path:\n msg += \". Affected file: {}\".format(file_path)\n return msg\n\n @cached_property\n def old_class(self):\n return self.old_path.split(\".\")[-1]\n\n @cached_property\n def new_class(self):\n return self.new_path.split(\".\")[-1]\n\n @classmethod\n def provider_stub_from_module(cls, module):\n if \"providers\" not in module:\n return None\n\n # [2:] strips off the airflow.providers. part\n parts = module.split(\".\")[2:]\n if parts[0] in ('apache', 'cncf', 'microsoft'):\n return '-'.join(parts[:2])\n return parts[0]\n\n @classmethod\n def from_new_old_paths(cls, new_path, old_path):\n providers_package = cls.provider_stub_from_module(new_path)\n return cls(\n old_path=old_path, new_path=new_path, providers_package=providers_package\n )\n\n\nclass ImportChangesRule(BaseRule):\n title = \"Changes in import paths of hooks, operators, sensors and others\"\n description = (\n \"Many hooks, operators and other classes has been renamed and moved. 
Those changes were part of \"\n \"unifying names and imports paths as described in AIP-21.\\nThe `contrib` folder has been replaced \"\n \"by `providers` directory and packages:\\n\"\n \"https://github.com/apache/airflow#backport-packages\"\n )\n\n ALL_CHANGES = [\n ImportChange.from_new_old_paths(*args) for args in ALL\n ] # type: List[ImportChange]\n\n @staticmethod\n def _check_file(file_path):\n problems = []\n providers = set()\n with open(file_path, \"r\") as file:\n content = file.read()\n for change in ImportChangesRule.ALL_CHANGES:\n if change.old_class in content:\n problems.append(change.info(file_path))\n if change.providers_package:\n providers.add(change.providers_package)\n return problems, providers\n\n @staticmethod\n def _check_missing_providers(providers):\n\n current_airflow_version = Version(__import__(\"airflow\").__version__)\n if current_airflow_version.major >= 2:\n prefix = \"apache-airflow-providers-\"\n else:\n prefix = \"apache-airflow-backport-providers-\"\n\n for provider in providers:\n dist_name = prefix + provider\n try:\n distribution(dist_name)\n except PackageNotFoundError:\n yield \"Please install `{}`\".format(dist_name)\n\n def check(self):\n dag_folder = conf.get(\"core\", \"dags_folder\")\n files = list_py_file_paths(directory=dag_folder, include_examples=False)\n problems = []\n providers = set()\n # Split in to two groups - install backports first, then make changes\n for file in files:\n new_problems, new_providers = self._check_file(file)\n problems.extend(new_problems)\n providers |= new_providers\n\n return itertools.chain(\n self._check_missing_providers(sorted(providers)),\n problems,\n )\n", "path": "airflow/upgrade/rules/import_changes.py"}]}
2,428
402
gh_patches_debug_21494
rasdani/github-patches
git_diff
encode__httpx-637
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> header value surrounded by whitespace httpx [doesn't accept header value surrounded by whitespace](https://github.com/python-hyper/hyper-h2/blob/13005074d14c7d32f8eaf1683b854446a09d09d3/h2/utilities.py#L265), but other http2 implementations seems to accept them (nghttp2, browsers Firefox, Chrome). Example: ``` async with httpx.Client(http2=True) as client: response = await client.get('https://a.searx.space/headervaluetest') ``` Result : ``` ProtocolError: Received header value surrounded by whitespace b'A value ' ``` The nghttp2 implementation seems here: * https://github.com/nghttp2/nghttp2/blob/bb519154fe62f7ff7e5eb7269e05043dec6d3682/lib/nghttp2_http.c#L332 * https://github.com/nghttp2/nghttp2/blob/bb519154fe62f7ff7e5eb7269e05043dec6d3682/lib/nghttp2_helper.c#L498 I'm not sure if it should be declared as a bug or not. </issue> <code> [start of httpx/dispatch/http2.py] 1 import typing 2 3 import h2.connection 4 import h2.events 5 from h2.settings import SettingCodes, Settings 6 7 from ..concurrency.base import ( 8 BaseEvent, 9 BaseSocketStream, 10 ConcurrencyBackend, 11 lookup_backend, 12 ) 13 from ..config import Timeout 14 from ..exceptions import ProtocolError 15 from ..models import Request, Response 16 from ..utils import get_logger 17 from .base import OpenConnection 18 19 logger = get_logger(__name__) 20 21 22 class HTTP2Connection(OpenConnection): 23 READ_NUM_BYTES = 4096 24 25 def __init__( 26 self, 27 socket: BaseSocketStream, 28 backend: typing.Union[str, ConcurrencyBackend] = "auto", 29 on_release: typing.Callable = None, 30 ): 31 self.socket = socket 32 self.backend = lookup_backend(backend) 33 self.on_release = on_release 34 self.state = h2.connection.H2Connection() 35 36 self.streams = {} # type: typing.Dict[int, HTTP2Stream] 37 self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]] 38 39 self.init_started = False 40 41 @property 42 def is_http2(self) -> bool: 43 return True 44 45 @property 46 def init_complete(self) -> BaseEvent: 47 # We do this lazily, to make sure backend autodetection always 48 # runs within an async context. 49 if not hasattr(self, "_initialization_complete"): 50 self._initialization_complete = self.backend.create_event() 51 return self._initialization_complete 52 53 async def send(self, request: Request, timeout: Timeout = None) -> Response: 54 timeout = Timeout() if timeout is None else timeout 55 56 if not self.init_started: 57 # The very first stream is responsible for initiating the connection. 58 self.init_started = True 59 await self.send_connection_init(timeout) 60 stream_id = self.state.get_next_available_stream_id() 61 self.init_complete.set() 62 else: 63 # All other streams need to wait until the connection is established. 64 await self.init_complete.wait() 65 stream_id = self.state.get_next_available_stream_id() 66 67 stream = HTTP2Stream(stream_id=stream_id, connection=self) 68 self.streams[stream_id] = stream 69 self.events[stream_id] = [] 70 return await stream.send(request, timeout) 71 72 async def send_connection_init(self, timeout: Timeout) -> None: 73 """ 74 The HTTP/2 connection requires some initial setup before we can start 75 using individual request/response streams on it. 76 """ 77 78 # Need to set these manually here instead of manipulating via 79 # __setitem__() otherwise the H2Connection will emit SettingsUpdate 80 # frames in addition to sending the undesired defaults. 
81 self.state.local_settings = Settings( 82 client=True, 83 initial_values={ 84 # Disable PUSH_PROMISE frames from the server since we don't do anything 85 # with them for now. Maybe when we support caching? 86 SettingCodes.ENABLE_PUSH: 0, 87 # These two are taken from h2 for safe defaults 88 SettingCodes.MAX_CONCURRENT_STREAMS: 100, 89 SettingCodes.MAX_HEADER_LIST_SIZE: 65536, 90 }, 91 ) 92 93 # Some websites (*cough* Yahoo *cough*) balk at this setting being 94 # present in the initial handshake since it's not defined in the original 95 # RFC despite the RFC mandating ignoring settings you don't know about. 96 del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL] 97 98 self.state.initiate_connection() 99 self.state.increment_flow_control_window(2 ** 24) 100 data_to_send = self.state.data_to_send() 101 await self.socket.write(data_to_send, timeout) 102 103 @property 104 def is_closed(self) -> bool: 105 return False 106 107 def is_connection_dropped(self) -> bool: 108 return self.socket.is_connection_dropped() 109 110 async def close(self) -> None: 111 await self.socket.close() 112 113 async def wait_for_outgoing_flow(self, stream_id: int, timeout: Timeout) -> int: 114 """ 115 Returns the maximum allowable outgoing flow for a given stream. 116 117 If the allowable flow is zero, then waits on the network until 118 WindowUpdated frames have increased the flow rate. 119 120 https://tools.ietf.org/html/rfc7540#section-6.9 121 """ 122 local_flow = self.state.local_flow_control_window(stream_id) 123 connection_flow = self.state.max_outbound_frame_size 124 flow = min(local_flow, connection_flow) 125 while flow == 0: 126 await self.receive_events(timeout) 127 local_flow = self.state.local_flow_control_window(stream_id) 128 connection_flow = self.state.max_outbound_frame_size 129 flow = min(local_flow, connection_flow) 130 return flow 131 132 async def wait_for_event(self, stream_id: int, timeout: Timeout) -> h2.events.Event: 133 """ 134 Returns the next event for a given stream. 135 136 If no events are available yet, then waits on the network until 137 an event is available. 138 """ 139 while not self.events[stream_id]: 140 await self.receive_events(timeout) 141 return self.events[stream_id].pop(0) 142 143 async def receive_events(self, timeout: Timeout) -> None: 144 """ 145 Read some data from the network, and update the H2 state. 
146 """ 147 data = await self.socket.read(self.READ_NUM_BYTES, timeout) 148 events = self.state.receive_data(data) 149 for event in events: 150 event_stream_id = getattr(event, "stream_id", 0) 151 logger.trace(f"receive_event stream_id={event_stream_id} event={event!r}") 152 153 if hasattr(event, "error_code"): 154 raise ProtocolError(event) 155 156 if event_stream_id in self.events: 157 self.events[event_stream_id].append(event) 158 159 data_to_send = self.state.data_to_send() 160 await self.socket.write(data_to_send, timeout) 161 162 async def send_headers( 163 self, 164 stream_id: int, 165 headers: typing.List[typing.Tuple[bytes, bytes]], 166 timeout: Timeout, 167 ) -> None: 168 self.state.send_headers(stream_id, headers) 169 self.state.increment_flow_control_window(2 ** 24, stream_id=stream_id) 170 data_to_send = self.state.data_to_send() 171 await self.socket.write(data_to_send, timeout) 172 173 async def send_data(self, stream_id: int, chunk: bytes, timeout: Timeout) -> None: 174 self.state.send_data(stream_id, chunk) 175 data_to_send = self.state.data_to_send() 176 await self.socket.write(data_to_send, timeout) 177 178 async def end_stream(self, stream_id: int, timeout: Timeout) -> None: 179 self.state.end_stream(stream_id) 180 data_to_send = self.state.data_to_send() 181 await self.socket.write(data_to_send, timeout) 182 183 async def acknowledge_received_data( 184 self, stream_id: int, amount: int, timeout: Timeout 185 ) -> None: 186 self.state.acknowledge_received_data(amount, stream_id) 187 data_to_send = self.state.data_to_send() 188 await self.socket.write(data_to_send, timeout) 189 190 async def close_stream(self, stream_id: int) -> None: 191 del self.streams[stream_id] 192 del self.events[stream_id] 193 194 if not self.streams and self.on_release is not None: 195 await self.on_release() 196 197 198 class HTTP2Stream: 199 def __init__(self, stream_id: int, connection: HTTP2Connection) -> None: 200 self.stream_id = stream_id 201 self.connection = connection 202 203 async def send(self, request: Request, timeout: Timeout) -> Response: 204 # Send the request. 205 await self.send_headers(request, timeout) 206 await self.send_body(request, timeout) 207 208 # Receive the response. 
209 status_code, headers = await self.receive_response(timeout) 210 content = self.body_iter(timeout) 211 return Response( 212 status_code=status_code, 213 http_version="HTTP/2", 214 headers=headers, 215 content=content, 216 on_close=self.close, 217 request=request, 218 ) 219 220 async def send_headers(self, request: Request, timeout: Timeout) -> None: 221 headers = [ 222 (b":method", request.method.encode("ascii")), 223 (b":authority", request.url.authority.encode("ascii")), 224 (b":scheme", request.url.scheme.encode("ascii")), 225 (b":path", request.url.full_path.encode("ascii")), 226 ] + [(k, v) for k, v in request.headers.raw if k != b"host"] 227 228 logger.trace( 229 f"send_headers " 230 f"stream_id={self.stream_id} " 231 f"method={request.method!r} " 232 f"target={request.url.full_path!r} " 233 f"headers={headers!r}" 234 ) 235 await self.connection.send_headers(self.stream_id, headers, timeout) 236 237 async def send_body(self, request: Request, timeout: Timeout) -> None: 238 logger.trace(f"send_body stream_id={self.stream_id}") 239 async for data in request.stream(): 240 while data: 241 max_flow = await self.connection.wait_for_outgoing_flow( 242 self.stream_id, timeout 243 ) 244 chunk_size = min(len(data), max_flow) 245 chunk, data = data[:chunk_size], data[chunk_size:] 246 await self.connection.send_data(self.stream_id, chunk, timeout) 247 248 await self.connection.end_stream(self.stream_id, timeout) 249 250 async def receive_response( 251 self, timeout: Timeout 252 ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: 253 """ 254 Read the response status and headers from the network. 255 """ 256 while True: 257 event = await self.connection.wait_for_event(self.stream_id, timeout) 258 if isinstance(event, h2.events.ResponseReceived): 259 break 260 261 status_code = 200 262 headers = [] 263 for k, v in event.headers: 264 if k == b":status": 265 status_code = int(v.decode("ascii", errors="ignore")) 266 elif not k.startswith(b":"): 267 headers.append((k, v)) 268 269 return (status_code, headers) 270 271 async def body_iter(self, timeout: Timeout) -> typing.AsyncIterator[bytes]: 272 while True: 273 event = await self.connection.wait_for_event(self.stream_id, timeout) 274 if isinstance(event, h2.events.DataReceived): 275 amount = event.flow_controlled_length 276 await self.connection.acknowledge_received_data( 277 self.stream_id, amount, timeout 278 ) 279 yield event.data 280 elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)): 281 break 282 283 async def close(self) -> None: 284 await self.connection.close_stream(self.stream_id) 285 [end of httpx/dispatch/http2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/httpx/dispatch/http2.py b/httpx/dispatch/http2.py --- a/httpx/dispatch/http2.py +++ b/httpx/dispatch/http2.py @@ -2,6 +2,7 @@ import h2.connection import h2.events +from h2.config import H2Configuration from h2.settings import SettingCodes, Settings from ..concurrency.base import ( @@ -21,6 +22,7 @@ class HTTP2Connection(OpenConnection): READ_NUM_BYTES = 4096 + CONFIG = H2Configuration(validate_inbound_headers=False) def __init__( self, @@ -31,7 +33,7 @@ self.socket = socket self.backend = lookup_backend(backend) self.on_release = on_release - self.state = h2.connection.H2Connection() + self.state = h2.connection.H2Connection(config=self.CONFIG) self.streams = {} # type: typing.Dict[int, HTTP2Stream] self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]
{"golden_diff": "diff --git a/httpx/dispatch/http2.py b/httpx/dispatch/http2.py\n--- a/httpx/dispatch/http2.py\n+++ b/httpx/dispatch/http2.py\n@@ -2,6 +2,7 @@\n \n import h2.connection\n import h2.events\n+from h2.config import H2Configuration\n from h2.settings import SettingCodes, Settings\n \n from ..concurrency.base import (\n@@ -21,6 +22,7 @@\n \n class HTTP2Connection(OpenConnection):\n READ_NUM_BYTES = 4096\n+ CONFIG = H2Configuration(validate_inbound_headers=False)\n \n def __init__(\n self,\n@@ -31,7 +33,7 @@\n self.socket = socket\n self.backend = lookup_backend(backend)\n self.on_release = on_release\n- self.state = h2.connection.H2Connection()\n+ self.state = h2.connection.H2Connection(config=self.CONFIG)\n \n self.streams = {} # type: typing.Dict[int, HTTP2Stream]\n self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]\n", "issue": "header value surrounded by whitespace\nhttpx [doesn't accept header value surrounded by whitespace](https://github.com/python-hyper/hyper-h2/blob/13005074d14c7d32f8eaf1683b854446a09d09d3/h2/utilities.py#L265), but other http2 implementations seems to accept them (nghttp2, browsers Firefox, Chrome).\r\n\r\nExample:\r\n```\r\nasync with httpx.Client(http2=True) as client:\r\n\tresponse = await client.get('https://a.searx.space/headervaluetest')\r\n```\r\n\r\nResult :\r\n```\r\nProtocolError: Received header value surrounded by whitespace b'A value '\r\n```\r\n\r\nThe nghttp2 implementation seems here:\r\n* https://github.com/nghttp2/nghttp2/blob/bb519154fe62f7ff7e5eb7269e05043dec6d3682/lib/nghttp2_http.c#L332\r\n* https://github.com/nghttp2/nghttp2/blob/bb519154fe62f7ff7e5eb7269e05043dec6d3682/lib/nghttp2_helper.c#L498\r\n\r\nI'm not sure if it should be declared as a bug or not.\n", "before_files": [{"content": "import typing\n\nimport h2.connection\nimport h2.events\nfrom h2.settings import SettingCodes, Settings\n\nfrom ..concurrency.base import (\n BaseEvent,\n BaseSocketStream,\n ConcurrencyBackend,\n lookup_backend,\n)\nfrom ..config import Timeout\nfrom ..exceptions import ProtocolError\nfrom ..models import Request, Response\nfrom ..utils import get_logger\nfrom .base import OpenConnection\n\nlogger = get_logger(__name__)\n\n\nclass HTTP2Connection(OpenConnection):\n READ_NUM_BYTES = 4096\n\n def __init__(\n self,\n socket: BaseSocketStream,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n on_release: typing.Callable = None,\n ):\n self.socket = socket\n self.backend = lookup_backend(backend)\n self.on_release = on_release\n self.state = h2.connection.H2Connection()\n\n self.streams = {} # type: typing.Dict[int, HTTP2Stream]\n self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]\n\n self.init_started = False\n\n @property\n def is_http2(self) -> bool:\n return True\n\n @property\n def init_complete(self) -> BaseEvent:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_initialization_complete\"):\n self._initialization_complete = self.backend.create_event()\n return self._initialization_complete\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n timeout = Timeout() if timeout is None else timeout\n\n if not self.init_started:\n # The very first stream is responsible for initiating the connection.\n self.init_started = True\n await self.send_connection_init(timeout)\n stream_id = self.state.get_next_available_stream_id()\n self.init_complete.set()\n else:\n # All other streams need to wait 
until the connection is established.\n await self.init_complete.wait()\n stream_id = self.state.get_next_available_stream_id()\n\n stream = HTTP2Stream(stream_id=stream_id, connection=self)\n self.streams[stream_id] = stream\n self.events[stream_id] = []\n return await stream.send(request, timeout)\n\n async def send_connection_init(self, timeout: Timeout) -> None:\n \"\"\"\n The HTTP/2 connection requires some initial setup before we can start\n using individual request/response streams on it.\n \"\"\"\n\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self.state.local_settings = Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. Maybe when we support caching?\n SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]\n\n self.state.initiate_connection()\n self.state.increment_flow_control_window(2 ** 24)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n @property\n def is_closed(self) -> bool:\n return False\n\n def is_connection_dropped(self) -> bool:\n return self.socket.is_connection_dropped()\n\n async def close(self) -> None:\n await self.socket.close()\n\n async def wait_for_outgoing_flow(self, stream_id: int, timeout: Timeout) -> int:\n \"\"\"\n Returns the maximum allowable outgoing flow for a given stream.\n\n If the allowable flow is zero, then waits on the network until\n WindowUpdated frames have increased the flow rate.\n\n https://tools.ietf.org/html/rfc7540#section-6.9\n \"\"\"\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n while flow == 0:\n await self.receive_events(timeout)\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n return flow\n\n async def wait_for_event(self, stream_id: int, timeout: Timeout) -> h2.events.Event:\n \"\"\"\n Returns the next event for a given stream.\n\n If no events are available yet, then waits on the network until\n an event is available.\n \"\"\"\n while not self.events[stream_id]:\n await self.receive_events(timeout)\n return self.events[stream_id].pop(0)\n\n async def receive_events(self, timeout: Timeout) -> None:\n \"\"\"\n Read some data from the network, and update the H2 state.\n \"\"\"\n data = await self.socket.read(self.READ_NUM_BYTES, timeout)\n events = self.state.receive_data(data)\n for event in events:\n event_stream_id = getattr(event, \"stream_id\", 0)\n logger.trace(f\"receive_event stream_id={event_stream_id} event={event!r}\")\n\n if hasattr(event, \"error_code\"):\n raise ProtocolError(event)\n\n if event_stream_id in self.events:\n self.events[event_stream_id].append(event)\n\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_headers(\n self,\n 
stream_id: int,\n headers: typing.List[typing.Tuple[bytes, bytes]],\n timeout: Timeout,\n ) -> None:\n self.state.send_headers(stream_id, headers)\n self.state.increment_flow_control_window(2 ** 24, stream_id=stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_data(self, stream_id: int, chunk: bytes, timeout: Timeout) -> None:\n self.state.send_data(stream_id, chunk)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def end_stream(self, stream_id: int, timeout: Timeout) -> None:\n self.state.end_stream(stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def acknowledge_received_data(\n self, stream_id: int, amount: int, timeout: Timeout\n ) -> None:\n self.state.acknowledge_received_data(amount, stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def close_stream(self, stream_id: int) -> None:\n del self.streams[stream_id]\n del self.events[stream_id]\n\n if not self.streams and self.on_release is not None:\n await self.on_release()\n\n\nclass HTTP2Stream:\n def __init__(self, stream_id: int, connection: HTTP2Connection) -> None:\n self.stream_id = stream_id\n self.connection = connection\n\n async def send(self, request: Request, timeout: Timeout) -> Response:\n # Send the request.\n await self.send_headers(request, timeout)\n await self.send_body(request, timeout)\n\n # Receive the response.\n status_code, headers = await self.receive_response(timeout)\n content = self.body_iter(timeout)\n return Response(\n status_code=status_code,\n http_version=\"HTTP/2\",\n headers=headers,\n content=content,\n on_close=self.close,\n request=request,\n )\n\n async def send_headers(self, request: Request, timeout: Timeout) -> None:\n headers = [\n (b\":method\", request.method.encode(\"ascii\")),\n (b\":authority\", request.url.authority.encode(\"ascii\")),\n (b\":scheme\", request.url.scheme.encode(\"ascii\")),\n (b\":path\", request.url.full_path.encode(\"ascii\")),\n ] + [(k, v) for k, v in request.headers.raw if k != b\"host\"]\n\n logger.trace(\n f\"send_headers \"\n f\"stream_id={self.stream_id} \"\n f\"method={request.method!r} \"\n f\"target={request.url.full_path!r} \"\n f\"headers={headers!r}\"\n )\n await self.connection.send_headers(self.stream_id, headers, timeout)\n\n async def send_body(self, request: Request, timeout: Timeout) -> None:\n logger.trace(f\"send_body stream_id={self.stream_id}\")\n async for data in request.stream():\n while data:\n max_flow = await self.connection.wait_for_outgoing_flow(\n self.stream_id, timeout\n )\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n await self.connection.send_data(self.stream_id, chunk, timeout)\n\n await self.connection.end_stream(self.stream_id, timeout)\n\n async def receive_response(\n self, timeout: Timeout\n ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:\n \"\"\"\n Read the response status and headers from the network.\n \"\"\"\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.ResponseReceived):\n break\n\n status_code = 200\n headers = []\n for k, v in event.headers:\n if k == b\":status\":\n status_code = int(v.decode(\"ascii\", errors=\"ignore\"))\n elif not k.startswith(b\":\"):\n headers.append((k, v))\n\n return (status_code, headers)\n\n async def 
body_iter(self, timeout: Timeout) -> typing.AsyncIterator[bytes]:\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.DataReceived):\n amount = event.flow_controlled_length\n await self.connection.acknowledge_received_data(\n self.stream_id, amount, timeout\n )\n yield event.data\n elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):\n break\n\n async def close(self) -> None:\n await self.connection.close_stream(self.stream_id)\n", "path": "httpx/dispatch/http2.py"}]}
3,890
243
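The h2 record above turns on exactly one knob: `H2Configuration(validate_inbound_headers=False)`, which tells the state machine to accept header values that strict validation would reject, such as `b'A value '` with trailing whitespace (values that nghttp2 and browsers already tolerate, per the issue). A minimal sketch of that configuration in isolation, assuming only the `h2` package; the socket plumbing and stream bookkeeping from the record are deliberately omitted:

```python
import h2.config
import h2.connection

# Lenient config: do not abort streams whose peers send header values
# padded with whitespace.
LENIENT = h2.config.H2Configuration(
    client_side=True,
    validate_inbound_headers=False,
)

def make_connection() -> h2.connection.H2Connection:
    """Build an h2 state machine that tolerates whitespace-padded headers."""
    conn = h2.connection.H2Connection(config=LENIENT)
    conn.initiate_connection()
    # The caller writes this preamble to its socket before anything else.
    preamble = conn.data_to_send()
    assert preamble  # connection preface plus initial SETTINGS frame
    return conn

conn = make_connection()
```

Building the configuration once at class scope, as the diff does with `CONFIG`, keeps every connection consistent instead of re-deciding validation per instance.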
gh_patches_debug_20492
rasdani/github-patches
git_diff
frappe__frappe-23132
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add jitter on scheduled jobs hourly, daily long processes if all started at once can cause sudden increase in workload if you have many sites/benches. Adding simple jitter to scheduled time can lessen the impact of such issues. Jitter is common pattern used for solving problems with "frequency" becomes a problem. E.g. gunicorn adds jitter to avoid restarting all workers at same time, profilers add jitter to avoid amplifying some pattern of repeated work. retry/backoff implementations also use to avoid creating patterns. Possible implementation: When importing scheduled job types add some random delays in cron. E.g. daily jobs will start in the range of 12:00-12:15 AM instead of all starting at 12:00 AM. Cons: Some jobs are required to be executed at specific times e.g. birthday reminders. So adding negative offset can introduce bugs for them, positive offset however should be fine AFAIK. </issue> <code> [start of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py] 1 # Copyright (c) 2021, Frappe Technologies and contributors 2 # License: MIT. See LICENSE 3 4 import json 5 from datetime import datetime 6 7 import click 8 from croniter import croniter 9 10 import frappe 11 from frappe.model.document import Document 12 from frappe.utils import get_datetime, now_datetime 13 from frappe.utils.background_jobs import enqueue, is_job_enqueued 14 15 16 class ScheduledJobType(Document): 17 # begin: auto-generated types 18 # This code is auto-generated. Do not modify anything in this block. 19 20 from typing import TYPE_CHECKING 21 22 if TYPE_CHECKING: 23 from frappe.types import DF 24 25 create_log: DF.Check 26 cron_format: DF.Data | None 27 frequency: DF.Literal[ 28 "All", 29 "Hourly", 30 "Hourly Long", 31 "Daily", 32 "Daily Long", 33 "Weekly", 34 "Weekly Long", 35 "Monthly", 36 "Monthly Long", 37 "Cron", 38 "Yearly", 39 "Annual", 40 ] 41 last_execution: DF.Datetime | None 42 method: DF.Data 43 next_execution: DF.Datetime | None 44 server_script: DF.Link | None 45 stopped: DF.Check 46 # end: auto-generated types 47 def autoname(self): 48 self.name = ".".join(self.method.split(".")[-2:]) 49 50 def validate(self): 51 if self.frequency != "All": 52 # force logging for all events other than continuous ones (ALL) 53 self.create_log = 1 54 55 def enqueue(self, force=False) -> bool: 56 # enqueue event if last execution is done 57 if self.is_event_due() or force: 58 if not self.is_job_in_queue(): 59 enqueue( 60 "frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job", 61 queue=self.get_queue_name(), 62 job_type=self.method, 63 job_id=self.rq_job_id, 64 ) 65 return True 66 else: 67 frappe.logger("scheduler").error( 68 f"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}" 69 ) 70 71 return False 72 73 def is_event_due(self, current_time=None): 74 """Return true if event is due based on time lapsed since last execution""" 75 # if the next scheduled event is before NOW, then its due! 
76 return self.get_next_execution() <= (current_time or now_datetime()) 77 78 def is_job_in_queue(self) -> bool: 79 return is_job_enqueued(self.rq_job_id) 80 81 @property 82 def rq_job_id(self): 83 """Unique ID created to deduplicate jobs with single RQ call.""" 84 return f"scheduled_job::{self.method}" 85 86 @property 87 def next_execution(self): 88 return self.get_next_execution() 89 90 def get_next_execution(self): 91 CRON_MAP = { 92 "Yearly": "0 0 1 1 *", 93 "Annual": "0 0 1 1 *", 94 "Monthly": "0 0 1 * *", 95 "Monthly Long": "0 0 1 * *", 96 "Weekly": "0 0 * * 0", 97 "Weekly Long": "0 0 * * 0", 98 "Daily": "0 0 * * *", 99 "Daily Long": "0 0 * * *", 100 "Hourly": "0 * * * *", 101 "Hourly Long": "0 * * * *", 102 "All": f"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *", 103 } 104 105 if not self.cron_format: 106 self.cron_format = CRON_MAP[self.frequency] 107 108 # If this is a cold start then last_execution will not be set. 109 # Creation is set as fallback because if very old fallback is set job might trigger 110 # immediately, even when it's meant to be daily. 111 # A dynamic fallback like current time might miss the scheduler interval and job will never start. 112 last_execution = get_datetime(self.last_execution or self.creation) 113 return croniter(self.cron_format, last_execution).get_next(datetime) 114 115 def execute(self): 116 self.scheduler_log = None 117 try: 118 self.log_status("Start") 119 if self.server_script: 120 script_name = frappe.db.get_value("Server Script", self.server_script) 121 if script_name: 122 frappe.get_doc("Server Script", script_name).execute_scheduled_method() 123 else: 124 frappe.get_attr(self.method)() 125 frappe.db.commit() 126 self.log_status("Complete") 127 except Exception: 128 frappe.db.rollback() 129 self.log_status("Failed") 130 131 def log_status(self, status): 132 # log file 133 frappe.logger("scheduler").info(f"Scheduled Job {status}: {self.method} for {frappe.local.site}") 134 self.update_scheduler_log(status) 135 136 def update_scheduler_log(self, status): 137 if not self.create_log: 138 # self.get_next_execution will work properly iff self.last_execution is properly set 139 if self.frequency == "All" and status == "Start": 140 self.db_set("last_execution", now_datetime(), update_modified=False) 141 frappe.db.commit() 142 return 143 if not self.scheduler_log: 144 self.scheduler_log = frappe.get_doc( 145 dict(doctype="Scheduled Job Log", scheduled_job_type=self.name) 146 ).insert(ignore_permissions=True) 147 self.scheduler_log.db_set("status", status) 148 if frappe.debug_log: 149 self.scheduler_log.db_set("debug_log", "\n".join(frappe.debug_log)) 150 if status == "Failed": 151 self.scheduler_log.db_set("details", frappe.get_traceback()) 152 if status == "Start": 153 self.db_set("last_execution", now_datetime(), update_modified=False) 154 frappe.db.commit() 155 156 def get_queue_name(self): 157 return "long" if ("Long" in self.frequency) else "default" 158 159 def on_trash(self): 160 frappe.db.delete("Scheduled Job Log", {"scheduled_job_type": self.name}) 161 162 163 @frappe.whitelist() 164 def execute_event(doc: str): 165 frappe.only_for("System Manager") 166 doc = json.loads(doc) 167 frappe.get_doc("Scheduled Job Type", doc.get("name")).enqueue(force=True) 168 return doc 169 170 171 def run_scheduled_job(job_type: str): 172 """This is a wrapper function that runs a hooks.scheduler_events method""" 173 try: 174 frappe.get_doc("Scheduled Job Type", dict(method=job_type)).execute() 175 except Exception: 176 
print(frappe.get_traceback()) 177 178 179 def sync_jobs(hooks: dict = None): 180 frappe.reload_doc("core", "doctype", "scheduled_job_type") 181 scheduler_events = hooks or frappe.get_hooks("scheduler_events") 182 all_events = insert_events(scheduler_events) 183 clear_events(all_events) 184 185 186 def insert_events(scheduler_events: dict) -> list: 187 cron_jobs, event_jobs = [], [] 188 for event_type in scheduler_events: 189 events = scheduler_events.get(event_type) 190 if isinstance(events, dict): 191 cron_jobs += insert_cron_jobs(events) 192 else: 193 # hourly, daily etc 194 event_jobs += insert_event_jobs(events, event_type) 195 return cron_jobs + event_jobs 196 197 198 def insert_cron_jobs(events: dict) -> list: 199 cron_jobs = [] 200 for cron_format in events: 201 for event in events.get(cron_format): 202 cron_jobs.append(event) 203 insert_single_event("Cron", event, cron_format) 204 return cron_jobs 205 206 207 def insert_event_jobs(events: list, event_type: str) -> list: 208 event_jobs = [] 209 for event in events: 210 event_jobs.append(event) 211 frequency = event_type.replace("_", " ").title() 212 insert_single_event(frequency, event) 213 return event_jobs 214 215 216 def insert_single_event(frequency: str, event: str, cron_format: str = None): 217 cron_expr = {"cron_format": cron_format} if cron_format else {} 218 219 try: 220 frappe.get_attr(event) 221 except Exception as e: 222 click.secho(f"{event} is not a valid method: {e}", fg="yellow") 223 224 doc = frappe.get_doc( 225 { 226 "doctype": "Scheduled Job Type", 227 "method": event, 228 "cron_format": cron_format, 229 "frequency": frequency, 230 } 231 ) 232 233 if not frappe.db.exists( 234 "Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr} 235 ): 236 savepoint = "scheduled_job_type_creation" 237 try: 238 frappe.db.savepoint(savepoint) 239 doc.insert() 240 except frappe.DuplicateEntryError: 241 frappe.db.rollback(save_point=savepoint) 242 doc.delete() 243 doc.insert() 244 245 246 def clear_events(all_events: list): 247 for event in frappe.get_all("Scheduled Job Type", fields=["name", "method", "server_script"]): 248 is_server_script = event.server_script 249 is_defined_in_hooks = event.method in all_events 250 251 if not (is_defined_in_hooks or is_server_script): 252 frappe.delete_doc("Scheduled Job Type", event.name) 253 [end of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py --- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py +++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py @@ -2,7 +2,8 @@ # License: MIT. See LICENSE import json -from datetime import datetime +from datetime import datetime, timedelta +from random import randint import click from croniter import croniter @@ -110,7 +111,12 @@ # immediately, even when it's meant to be daily. # A dynamic fallback like current time might miss the scheduler interval and job will never start. last_execution = get_datetime(self.last_execution or self.creation) - return croniter(self.cron_format, last_execution).get_next(datetime) + next_execution = croniter(self.cron_format, last_execution).get_next(datetime) + + jitter = 0 + if self.frequency in ("Hourly Long", "Daily Long"): + jitter = randint(1, 600) + return next_execution + timedelta(seconds=jitter) def execute(self): self.scheduler_log = None
{"golden_diff": "diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n@@ -2,7 +2,8 @@\n # License: MIT. See LICENSE\n \n import json\n-from datetime import datetime\n+from datetime import datetime, timedelta\n+from random import randint\n \n import click\n from croniter import croniter\n@@ -110,7 +111,12 @@\n \t\t# immediately, even when it's meant to be daily.\n \t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n \t\tlast_execution = get_datetime(self.last_execution or self.creation)\n-\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n+\t\tnext_execution = croniter(self.cron_format, last_execution).get_next(datetime)\n+\n+\t\tjitter = 0\n+\t\tif self.frequency in (\"Hourly Long\", \"Daily Long\"):\n+\t\t\tjitter = randint(1, 600)\n+\t\treturn next_execution + timedelta(seconds=jitter)\n \n \tdef execute(self):\n \t\tself.scheduler_log = None\n", "issue": "Add jitter on scheduled jobs\nhourly, daily long processes if all started at once can cause sudden increase in workload if you have many sites/benches. \r\n\r\n\r\nAdding simple jitter to scheduled time can lessen the impact of such issues. Jitter is common pattern used for solving problems with \"frequency\" becomes a problem. E.g. gunicorn adds jitter to avoid restarting all workers at same time, profilers add jitter to avoid amplifying some pattern of repeated work. retry/backoff implementations also use to avoid creating patterns.\r\n\r\n\r\nPossible implementation: When importing scheduled job types add some random delays in cron. E.g. daily jobs will start in the range of 12:00-12:15 AM instead of all starting at 12:00 AM.\r\n\r\n\r\nCons: Some jobs are required to be executed at specific times e.g. birthday reminders. So adding negative offset can introduce bugs for them, positive offset however should be fine AFAIK. \n", "before_files": [{"content": "# Copyright (c) 2021, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport json\nfrom datetime import datetime\n\nimport click\nfrom croniter import croniter\n\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.utils import get_datetime, now_datetime\nfrom frappe.utils.background_jobs import enqueue, is_job_enqueued\n\n\nclass ScheduledJobType(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. 
Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tcreate_log: DF.Check\n\t\tcron_format: DF.Data | None\n\t\tfrequency: DF.Literal[\n\t\t\t\"All\",\n\t\t\t\"Hourly\",\n\t\t\t\"Hourly Long\",\n\t\t\t\"Daily\",\n\t\t\t\"Daily Long\",\n\t\t\t\"Weekly\",\n\t\t\t\"Weekly Long\",\n\t\t\t\"Monthly\",\n\t\t\t\"Monthly Long\",\n\t\t\t\"Cron\",\n\t\t\t\"Yearly\",\n\t\t\t\"Annual\",\n\t\t]\n\t\tlast_execution: DF.Datetime | None\n\t\tmethod: DF.Data\n\t\tnext_execution: DF.Datetime | None\n\t\tserver_script: DF.Link | None\n\t\tstopped: DF.Check\n\t# end: auto-generated types\n\tdef autoname(self):\n\t\tself.name = \".\".join(self.method.split(\".\")[-2:])\n\n\tdef validate(self):\n\t\tif self.frequency != \"All\":\n\t\t\t# force logging for all events other than continuous ones (ALL)\n\t\t\tself.create_log = 1\n\n\tdef enqueue(self, force=False) -> bool:\n\t\t# enqueue event if last execution is done\n\t\tif self.is_event_due() or force:\n\t\t\tif not self.is_job_in_queue():\n\t\t\t\tenqueue(\n\t\t\t\t\t\"frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job\",\n\t\t\t\t\tqueue=self.get_queue_name(),\n\t\t\t\t\tjob_type=self.method,\n\t\t\t\t\tjob_id=self.rq_job_id,\n\t\t\t\t)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tfrappe.logger(\"scheduler\").error(\n\t\t\t\t\tf\"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}\"\n\t\t\t\t)\n\n\t\treturn False\n\n\tdef is_event_due(self, current_time=None):\n\t\t\"\"\"Return true if event is due based on time lapsed since last execution\"\"\"\n\t\t# if the next scheduled event is before NOW, then its due!\n\t\treturn self.get_next_execution() <= (current_time or now_datetime())\n\n\tdef is_job_in_queue(self) -> bool:\n\t\treturn is_job_enqueued(self.rq_job_id)\n\n\t@property\n\tdef rq_job_id(self):\n\t\t\"\"\"Unique ID created to deduplicate jobs with single RQ call.\"\"\"\n\t\treturn f\"scheduled_job::{self.method}\"\n\n\t@property\n\tdef next_execution(self):\n\t\treturn self.get_next_execution()\n\n\tdef get_next_execution(self):\n\t\tCRON_MAP = {\n\t\t\t\"Yearly\": \"0 0 1 1 *\",\n\t\t\t\"Annual\": \"0 0 1 1 *\",\n\t\t\t\"Monthly\": \"0 0 1 * *\",\n\t\t\t\"Monthly Long\": \"0 0 1 * *\",\n\t\t\t\"Weekly\": \"0 0 * * 0\",\n\t\t\t\"Weekly Long\": \"0 0 * * 0\",\n\t\t\t\"Daily\": \"0 0 * * *\",\n\t\t\t\"Daily Long\": \"0 0 * * *\",\n\t\t\t\"Hourly\": \"0 * * * *\",\n\t\t\t\"Hourly Long\": \"0 * * * *\",\n\t\t\t\"All\": f\"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *\",\n\t\t}\n\n\t\tif not self.cron_format:\n\t\t\tself.cron_format = CRON_MAP[self.frequency]\n\n\t\t# If this is a cold start then last_execution will not be set.\n\t\t# Creation is set as fallback because if very old fallback is set job might trigger\n\t\t# immediately, even when it's meant to be daily.\n\t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n\t\tlast_execution = get_datetime(self.last_execution or self.creation)\n\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n\n\tdef execute(self):\n\t\tself.scheduler_log = None\n\t\ttry:\n\t\t\tself.log_status(\"Start\")\n\t\t\tif self.server_script:\n\t\t\t\tscript_name = frappe.db.get_value(\"Server Script\", self.server_script)\n\t\t\t\tif script_name:\n\t\t\t\t\tfrappe.get_doc(\"Server Script\", 
script_name).execute_scheduled_method()\n\t\t\telse:\n\t\t\t\tfrappe.get_attr(self.method)()\n\t\t\tfrappe.db.commit()\n\t\t\tself.log_status(\"Complete\")\n\t\texcept Exception:\n\t\t\tfrappe.db.rollback()\n\t\t\tself.log_status(\"Failed\")\n\n\tdef log_status(self, status):\n\t\t# log file\n\t\tfrappe.logger(\"scheduler\").info(f\"Scheduled Job {status}: {self.method} for {frappe.local.site}\")\n\t\tself.update_scheduler_log(status)\n\n\tdef update_scheduler_log(self, status):\n\t\tif not self.create_log:\n\t\t\t# self.get_next_execution will work properly iff self.last_execution is properly set\n\t\t\tif self.frequency == \"All\" and status == \"Start\":\n\t\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\t\t\tfrappe.db.commit()\n\t\t\treturn\n\t\tif not self.scheduler_log:\n\t\t\tself.scheduler_log = frappe.get_doc(\n\t\t\t\tdict(doctype=\"Scheduled Job Log\", scheduled_job_type=self.name)\n\t\t\t).insert(ignore_permissions=True)\n\t\tself.scheduler_log.db_set(\"status\", status)\n\t\tif frappe.debug_log:\n\t\t\tself.scheduler_log.db_set(\"debug_log\", \"\\n\".join(frappe.debug_log))\n\t\tif status == \"Failed\":\n\t\t\tself.scheduler_log.db_set(\"details\", frappe.get_traceback())\n\t\tif status == \"Start\":\n\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\tfrappe.db.commit()\n\n\tdef get_queue_name(self):\n\t\treturn \"long\" if (\"Long\" in self.frequency) else \"default\"\n\n\tdef on_trash(self):\n\t\tfrappe.db.delete(\"Scheduled Job Log\", {\"scheduled_job_type\": self.name})\n\n\[email protected]()\ndef execute_event(doc: str):\n\tfrappe.only_for(\"System Manager\")\n\tdoc = json.loads(doc)\n\tfrappe.get_doc(\"Scheduled Job Type\", doc.get(\"name\")).enqueue(force=True)\n\treturn doc\n\n\ndef run_scheduled_job(job_type: str):\n\t\"\"\"This is a wrapper function that runs a hooks.scheduler_events method\"\"\"\n\ttry:\n\t\tfrappe.get_doc(\"Scheduled Job Type\", dict(method=job_type)).execute()\n\texcept Exception:\n\t\tprint(frappe.get_traceback())\n\n\ndef sync_jobs(hooks: dict = None):\n\tfrappe.reload_doc(\"core\", \"doctype\", \"scheduled_job_type\")\n\tscheduler_events = hooks or frappe.get_hooks(\"scheduler_events\")\n\tall_events = insert_events(scheduler_events)\n\tclear_events(all_events)\n\n\ndef insert_events(scheduler_events: dict) -> list:\n\tcron_jobs, event_jobs = [], []\n\tfor event_type in scheduler_events:\n\t\tevents = scheduler_events.get(event_type)\n\t\tif isinstance(events, dict):\n\t\t\tcron_jobs += insert_cron_jobs(events)\n\t\telse:\n\t\t\t# hourly, daily etc\n\t\t\tevent_jobs += insert_event_jobs(events, event_type)\n\treturn cron_jobs + event_jobs\n\n\ndef insert_cron_jobs(events: dict) -> list:\n\tcron_jobs = []\n\tfor cron_format in events:\n\t\tfor event in events.get(cron_format):\n\t\t\tcron_jobs.append(event)\n\t\t\tinsert_single_event(\"Cron\", event, cron_format)\n\treturn cron_jobs\n\n\ndef insert_event_jobs(events: list, event_type: str) -> list:\n\tevent_jobs = []\n\tfor event in events:\n\t\tevent_jobs.append(event)\n\t\tfrequency = event_type.replace(\"_\", \" \").title()\n\t\tinsert_single_event(frequency, event)\n\treturn event_jobs\n\n\ndef insert_single_event(frequency: str, event: str, cron_format: str = None):\n\tcron_expr = {\"cron_format\": cron_format} if cron_format else {}\n\n\ttry:\n\t\tfrappe.get_attr(event)\n\texcept Exception as e:\n\t\tclick.secho(f\"{event} is not a valid method: {e}\", fg=\"yellow\")\n\n\tdoc = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": 
\"Scheduled Job Type\",\n\t\t\t\"method\": event,\n\t\t\t\"cron_format\": cron_format,\n\t\t\t\"frequency\": frequency,\n\t\t}\n\t)\n\n\tif not frappe.db.exists(\n\t\t\"Scheduled Job Type\", {\"method\": event, \"frequency\": frequency, **cron_expr}\n\t):\n\t\tsavepoint = \"scheduled_job_type_creation\"\n\t\ttry:\n\t\t\tfrappe.db.savepoint(savepoint)\n\t\t\tdoc.insert()\n\t\texcept frappe.DuplicateEntryError:\n\t\t\tfrappe.db.rollback(save_point=savepoint)\n\t\t\tdoc.delete()\n\t\t\tdoc.insert()\n\n\ndef clear_events(all_events: list):\n\tfor event in frappe.get_all(\"Scheduled Job Type\", fields=[\"name\", \"method\", \"server_script\"]):\n\t\tis_server_script = event.server_script\n\t\tis_defined_in_hooks = event.method in all_events\n\n\t\tif not (is_defined_in_hooks or is_server_script):\n\t\t\tfrappe.delete_doc(\"Scheduled Job Type\", event.name)\n", "path": "frappe/core/doctype/scheduled_job_type/scheduled_job_type.py"}]}
3,461
278
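The jitter pattern in the record above is small enough to isolate: compute the cron-based next execution as usual, then push it forward by a bounded random number of seconds, and only for the long-running frequencies, so that time-sensitive jobs (the birthday-reminder case from the issue) keep firing exactly on schedule. A sketch using `croniter` and the standard library; the frequency names and the 1-600 second range mirror the golden diff, everything else is illustrative:

```python
from datetime import datetime, timedelta
from random import randint

from croniter import croniter

# Only long-running frequencies are smeared; the offset is strictly positive,
# so no job ever runs before its nominal cron time.
JITTERED_FREQUENCIES = {"Hourly Long", "Daily Long"}

def next_execution(cron_format: str, last_run: datetime, frequency: str) -> datetime:
    base = croniter(cron_format, last_run).get_next(datetime)
    jitter = randint(1, 600) if frequency in JITTERED_FREQUENCIES else 0
    return base + timedelta(seconds=jitter)

# A "Daily Long" job nominally due at midnight lands somewhere in the
# following ten minutes, spreading load across many sites on one bench.
print(next_execution("0 0 * * *", datetime(2023, 1, 1), "Daily Long"))
```

Because the offset is recomputed on every call, successive runs of the same job drift within the window instead of locking onto a new fixed pattern.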
gh_patches_debug_33430
rasdani/github-patches
git_diff
Mailu__Mailu-1268
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make roundcube log to the console Roundcube is currently hard to debug because it logs into a special folder (/var/www/html/logs). It should log to stdout/stderr instead. </issue> <code> [start of webmails/roundcube/start.py] 1 #!/usr/bin/python3 2 3 import os 4 import logging as log 5 import sys 6 from socrate import conf 7 8 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) 9 10 os.environ["MAX_FILESIZE"] = str(int(int(os.environ.get("MESSAGE_SIZE_LIMIT"))*0.66/1048576)) 11 12 conf.jinja("/php.ini", os.environ, "/usr/local/etc/php/conf.d/roundcube.ini") 13 14 # Fix some permissions 15 os.system("mkdir -p /data/gpg") 16 os.system("chown -R www-data:www-data /data") 17 18 # Run apache 19 os.execv("/usr/local/bin/apache2-foreground", ["apache2-foreground"]) 20 [end of webmails/roundcube/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/webmails/roundcube/start.py b/webmails/roundcube/start.py --- a/webmails/roundcube/start.py +++ b/webmails/roundcube/start.py @@ -4,16 +4,61 @@ import logging as log import sys from socrate import conf +import subprocess log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) os.environ["MAX_FILESIZE"] = str(int(int(os.environ.get("MESSAGE_SIZE_LIMIT"))*0.66/1048576)) +db_flavor=os.environ.get("ROUNDCUBE_DB_FLAVOR",os.environ.get("DB_FLAVOR","sqlite")) +if db_flavor=="sqlite": + os.environ["DB_DSNW"]="sqlite:////data/roundcube.db" +elif db_flavor=="mysql": + os.environ["DB_DSNW"]="mysql://%s:%s@%s/%s" % ( + os.environ.get("ROUNDCUBE_DB_USER","roundcube"), + os.environ.get("ROUNDCUBE_DB_PW"), + os.environ.get("ROUNDCUBE_DB_HOST",os.environ.get("DB_HOST","database")), + os.environ.get("ROUNDCUBE_DB_NAME","roundcube") + ) +elif db_flavor=="postgresql": + os.environ["DB_DSNW"]="pgsql://%s:%s@%s/%s" % ( + os.environ.get("ROUNDCUBE_DB_USER","roundcube"), + os.environ.get("ROUNDCUBE_DB_PW"), + os.environ.get("ROUNDCUBE_DB_HOST",os.environ.get("DB_HOST","database")), + os.environ.get("ROUNDCUBE_DB_NAME","roundcube") + ) +else: + print("Unknown ROUNDCUBE_DB_FLAVOR: %s",db_flavor) + exit(1) + + + conf.jinja("/php.ini", os.environ, "/usr/local/etc/php/conf.d/roundcube.ini") # Fix some permissions -os.system("mkdir -p /data/gpg") -os.system("chown -R www-data:www-data /data") +os.system("mkdir -p /data/gpg /var/www/html/logs") +os.system("touch /var/www/html/logs/errors") +os.system("chown -R www-data:www-data /data /var/www/html/logs") + +try: + print("Initializing database") + result=subprocess.check_output(["/var/www/html/bin/initdb.sh","--dir","/var/www/html/SQL"],stderr=subprocess.STDOUT) + print(result.decode()) +except subprocess.CalledProcessError as e: + if "already exists" in e.stdout.decode(): + print("Already initialzed") + else: + print(e.stdout.decode()) + quit(1) + +try: + print("Upgrading database") + subprocess.check_call(["/var/www/html/bin/update.sh","--version=?","-y"],stderr=subprocess.STDOUT) +except subprocess.CalledProcessError as e: + quit(1) + +# Tail roundcube logs +subprocess.Popen(["tail","-f","-n","0","/var/www/html/logs/errors"]) # Run apache os.execv("/usr/local/bin/apache2-foreground", ["apache2-foreground"])
{"golden_diff": "diff --git a/webmails/roundcube/start.py b/webmails/roundcube/start.py\n--- a/webmails/roundcube/start.py\n+++ b/webmails/roundcube/start.py\n@@ -4,16 +4,61 @@\n import logging as log\n import sys\n from socrate import conf\n+import subprocess\n \n log.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n \n os.environ[\"MAX_FILESIZE\"] = str(int(int(os.environ.get(\"MESSAGE_SIZE_LIMIT\"))*0.66/1048576))\n \n+db_flavor=os.environ.get(\"ROUNDCUBE_DB_FLAVOR\",os.environ.get(\"DB_FLAVOR\",\"sqlite\"))\n+if db_flavor==\"sqlite\":\n+ os.environ[\"DB_DSNW\"]=\"sqlite:////data/roundcube.db\"\n+elif db_flavor==\"mysql\":\n+ os.environ[\"DB_DSNW\"]=\"mysql://%s:%s@%s/%s\" % (\n+ os.environ.get(\"ROUNDCUBE_DB_USER\",\"roundcube\"),\n+ os.environ.get(\"ROUNDCUBE_DB_PW\"),\n+ os.environ.get(\"ROUNDCUBE_DB_HOST\",os.environ.get(\"DB_HOST\",\"database\")),\n+ os.environ.get(\"ROUNDCUBE_DB_NAME\",\"roundcube\")\n+ )\n+elif db_flavor==\"postgresql\":\n+ os.environ[\"DB_DSNW\"]=\"pgsql://%s:%s@%s/%s\" % (\n+ os.environ.get(\"ROUNDCUBE_DB_USER\",\"roundcube\"),\n+ os.environ.get(\"ROUNDCUBE_DB_PW\"),\n+ os.environ.get(\"ROUNDCUBE_DB_HOST\",os.environ.get(\"DB_HOST\",\"database\")),\n+ os.environ.get(\"ROUNDCUBE_DB_NAME\",\"roundcube\")\n+ )\n+else:\n+ print(\"Unknown ROUNDCUBE_DB_FLAVOR: %s\",db_flavor)\n+ exit(1)\n+\n+\n+\n conf.jinja(\"/php.ini\", os.environ, \"/usr/local/etc/php/conf.d/roundcube.ini\")\n \n # Fix some permissions\n-os.system(\"mkdir -p /data/gpg\")\n-os.system(\"chown -R www-data:www-data /data\")\n+os.system(\"mkdir -p /data/gpg /var/www/html/logs\")\n+os.system(\"touch /var/www/html/logs/errors\")\n+os.system(\"chown -R www-data:www-data /data /var/www/html/logs\")\n+\n+try:\n+ print(\"Initializing database\")\n+ result=subprocess.check_output([\"/var/www/html/bin/initdb.sh\",\"--dir\",\"/var/www/html/SQL\"],stderr=subprocess.STDOUT)\n+ print(result.decode())\n+except subprocess.CalledProcessError as e:\n+ if \"already exists\" in e.stdout.decode():\n+ print(\"Already initialzed\")\n+ else:\n+ print(e.stdout.decode())\n+ quit(1)\n+\n+try:\n+ print(\"Upgrading database\")\n+ subprocess.check_call([\"/var/www/html/bin/update.sh\",\"--version=?\",\"-y\"],stderr=subprocess.STDOUT)\n+except subprocess.CalledProcessError as e:\n+ quit(1)\n+\n+# Tail roundcube logs\n+subprocess.Popen([\"tail\",\"-f\",\"-n\",\"0\",\"/var/www/html/logs/errors\"])\n \n # Run apache\n os.execv(\"/usr/local/bin/apache2-foreground\", [\"apache2-foreground\"])\n", "issue": "Make roundcube log to the console\nRoundcube is currently hard to debug because it logs into a special folder (/var/www/html/logs). It should log to stdout/stderr instead.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\nfrom socrate import conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\nos.environ[\"MAX_FILESIZE\"] = str(int(int(os.environ.get(\"MESSAGE_SIZE_LIMIT\"))*0.66/1048576))\n\nconf.jinja(\"/php.ini\", os.environ, \"/usr/local/etc/php/conf.d/roundcube.ini\")\n\n# Fix some permissions\nos.system(\"mkdir -p /data/gpg\")\nos.system(\"chown -R www-data:www-data /data\")\n\n# Run apache\nos.execv(\"/usr/local/bin/apache2-foreground\", [\"apache2-foreground\"])\n", "path": "webmails/roundcube/start.py"}]}
765
721
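Two things happen in the Mailu diff above: the Roundcube database DSN is assembled from environment variables, and, more to the point of the issue, the file-only Roundcube log is bridged to the container's stdout by running `tail -f` as a background child next to Apache. That bridging trick works for any service that insists on file logging; a stripped-down sketch, assuming the log path from the record (any writable path behaves the same):

```python
import os
import subprocess

LOG_FILE = "/var/www/html/logs/errors"  # path Roundcube writes to in the record

# tail -f exits immediately if the file is missing, so create it first.
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
open(LOG_FILE, "a").close()

# Stream new lines to this process's stdout; "-n 0" skips pre-existing content.
subprocess.Popen(["tail", "-f", "-n", "0", LOG_FILE])

# The real entrypoint would now exec the foreground service, e.g.:
# os.execv("/usr/local/bin/apache2-foreground", ["apache2-foreground"])
```

Since `os.execv` replaces the process image but keeps the PID, the tail child stays attached and keeps running alongside Apache, so `docker logs` picks up whatever Roundcube writes to the file.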
gh_patches_debug_36050
rasdani/github-patches
git_diff
carpentries__amy-228
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bulk-upload: Not checking whether it is a valid csv file When a file with first line in valid .csv format and if the following lines are not in a valid format, for example if the file uploaded is : """ personal,middle,family,email This is a test file """ There is no error being displayed for the wrong file uploaded, instead a ticket is being displayed. I think there should be a check to validate csv file before trying to upload the data. Am I right? ![screenshot from 2015-03-17 13 28 34](https://cloud.githubusercontent.com/assets/6889955/6683386/8f7ff0ec-ccab-11e4-97af-4f72e51b9d82.png) </issue> <code> [start of workshops/util.py] 1 # coding: utf-8 2 from math import pi, sin, cos, acos 3 import csv 4 5 from django.core.exceptions import ObjectDoesNotExist 6 from django.db import IntegrityError, transaction 7 8 from .models import Event, Role, Person, Task 9 10 11 class InternalError(Exception): 12 pass 13 14 15 def earth_distance(pos1, pos2): 16 '''Taken from http://www.johndcook.com/python_longitude_latitude.html.''' 17 18 # Extract fields. 19 lat1, long1 = pos1 20 lat2, long2 = pos2 21 22 # Convert latitude and longitude to spherical coordinates in radians. 23 degrees_to_radians = pi/180.0 24 25 # phi = 90 - latitude 26 phi1 = (90.0 - lat1) * degrees_to_radians 27 phi2 = (90.0 - lat2) * degrees_to_radians 28 29 # theta = longitude 30 theta1 = long1 * degrees_to_radians 31 theta2 = long2 * degrees_to_radians 32 33 # Compute spherical distance from spherical coordinates. 34 # For two locations in spherical coordinates 35 # (1, theta, phi) and (1, theta, phi) 36 # cosine( arc length ) = sin phi sin phi' cos(theta-theta') + cos phi cos phi' 37 # distance = rho * arc length 38 c = sin(phi1) * sin(phi2) * cos(theta1 - theta2) + cos(phi1) * cos(phi2) 39 arc = acos(c) 40 41 # Multiply by 6373 to get distance in km. 42 return arc * 6373 43 44 45 def upload_person_task_csv(stream): 46 """Read people from CSV and return a JSON-serializable list of dicts. 47 48 The input `stream` should be a file-like object that returns 49 Unicode data. 50 51 "Serializability" is required because we put this data into session. See 52 https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details. 53 54 Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which 55 no data was given. 56 """ 57 58 result = [] 59 reader = csv.DictReader(stream) 60 empty_fields = set() 61 62 for row in reader: 63 entry = {} 64 for col in Person.PERSON_UPLOAD_FIELDS: 65 if col in row: 66 entry[col] = row[col].strip() 67 else: 68 entry[col] = None 69 empty_fields.add(col) 70 71 for col in Person.PERSON_TASK_EXTRA_FIELDS: 72 entry[col] = row.get(col, None) 73 entry['errors'] = None 74 75 result.append(entry) 76 77 return result, list(empty_fields) 78 79 80 def verify_upload_person_task(data): 81 """ 82 Verify that uploaded data is correct. Show errors by populating ``errors`` 83 dictionary item. This function changes ``data`` in place. 84 """ 85 86 errors_occur = False 87 for item in data: 88 errors = [] 89 90 event = item.get('event', None) 91 if event: 92 try: 93 Event.objects.get(slug=event) 94 except Event.DoesNotExist: 95 errors.append(u'Event with slug {0} does not exist.' 96 .format(event)) 97 98 role = item.get('role', None) 99 if role: 100 try: 101 Role.objects.get(name=role) 102 except Role.DoesNotExist: 103 errors.append(u'Role with name {0} does not exist.' 
104 .format(role)) 105 except Role.MultipleObjectsReturned: 106 errors.append(u'More than one role named {0} exists.' 107 .format(role)) 108 109 # check if the user exists, and if so: check if existing user's 110 # personal and family names are the same as uploaded 111 email = item.get('email', None) 112 personal = item.get('personal', None) 113 middle = item.get('middle', None) 114 family = item.get('family', None) 115 person = None 116 if email: 117 # we don't have to check if the user exists in the database 118 # but we should check if, in case the email matches, family and 119 # personal names match, too 120 121 try: 122 person = Person.objects.get(email__iexact=email) 123 124 assert person.personal == personal 125 assert person.middle == middle 126 assert person.family == family 127 128 except Person.DoesNotExist: 129 # in this case we need to add the user 130 pass 131 132 except AssertionError: 133 errors.append( 134 "Personal, middle or family name of existing user don't" 135 " match: {0} vs {1}, {2} vs {3}, {4} vs {5}" 136 .format(personal, person.personal, middle, person.middle, 137 family, person.family) 138 ) 139 140 if person: 141 if not any([event, role]): 142 errors.append("User exists but no event and role to assign" 143 " the user to was provided") 144 145 else: 146 # check for duplicate Task 147 try: 148 Task.objects.get(event__slug=event, role__name=role, 149 person=person) 150 except Task.DoesNotExist: 151 pass 152 else: 153 errors.append("Existing person {2} already has role {0}" 154 " in event {1}".format(role, event, person)) 155 156 if (event and not role) or (role and not event): 157 errors.append("Must have both or either of event ({0}) and role" 158 " ({1})".format(event, role)) 159 160 if errors: 161 errors_occur = True 162 item['errors'] = errors 163 164 return errors_occur 165 166 167 def create_uploaded_persons_tasks(data): 168 """ 169 Create persons and tasks from upload data. 170 """ 171 172 # Quick sanity check. 173 if any([row.get('errors') for row in data]): 174 raise InternalError('Uploaded data contains errors, cancelling upload') 175 176 persons_created = [] 177 tasks_created = [] 178 with transaction.atomic(): 179 for row in data: 180 try: 181 fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS} 182 fields['username'] = create_username(row['personal'], 183 row['family']) 184 if fields['email']: 185 # we should use existing Person or create one 186 p, created = Person.objects.get_or_create( 187 email=fields['email'], defaults=fields 188 ) 189 190 if created: 191 persons_created.append(p) 192 193 else: 194 # we should create a new Person without any email provided 195 p = Person(**fields) 196 p.save() 197 persons_created.append(p) 198 199 if row['event'] and row['role']: 200 e = Event.objects.get(slug=row['event']) 201 r = Role.objects.get(name=row['role']) 202 t = Task(person=p, event=e, role=r) 203 t.save() 204 tasks_created.append(t) 205 206 except IntegrityError as e: 207 raise IntegrityError('{0} (for {1})'.format(str(e), row)) 208 209 except ObjectDoesNotExist as e: 210 raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row)) 211 212 return persons_created, tasks_created 213 214 215 def create_username(personal, family): 216 '''Generate unique username.''' 217 stem = normalize_name(family) + '.' 
+ normalize_name(personal) 218 counter = None 219 while True: 220 try: 221 if counter is None: 222 username = stem 223 counter = 1 224 else: 225 counter += 1 226 username = '{0}.{1}'.format(stem, counter) 227 Person.objects.get(username=username) 228 except ObjectDoesNotExist: 229 break 230 231 if any([ord(c) >= 128 for c in username]): 232 raise InternalError('Normalized username still contains non-normal ' 233 'characters "{0}"'.format(username)) 234 235 return username 236 237 238 def normalize_name(name): 239 '''Get rid of spaces, funky characters, etc.''' 240 name = name.strip() 241 for (accented, flat) in [(' ', '-')]: 242 name = name.replace(accented, flat) 243 244 # We should use lower-cased username, because it directly corresponds to 245 # some files Software Carpentry stores about some people - and, as we know, 246 # some filesystems are not case-sensitive. 247 return name.lower() 248 [end of workshops/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/workshops/util.py b/workshops/util.py --- a/workshops/util.py +++ b/workshops/util.py @@ -62,9 +62,12 @@ for row in reader: entry = {} for col in Person.PERSON_UPLOAD_FIELDS: - if col in row: + try: entry[col] = row[col].strip() - else: + except (KeyError, IndexError, AttributeError): + # either `col` is not in `entry`, or not in `row`, or + # `.strip()` doesn't work (e.g. `row[col]` gives `None` instead + # of string) entry[col] = None empty_fields.add(col) @@ -139,20 +142,9 @@ if person: if not any([event, role]): - errors.append("User exists but no event and role to assign" + errors.append("User exists but no event and role to assign to" " the user to was provided") - else: - # check for duplicate Task - try: - Task.objects.get(event__slug=event, role__name=role, - person=person) - except Task.DoesNotExist: - pass - else: - errors.append("Existing person {2} already has role {0}" - " in event {1}".format(role, event, person)) - if (event and not role) or (role and not event): errors.append("Must have both or either of event ({0}) and role" " ({1})".format(event, role)) @@ -199,9 +191,10 @@ if row['event'] and row['role']: e = Event.objects.get(slug=row['event']) r = Role.objects.get(name=row['role']) - t = Task(person=p, event=e, role=r) - t.save() - tasks_created.append(t) + t, created = Task.objects.get_or_create(person=p, event=e, + role=r) + if created: + tasks_created.append(t) except IntegrityError as e: raise IntegrityError('{0} (for {1})'.format(str(e), row))
{"golden_diff": "diff --git a/workshops/util.py b/workshops/util.py\n--- a/workshops/util.py\n+++ b/workshops/util.py\n@@ -62,9 +62,12 @@\n for row in reader:\n entry = {}\n for col in Person.PERSON_UPLOAD_FIELDS:\n- if col in row:\n+ try:\n entry[col] = row[col].strip()\n- else:\n+ except (KeyError, IndexError, AttributeError):\n+ # either `col` is not in `entry`, or not in `row`, or\n+ # `.strip()` doesn't work (e.g. `row[col]` gives `None` instead\n+ # of string)\n entry[col] = None\n empty_fields.add(col)\n \n@@ -139,20 +142,9 @@\n \n if person:\n if not any([event, role]):\n- errors.append(\"User exists but no event and role to assign\"\n+ errors.append(\"User exists but no event and role to assign to\"\n \" the user to was provided\")\n \n- else:\n- # check for duplicate Task\n- try:\n- Task.objects.get(event__slug=event, role__name=role,\n- person=person)\n- except Task.DoesNotExist:\n- pass\n- else:\n- errors.append(\"Existing person {2} already has role {0}\"\n- \" in event {1}\".format(role, event, person))\n-\n if (event and not role) or (role and not event):\n errors.append(\"Must have both or either of event ({0}) and role\"\n \" ({1})\".format(event, role))\n@@ -199,9 +191,10 @@\n if row['event'] and row['role']:\n e = Event.objects.get(slug=row['event'])\n r = Role.objects.get(name=row['role'])\n- t = Task(person=p, event=e, role=r)\n- t.save()\n- tasks_created.append(t)\n+ t, created = Task.objects.get_or_create(person=p, event=e,\n+ role=r)\n+ if created:\n+ tasks_created.append(t)\n \n except IntegrityError as e:\n raise IntegrityError('{0} (for {1})'.format(str(e), row))\n", "issue": "Bulk-upload: Not checking whether it is a valid csv file\nWhen a file with first line in valid .csv format and if the following lines are not in a valid format, \n\nfor example if the file uploaded is :\n\"\"\"\npersonal,middle,family,email\nThis is a test file\n\"\"\"\n\nThere is no error being displayed for the wrong file uploaded, instead a ticket is being displayed. 
I think there should be a check to validate csv file before trying to upload the data.\nAm I right?\n![screenshot from 2015-03-17 13 28 34](https://cloud.githubusercontent.com/assets/6889955/6683386/8f7ff0ec-ccab-11e4-97af-4f72e51b9d82.png)\n\n", "before_files": [{"content": "# coding: utf-8\nfrom math import pi, sin, cos, acos\nimport csv\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import IntegrityError, transaction\n\nfrom .models import Event, Role, Person, Task\n\n\nclass InternalError(Exception):\n pass\n\n\ndef earth_distance(pos1, pos2):\n '''Taken from http://www.johndcook.com/python_longitude_latitude.html.'''\n\n # Extract fields.\n lat1, long1 = pos1\n lat2, long2 = pos2\n\n # Convert latitude and longitude to spherical coordinates in radians.\n degrees_to_radians = pi/180.0\n\n # phi = 90 - latitude\n phi1 = (90.0 - lat1) * degrees_to_radians\n phi2 = (90.0 - lat2) * degrees_to_radians\n\n # theta = longitude\n theta1 = long1 * degrees_to_radians\n theta2 = long2 * degrees_to_radians\n\n # Compute spherical distance from spherical coordinates.\n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta, phi)\n # cosine( arc length ) = sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n c = sin(phi1) * sin(phi2) * cos(theta1 - theta2) + cos(phi1) * cos(phi2)\n arc = acos(c)\n\n # Multiply by 6373 to get distance in km.\n return arc * 6373\n\n\ndef upload_person_task_csv(stream):\n \"\"\"Read people from CSV and return a JSON-serializable list of dicts.\n\n The input `stream` should be a file-like object that returns\n Unicode data.\n\n \"Serializability\" is required because we put this data into session. See\n https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.\n\n Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which\n no data was given.\n \"\"\"\n\n result = []\n reader = csv.DictReader(stream)\n empty_fields = set()\n\n for row in reader:\n entry = {}\n for col in Person.PERSON_UPLOAD_FIELDS:\n if col in row:\n entry[col] = row[col].strip()\n else:\n entry[col] = None\n empty_fields.add(col)\n\n for col in Person.PERSON_TASK_EXTRA_FIELDS:\n entry[col] = row.get(col, None)\n entry['errors'] = None\n\n result.append(entry)\n\n return result, list(empty_fields)\n\n\ndef verify_upload_person_task(data):\n \"\"\"\n Verify that uploaded data is correct. Show errors by populating ``errors``\n dictionary item. 
This function changes ``data`` in place.\n \"\"\"\n\n errors_occur = False\n for item in data:\n errors = []\n\n event = item.get('event', None)\n if event:\n try:\n Event.objects.get(slug=event)\n except Event.DoesNotExist:\n errors.append(u'Event with slug {0} does not exist.'\n .format(event))\n\n role = item.get('role', None)\n if role:\n try:\n Role.objects.get(name=role)\n except Role.DoesNotExist:\n errors.append(u'Role with name {0} does not exist.'\n .format(role))\n except Role.MultipleObjectsReturned:\n errors.append(u'More than one role named {0} exists.'\n .format(role))\n\n # check if the user exists, and if so: check if existing user's\n # personal and family names are the same as uploaded\n email = item.get('email', None)\n personal = item.get('personal', None)\n middle = item.get('middle', None)\n family = item.get('family', None)\n person = None\n if email:\n # we don't have to check if the user exists in the database\n # but we should check if, in case the email matches, family and\n # personal names match, too\n\n try:\n person = Person.objects.get(email__iexact=email)\n\n assert person.personal == personal\n assert person.middle == middle\n assert person.family == family\n\n except Person.DoesNotExist:\n # in this case we need to add the user\n pass\n\n except AssertionError:\n errors.append(\n \"Personal, middle or family name of existing user don't\"\n \" match: {0} vs {1}, {2} vs {3}, {4} vs {5}\"\n .format(personal, person.personal, middle, person.middle,\n family, person.family)\n )\n\n if person:\n if not any([event, role]):\n errors.append(\"User exists but no event and role to assign\"\n \" the user to was provided\")\n\n else:\n # check for duplicate Task\n try:\n Task.objects.get(event__slug=event, role__name=role,\n person=person)\n except Task.DoesNotExist:\n pass\n else:\n errors.append(\"Existing person {2} already has role {0}\"\n \" in event {1}\".format(role, event, person))\n\n if (event and not role) or (role and not event):\n errors.append(\"Must have both or either of event ({0}) and role\"\n \" ({1})\".format(event, role))\n\n if errors:\n errors_occur = True\n item['errors'] = errors\n\n return errors_occur\n\n\ndef create_uploaded_persons_tasks(data):\n \"\"\"\n Create persons and tasks from upload data.\n \"\"\"\n\n # Quick sanity check.\n if any([row.get('errors') for row in data]):\n raise InternalError('Uploaded data contains errors, cancelling upload')\n\n persons_created = []\n tasks_created = []\n with transaction.atomic():\n for row in data:\n try:\n fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS}\n fields['username'] = create_username(row['personal'],\n row['family'])\n if fields['email']:\n # we should use existing Person or create one\n p, created = Person.objects.get_or_create(\n email=fields['email'], defaults=fields\n )\n\n if created:\n persons_created.append(p)\n\n else:\n # we should create a new Person without any email provided\n p = Person(**fields)\n p.save()\n persons_created.append(p)\n\n if row['event'] and row['role']:\n e = Event.objects.get(slug=row['event'])\n r = Role.objects.get(name=row['role'])\n t = Task(person=p, event=e, role=r)\n t.save()\n tasks_created.append(t)\n\n except IntegrityError as e:\n raise IntegrityError('{0} (for {1})'.format(str(e), row))\n\n except ObjectDoesNotExist as e:\n raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row))\n\n return persons_created, tasks_created\n\n\ndef create_username(personal, family):\n '''Generate unique username.'''\n stem = 
normalize_name(family) + '.' + normalize_name(personal)\n counter = None\n while True:\n try:\n if counter is None:\n username = stem\n counter = 1\n else:\n counter += 1\n username = '{0}.{1}'.format(stem, counter)\n Person.objects.get(username=username)\n except ObjectDoesNotExist:\n break\n\n if any([ord(c) >= 128 for c in username]):\n raise InternalError('Normalized username still contains non-normal '\n 'characters \"{0}\"'.format(username))\n\n return username\n\n\ndef normalize_name(name):\n '''Get rid of spaces, funky characters, etc.'''\n name = name.strip()\n for (accented, flat) in [(' ', '-')]:\n name = name.replace(accented, flat)\n\n # We should use lower-cased username, because it directly corresponds to\n # some files Software Carpentry stores about some people - and, as we know,\n # some filesystems are not case-sensitive.\n return name.lower()\n", "path": "workshops/util.py"}]}
3,125
493
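The golden diff for this record fixes the upload path in two ways: field access during CSV parsing becomes defensive, and task creation switches to `get_or_create` so repeated uploads stop tripping the duplicate check. The parsing half is what the issue screenshot is about, and it generalizes: `csv.DictReader` does not reject malformed rows, it pads missing columns with `None`, so `row[col].strip()` must anticipate both absent keys and `None` values. A self-contained sketch with hypothetical field names standing in for `Person.PERSON_UPLOAD_FIELDS`:

```python
import csv
import io

FIELDS = ("personal", "middle", "family", "email")  # illustrative stand-ins

def read_rows(stream):
    rows, empty_fields = [], set()
    for row in csv.DictReader(stream):
        entry = {}
        for col in FIELDS:
            try:
                entry[col] = row[col].strip()
            except (KeyError, IndexError, AttributeError):
                # column missing from the header, or padded to None by
                # DictReader because the data row was too short
                entry[col] = None
                empty_fields.add(col)
        rows.append(entry)
    return rows, sorted(empty_fields)

# The malformed file from the issue is now reported instead of half-imported:
bad = io.StringIO("personal,middle,family,email\nThis is a test file\n")
print(read_rows(bad))
```

Running this prints one row whose `middle`, `family` and `email` are `None`, together with the list of empty fields, which is exactly the signal a view needs in order to show a validation error rather than a success ticket.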
gh_patches_debug_35767
rasdani/github-patches
git_diff
microsoft__ptvsd-895
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Race condition in JsonMessageChannel ```py def send_request(self, command, arguments=None): d = {'command': command} if arguments is not None: d['arguments'] = arguments seq = self._send_message('request', d) request = Request(self, seq) with self._lock: self._requests[seq] = request return request ``` Note that the message is sent before the requests dict is updated. If it goes fast enough, the response handler will receive a response to an "unknown" request. </issue> <code> [start of ptvsd/messaging.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 from __future__ import print_function, with_statement, absolute_import 6 7 import collections 8 import itertools 9 import json 10 import sys 11 import threading 12 13 14 class JsonIOStream(object): 15 """Implements a JSON value stream over two byte streams (input and output). 16 17 Each value is encoded as a packet consisting of a header and a body, as defined by the 18 Debug Adapter Protocol (https://microsoft.github.io/debug-adapter-protocol/overview). 19 """ 20 21 MAX_BODY_SIZE = 0xFFFFFF 22 23 @classmethod 24 def from_stdio(cls): 25 if sys.version_info >= (3,): 26 stdin = sys.stdin.buffer 27 stdout = sys.stdout.buffer 28 else: 29 stdin = sys.stdin 30 stdout = sys.stdout 31 if sys.platform == 'win32': 32 import os, msvcrt 33 msvcrt.setmode(stdin.fileno(), os.O_BINARY) 34 msvcrt.setmode(stdout.fileno(), os.O_BINARY) 35 return cls(stdin, stdout) 36 37 @classmethod 38 def from_socket(cls, socket): 39 if socket.gettimeout() is not None: 40 raise ValueError('Socket must be in blocking mode') 41 socket_io = socket.makefile('rwb', 0) 42 return cls(socket_io, socket_io) 43 44 def __init__(self, reader, writer): 45 """Creates a new JsonIOStream. 46 47 reader is a BytesIO-like object from which incoming messages are read; 48 reader.readline() must treat '\n' as the line terminator, and must leave 49 '\r' as is (i.e. it must not translate '\r\n' to just plain '\n'!). 50 51 writer is a BytesIO-like object to which outgoing messages are written. 52 """ 53 self._reader = reader 54 self._writer = writer 55 self._is_closing = False 56 57 def close(self): 58 self._is_closing = True 59 self._reader.close() 60 self._writer.close() 61 62 def _read_line(self): 63 line = b'' 64 while True: 65 line += self._reader.readline() 66 if not line: 67 raise EOFError 68 if line.endswith(b'\r\n'): 69 line = line[0:-2] 70 return line 71 72 def read_json(self): 73 """Read a single JSON value from reader. 74 75 Returns JSON value as parsed by json.loads(), or raises EOFError 76 if there are no more objects to be read. 
77 """ 78 79 headers = {} 80 while True: 81 try: 82 line = self._read_line() 83 except Exception: 84 if self._is_closing: 85 raise EOFError 86 else: 87 raise 88 89 if line == b'': 90 break 91 key, _, value = line.partition(b':') 92 headers[key] = value 93 94 try: 95 length = int(headers[b'Content-Length']) 96 if not (0 <= length <= self.MAX_BODY_SIZE): 97 raise ValueError 98 except (KeyError, ValueError): 99 raise IOError('Content-Length is missing or invalid') 100 101 try: 102 body = self._reader.read(length) 103 except Exception: 104 if self._is_closing: 105 raise EOFError 106 else: 107 raise 108 109 if isinstance(body, bytes): 110 body = body.decode('utf-8') 111 return json.loads(body) 112 113 def write_json(self, value): 114 """Write a single JSON object to writer. 115 116 object must be in the format suitable for json.dump(). 117 """ 118 119 body = json.dumps(value, sort_keys=True) 120 if not isinstance(body, bytes): 121 body = body.encode('utf-8') 122 123 header = 'Content-Length: %d\r\n\r\n' % len(body) 124 if not isinstance(header, bytes): 125 header = header.encode('ascii') 126 127 self._writer.write(header) 128 self._writer.write(body) 129 130 131 Response = collections.namedtuple('Response', ('success', 'command', 'error_message', 'body')) 132 Response.__new__.__defaults__ = (None, None) 133 class Response(Response): 134 """Represents a received response to a Request.""" 135 136 137 class RequestFailure(Exception): 138 def __init__(self, message): 139 self.message = message 140 141 142 class Request(object): 143 """Represents a request that was sent to the other party, and is awaiting or has 144 already received a response. 145 """ 146 147 def __init__(self, channel, seq): 148 self.channel = channel 149 self.seq = seq 150 self.response = None 151 self._lock = threading.Lock() 152 self._got_response = threading.Event() 153 self._callback = lambda _: None 154 155 def _handle_response(self, success, command, error_message=None, body=None): 156 assert self.response is None 157 with self._lock: 158 response = Response(success, command, error_message, body) 159 self.response = response 160 callback = self._callback 161 callback(response) 162 self._got_response.set() 163 164 def wait_for_response(self, raise_if_failed=True): 165 """Waits until a response is received for this request, records that 166 response as a new Response object accessible via self.response, 167 and returns self.response.body. 168 169 If raise_if_failed is True, and the received response does not indicate 170 success, raises RequestFailure. Otherwise, self.response.success has to 171 be inspected to determine whether the request failed or succeeded, since 172 self.response.body can be None in either case. 173 """ 174 175 self._got_response.wait() 176 if raise_if_failed and not self.response.success: 177 raise RequestFailure(self.response.error_message) 178 return self.response 179 180 def on_response(self, callback): 181 """Registers a callback to invoke when a response is received for this 182 request. If response was already received, invokes callback immediately. 183 Callback is invoked with Response object as the sole argument. 184 185 The callback is invoked on an unspecified background thread that performs 186 processing of incoming messages; therefore, no further message processing 187 occurs until the callback returns. 
188 """ 189 190 with self._lock: 191 response = self.response 192 if response is None: 193 self._callback = callback 194 return 195 callback(response) 196 197 198 class JsonMessageChannel(object): 199 """Implements a JSON message channel on top of a JSON stream, with 200 support for generic Request, Response and Event messages as defined by the 201 Debug Adapter Protocol (https://microsoft.github.io/debug-adapter-protocol/overview). 202 """ 203 204 def __init__(self, stream, handlers=None): 205 self.stream = stream 206 self.send_callback = lambda channel, message: None 207 self.receive_callback = lambda channel, message: None 208 self._lock = threading.Lock() 209 self._stop = threading.Event() 210 self._seq_iter = itertools.count(1) 211 self._requests = {} 212 self._handlers = handlers 213 self._worker = threading.Thread(target=self._process_incoming_messages) 214 self._worker.daemon = True 215 216 def close(self): 217 self.stream.close() 218 219 def start(self): 220 self._worker.start() 221 222 def wait(self): 223 self._worker.join() 224 225 def _send_message(self, type, rest={}): 226 with self._lock: 227 seq = next(self._seq_iter) 228 message = { 229 'seq': seq, 230 'type': type, 231 } 232 message.update(rest) 233 with self._lock: 234 self.stream.write_json(message) 235 self.send_callback(self, message) 236 return seq 237 238 def send_request(self, command, arguments=None): 239 d = {'command': command} 240 if arguments is not None: 241 d['arguments'] = arguments 242 seq = self._send_message('request', d) 243 request = Request(self, seq) 244 with self._lock: 245 self._requests[seq] = request 246 return request 247 248 def send_event(self, event, body=None): 249 d = {'event': event} 250 if body is not None: 251 d['body'] = body 252 self._send_message('event', d) 253 254 def send_response(self, request_seq, success, command, error_message=None, body=None): 255 d = { 256 'request_seq': request_seq, 257 'success': success, 258 'command': command, 259 } 260 if success: 261 if body is not None: 262 d['body'] = body 263 else: 264 if error_message is not None: 265 d['message'] = error_message 266 self._send_message('response', d) 267 268 def on_message(self, message): 269 self.receive_callback(self, message) 270 seq = message['seq'] 271 typ = message['type'] 272 if typ == 'request': 273 command = message['command'] 274 arguments = message.get('arguments', None) 275 self.on_request(seq, command, arguments) 276 elif typ == 'event': 277 event = message['event'] 278 body = message.get('body', None) 279 self.on_event(seq, event, body) 280 elif typ == 'response': 281 request_seq = message['request_seq'] 282 success = message['success'] 283 command = message['command'] 284 error_message = message.get('message', None) 285 body = message.get('body', None) 286 self.on_response(seq, request_seq, success, command, error_message, body) 287 else: 288 raise IOError('Incoming message has invalid "type":\n%r' % message) 289 290 def on_request(self, seq, command, arguments): 291 handler_name = '%s_request' % command 292 specific_handler = getattr(self._handlers, handler_name, None) 293 if specific_handler is not None: 294 handler = lambda: specific_handler(self, arguments) 295 else: 296 generic_handler = getattr(self._handlers, 'request') 297 handler = lambda: generic_handler(self, command, arguments) 298 try: 299 response_body = handler() 300 except Exception as ex: 301 self.send_response(seq, False, command, str(ex)) 302 else: 303 self.send_response(seq, True, command, None, response_body) 304 305 def 
on_event(self, seq, event, body): 306 handler_name = '%s_event' % event 307 specific_handler = getattr(self._handlers, handler_name, None) 308 if specific_handler is not None: 309 handler = lambda: specific_handler(self, body) 310 else: 311 generic_handler = getattr(self._handlers, 'event') 312 handler = lambda: generic_handler(self, event, body) 313 handler() 314 315 def on_response(self, seq, request_seq, success, command, error_message, body): 316 try: 317 with self._lock: 318 request = self._requests.pop(request_seq) 319 except KeyError: 320 raise KeyError('Received response to unknown request %d', request_seq) 321 return request._handle_response(success, command, error_message, body) 322 323 def _process_incoming_messages(self): 324 while True: 325 try: 326 message = self.stream.read_json() 327 except EOFError: 328 break 329 try: 330 self.on_message(message) 331 except Exception: 332 print('Error while processing message:\n%r\n\n' % message, file=sys.__stderr__) 333 raise 334 335 336 class MessageHandlers(object): 337 """A simple delegating message handlers object for use with JsonMessageChannel. 338 For every argument provided, the object has an attribute with the corresponding 339 name and value. Example: 340 """ 341 342 def __init__(self, **kwargs): 343 for name, func in kwargs.items(): 344 setattr(self, name, func) [end of ptvsd/messaging.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ptvsd/messaging.py b/ptvsd/messaging.py --- a/ptvsd/messaging.py +++ b/ptvsd/messaging.py @@ -5,6 +5,7 @@ from __future__ import print_function, with_statement, absolute_import import collections +import contextlib import itertools import json import sys @@ -222,6 +223,7 @@ def wait(self): self._worker.join() + @contextlib.contextmanager def _send_message(self, type, rest={}): with self._lock: seq = next(self._seq_iter) @@ -231,17 +233,16 @@ } message.update(rest) with self._lock: + yield seq self.stream.write_json(message) self.send_callback(self, message) - return seq def send_request(self, command, arguments=None): d = {'command': command} if arguments is not None: d['arguments'] = arguments - seq = self._send_message('request', d) - request = Request(self, seq) - with self._lock: + with self._send_message('request', d) as seq: + request = Request(self, seq) self._requests[seq] = request return request @@ -249,7 +250,8 @@ d = {'event': event} if body is not None: d['body'] = body - self._send_message('event', d) + with self._send_message('event', d): + pass def send_response(self, request_seq, success, command, error_message=None, body=None): d = { @@ -263,7 +265,8 @@ else: if error_message is not None: d['message'] = error_message - self._send_message('response', d) + with self._send_message('response', d): + pass def on_message(self, message): self.receive_callback(self, message)
{"golden_diff": "diff --git a/ptvsd/messaging.py b/ptvsd/messaging.py\n--- a/ptvsd/messaging.py\n+++ b/ptvsd/messaging.py\n@@ -5,6 +5,7 @@\n from __future__ import print_function, with_statement, absolute_import\n \n import collections\n+import contextlib\n import itertools\n import json\n import sys\n@@ -222,6 +223,7 @@\n def wait(self):\n self._worker.join()\n \n+ @contextlib.contextmanager\n def _send_message(self, type, rest={}):\n with self._lock:\n seq = next(self._seq_iter)\n@@ -231,17 +233,16 @@\n }\n message.update(rest)\n with self._lock:\n+ yield seq\n self.stream.write_json(message)\n self.send_callback(self, message)\n- return seq\n \n def send_request(self, command, arguments=None):\n d = {'command': command}\n if arguments is not None:\n d['arguments'] = arguments\n- seq = self._send_message('request', d)\n- request = Request(self, seq)\n- with self._lock:\n+ with self._send_message('request', d) as seq:\n+ request = Request(self, seq)\n self._requests[seq] = request\n return request\n \n@@ -249,7 +250,8 @@\n d = {'event': event}\n if body is not None:\n d['body'] = body\n- self._send_message('event', d)\n+ with self._send_message('event', d):\n+ pass\n \n def send_response(self, request_seq, success, command, error_message=None, body=None):\n d = {\n@@ -263,7 +265,8 @@\n else:\n if error_message is not None:\n d['message'] = error_message\n- self._send_message('response', d)\n+ with self._send_message('response', d):\n+ pass\n \n def on_message(self, message):\n self.receive_callback(self, message)\n", "issue": "Race condition in JsonMessageChannel\n```py\r\n def send_request(self, command, arguments=None):\r\n d = {'command': command}\r\n if arguments is not None:\r\n d['arguments'] = arguments\r\n seq = self._send_message('request', d)\r\n request = Request(self, seq)\r\n with self._lock:\r\n self._requests[seq] = request\r\n return request\r\n```\r\nNote that the message is sent before the requests dict is updated. If it goes fast enough, the response handler will receive a response to an \"unknown\" request.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import print_function, with_statement, absolute_import\n\nimport collections\nimport itertools\nimport json\nimport sys\nimport threading\n\n\nclass JsonIOStream(object):\n \"\"\"Implements a JSON value stream over two byte streams (input and output).\n\n Each value is encoded as a packet consisting of a header and a body, as defined by the\n Debug Adapter Protocol (https://microsoft.github.io/debug-adapter-protocol/overview).\n \"\"\"\n\n MAX_BODY_SIZE = 0xFFFFFF\n\n @classmethod\n def from_stdio(cls):\n if sys.version_info >= (3,):\n stdin = sys.stdin.buffer\n stdout = sys.stdout.buffer\n else:\n stdin = sys.stdin\n stdout = sys.stdout\n if sys.platform == 'win32':\n import os, msvcrt\n msvcrt.setmode(stdin.fileno(), os.O_BINARY)\n msvcrt.setmode(stdout.fileno(), os.O_BINARY)\n return cls(stdin, stdout)\n\n @classmethod\n def from_socket(cls, socket):\n if socket.gettimeout() is not None:\n raise ValueError('Socket must be in blocking mode')\n socket_io = socket.makefile('rwb', 0)\n return cls(socket_io, socket_io)\n\n def __init__(self, reader, writer):\n \"\"\"Creates a new JsonIOStream.\n\n reader is a BytesIO-like object from which incoming messages are read;\n reader.readline() must treat '\\n' as the line terminator, and must leave\n '\\r' as is (i.e. 
it must not translate '\\r\\n' to just plain '\\n'!).\n\n writer is a BytesIO-like object to which outgoing messages are written.\n \"\"\"\n self._reader = reader\n self._writer = writer\n self._is_closing = False\n\n def close(self):\n self._is_closing = True\n self._reader.close()\n self._writer.close()\n\n def _read_line(self):\n line = b''\n while True:\n line += self._reader.readline()\n if not line:\n raise EOFError\n if line.endswith(b'\\r\\n'):\n line = line[0:-2]\n return line\n\n def read_json(self):\n \"\"\"Read a single JSON value from reader.\n\n Returns JSON value as parsed by json.loads(), or raises EOFError\n if there are no more objects to be read.\n \"\"\"\n\n headers = {}\n while True:\n try:\n line = self._read_line()\n except Exception:\n if self._is_closing:\n raise EOFError\n else:\n raise\n\n if line == b'':\n break\n key, _, value = line.partition(b':')\n headers[key] = value\n\n try:\n length = int(headers[b'Content-Length'])\n if not (0 <= length <= self.MAX_BODY_SIZE):\n raise ValueError\n except (KeyError, ValueError):\n raise IOError('Content-Length is missing or invalid')\n\n try:\n body = self._reader.read(length)\n except Exception:\n if self._is_closing:\n raise EOFError\n else:\n raise\n\n if isinstance(body, bytes):\n body = body.decode('utf-8')\n return json.loads(body)\n\n def write_json(self, value):\n \"\"\"Write a single JSON object to writer.\n\n object must be in the format suitable for json.dump().\n \"\"\"\n\n body = json.dumps(value, sort_keys=True)\n if not isinstance(body, bytes):\n body = body.encode('utf-8')\n\n header = 'Content-Length: %d\\r\\n\\r\\n' % len(body)\n if not isinstance(header, bytes):\n header = header.encode('ascii')\n\n self._writer.write(header)\n self._writer.write(body)\n\n\nResponse = collections.namedtuple('Response', ('success', 'command', 'error_message', 'body'))\nResponse.__new__.__defaults__ = (None, None)\nclass Response(Response):\n \"\"\"Represents a received response to a Request.\"\"\"\n\n\nclass RequestFailure(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass Request(object):\n \"\"\"Represents a request that was sent to the other party, and is awaiting or has\n already received a response.\n \"\"\"\n\n def __init__(self, channel, seq):\n self.channel = channel\n self.seq = seq\n self.response = None\n self._lock = threading.Lock()\n self._got_response = threading.Event()\n self._callback = lambda _: None\n\n def _handle_response(self, success, command, error_message=None, body=None):\n assert self.response is None\n with self._lock:\n response = Response(success, command, error_message, body)\n self.response = response\n callback = self._callback\n callback(response)\n self._got_response.set()\n\n def wait_for_response(self, raise_if_failed=True):\n \"\"\"Waits until a response is received for this request, records that\n response as a new Response object accessible via self.response,\n and returns self.response.body.\n\n If raise_if_failed is True, and the received response does not indicate\n success, raises RequestFailure. Otherwise, self.response.success has to\n be inspected to determine whether the request failed or succeeded, since\n self.response.body can be None in either case.\n \"\"\"\n\n self._got_response.wait()\n if raise_if_failed and not self.response.success:\n raise RequestFailure(self.response.error_message)\n return self.response\n\n def on_response(self, callback):\n \"\"\"Registers a callback to invoke when a response is received for this\n request. 
If response was already received, invokes callback immediately.\n Callback is invoked with Response object as the sole argument.\n\n The callback is invoked on an unspecified background thread that performs\n processing of incoming messages; therefore, no further message processing\n occurs until the callback returns.\n \"\"\"\n\n with self._lock:\n response = self.response\n if response is None:\n self._callback = callback\n return\n callback(response)\n\n\nclass JsonMessageChannel(object):\n \"\"\"Implements a JSON message channel on top of a JSON stream, with\n support for generic Request, Response and Event messages as defined by the\n Debug Adapter Protocol (https://microsoft.github.io/debug-adapter-protocol/overview).\n \"\"\"\n\n def __init__(self, stream, handlers=None):\n self.stream = stream\n self.send_callback = lambda channel, message: None\n self.receive_callback = lambda channel, message: None\n self._lock = threading.Lock()\n self._stop = threading.Event()\n self._seq_iter = itertools.count(1)\n self._requests = {}\n self._handlers = handlers\n self._worker = threading.Thread(target=self._process_incoming_messages)\n self._worker.daemon = True\n\n def close(self):\n self.stream.close()\n\n def start(self):\n self._worker.start()\n\n def wait(self):\n self._worker.join()\n\n def _send_message(self, type, rest={}):\n with self._lock:\n seq = next(self._seq_iter)\n message = {\n 'seq': seq,\n 'type': type,\n }\n message.update(rest)\n with self._lock:\n self.stream.write_json(message)\n self.send_callback(self, message)\n return seq\n\n def send_request(self, command, arguments=None):\n d = {'command': command}\n if arguments is not None:\n d['arguments'] = arguments\n seq = self._send_message('request', d)\n request = Request(self, seq)\n with self._lock:\n self._requests[seq] = request\n return request\n\n def send_event(self, event, body=None):\n d = {'event': event}\n if body is not None:\n d['body'] = body\n self._send_message('event', d)\n\n def send_response(self, request_seq, success, command, error_message=None, body=None):\n d = {\n 'request_seq': request_seq,\n 'success': success,\n 'command': command,\n }\n if success:\n if body is not None:\n d['body'] = body\n else:\n if error_message is not None:\n d['message'] = error_message\n self._send_message('response', d)\n\n def on_message(self, message):\n self.receive_callback(self, message)\n seq = message['seq']\n typ = message['type']\n if typ == 'request':\n command = message['command']\n arguments = message.get('arguments', None)\n self.on_request(seq, command, arguments)\n elif typ == 'event':\n event = message['event']\n body = message.get('body', None)\n self.on_event(seq, event, body)\n elif typ == 'response':\n request_seq = message['request_seq']\n success = message['success']\n command = message['command']\n error_message = message.get('message', None)\n body = message.get('body', None)\n self.on_response(seq, request_seq, success, command, error_message, body)\n else:\n raise IOError('Incoming message has invalid \"type\":\\n%r' % message)\n\n def on_request(self, seq, command, arguments):\n handler_name = '%s_request' % command\n specific_handler = getattr(self._handlers, handler_name, None)\n if specific_handler is not None:\n handler = lambda: specific_handler(self, arguments)\n else:\n generic_handler = getattr(self._handlers, 'request')\n handler = lambda: generic_handler(self, command, arguments)\n try:\n response_body = handler()\n except Exception as ex:\n self.send_response(seq, False, command, 
str(ex))\n else:\n self.send_response(seq, True, command, None, response_body)\n\n def on_event(self, seq, event, body):\n handler_name = '%s_event' % event\n specific_handler = getattr(self._handlers, handler_name, None)\n if specific_handler is not None:\n handler = lambda: specific_handler(self, body)\n else:\n generic_handler = getattr(self._handlers, 'event')\n handler = lambda: generic_handler(self, event, body)\n handler()\n\n def on_response(self, seq, request_seq, success, command, error_message, body):\n try:\n with self._lock:\n request = self._requests.pop(request_seq)\n except KeyError:\n raise KeyError('Received response to unknown request %d', request_seq)\n return request._handle_response(success, command, error_message, body)\n\n def _process_incoming_messages(self):\n while True:\n try:\n message = self.stream.read_json()\n except EOFError:\n break\n try:\n self.on_message(message)\n except Exception:\n print('Error while processing message:\\n%r\\n\\n' % message, file=sys.__stderr__)\n raise\n\n\nclass MessageHandlers(object):\n \"\"\"A simple delegating message handlers object for use with JsonMessageChannel.\n For every argument provided, the object has an attribute with the corresponding\n name and value. Example:\n \"\"\"\n\n def __init__(self, **kwargs):\n for name, func in kwargs.items():\n setattr(self, name, func)", "path": "ptvsd/messaging.py"}]}
4,040
462
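A minimal standalone sketch of the synchronization pattern the golden diff above applies: `_send_message` becomes a context manager that yields the sequence number while the channel lock is still held, so the caller registers the pending request before the message reaches the wire. The class and stream below are illustrative stand-ins, not the real ptvsd types.

```python
import contextlib
import itertools
import threading


class ChannelSketch:
    def __init__(self, stream):
        self._stream = stream                # assumed to expose write_json()
        self._lock = threading.Lock()
        self._seq_iter = itertools.count(1)
        self._requests = {}

    @contextlib.contextmanager
    def _send_message(self, type, rest={}):
        with self._lock:
            seq = next(self._seq_iter)
        message = {'seq': seq, 'type': type}
        message.update(rest)
        with self._lock:
            # Yield while the lock is held: the caller records state for
            # `seq` first, and only then is the message actually written.
            yield seq
            self._stream.write_json(message)

    def send_request(self, command, arguments=None):
        d = {'command': command}
        if arguments is not None:
            d['arguments'] = arguments
        with self._send_message('request', d) as seq:
            request = {'seq': seq, 'response': None}   # stand-in for Request
            self._requests[seq] = request
        return request


class _NullStream:
    def write_json(self, message):
        pass


channel = ChannelSketch(_NullStream())
req = channel.send_request('evaluate', {'expression': '1 + 1'})
assert req['seq'] in channel._requests   # registered before the write completed
```

A response handler that takes the same lock before looking up `request_seq` now blocks until both the registration and the write have finished, which closes the race.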
gh_patches_debug_13238
rasdani/github-patches
git_diff
mindsdb__mindsdb-2007
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Response contains 'nan' instead of `null` if do ``` select null, null, null from information_schema.tables limit 1; ``` then response will be: ``` +------+--------+--------+ | None | None_2 | None_3 | +------+--------+--------+ | nan | nan | nan | +------+--------+--------+ ``` row values must be `null` </issue> <code> [start of mindsdb/api/mysql/mysql_proxy/utilities/sql.py] 1 import duckdb 2 import pandas as pd 3 from mindsdb_sql import parse_sql 4 from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy 5 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender 6 7 from mindsdb.utilities.log import log 8 9 10 def _remove_table_name(root): 11 if isinstance(root, BinaryOperation): 12 _remove_table_name(root.args[0]) 13 _remove_table_name(root.args[1]) 14 elif isinstance(root, Identifier): 15 root.parts = [root.parts[-1]] 16 17 18 def query_df(df, query): 19 """ Perform simple query ('select' from one table, without subqueries and joins) on DataFrame. 20 21 Args: 22 df (pandas.DataFrame): data 23 query (mindsdb_sql.parser.ast.Select | str): select query 24 25 Returns: 26 pandas.DataFrame 27 """ 28 29 if isinstance(query, str): 30 query_ast = parse_sql(query, dialect='mysql') 31 else: 32 query_ast = query 33 34 if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False: 35 raise Exception("Only 'SELECT from TABLE' statements supported for internal query") 36 37 query_ast.from_table.parts = ['df_table'] 38 for identifier in query_ast.targets: 39 if isinstance(identifier, Identifier): 40 identifier.parts = [identifier.parts[-1]] 41 if isinstance(query_ast.order_by, list): 42 for orderby in query_ast.order_by: 43 if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier): 44 orderby.field.parts = [orderby.field.parts[-1]] 45 _remove_table_name(query_ast.where) 46 47 render = SqlalchemyRender('postgres') 48 try: 49 query_str = render.get_string(query_ast, with_failback=False) 50 except Exception as e: 51 log.error(f"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}") 52 query_str = render.get_string(query_ast, with_failback=True) 53 54 res = duckdb.query_df(df, 'df_table', query_str) 55 result_df = res.df() 56 result_df = result_df.where(pd.notnull(result_df), None) 57 return result_df 58 [end of mindsdb/api/mysql/mysql_proxy/utilities/sql.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py --- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py +++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py @@ -1,5 +1,5 @@ import duckdb -import pandas as pd +import numpy as np from mindsdb_sql import parse_sql from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender @@ -53,5 +53,5 @@ res = duckdb.query_df(df, 'df_table', query_str) result_df = res.df() - result_df = result_df.where(pd.notnull(result_df), None) + result_df = result_df.replace({np.nan: None}) return result_df
{"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n@@ -1,5 +1,5 @@\n import duckdb\n-import pandas as pd\n+import numpy as np\n from mindsdb_sql import parse_sql\n from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\n from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n@@ -53,5 +53,5 @@\n \n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n- result_df = result_df.where(pd.notnull(result_df), None)\n+ result_df = result_df.replace({np.nan: None})\n return result_df\n", "issue": "Response contains 'nan' instead of `null`\nif do \r\n```\r\nselect null, null, null from information_schema.tables limit 1;\r\n```\r\nthen response will be:\r\n```\r\n+------+--------+--------+\r\n| None | None_2 | None_3 |\r\n+------+--------+--------+\r\n| nan | nan | nan |\r\n+------+--------+--------+\r\n```\r\nrow values must be `null`\r\n\n", "before_files": [{"content": "import duckdb\nimport pandas as pd\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.utilities.log import log\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n if isinstance(query, str):\n query_ast = parse_sql(query, dialect='mysql')\n else:\n query_ast = query\n\n if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query_ast.from_table.parts = ['df_table']\n for identifier in query_ast.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query_ast.order_by, list):\n for orderby in query_ast.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query_ast.where)\n\n render = SqlalchemyRender('postgres')\n try:\n query_str = render.get_string(query_ast, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}\")\n query_str = render.get_string(query_ast, with_failback=True)\n\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.where(pd.notnull(result_df), None)\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}]}
1,203
190
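The one-line change in this golden diff matters because the two idioms interact differently with pandas dtypes. A small demonstration of the difference — the exact `where()` behaviour varies across pandas versions, which is how the `nan` leaked through here:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'v': [1.0, None]})    # float64 column: the None is stored as NaN

# On recent pandas, where() preserves the float dtype, so the hole stays NaN
# (and is then rendered as the string 'nan' by the MySQL proxy, per the issue):
kept = df.where(pd.notnull(df), None)
print(kept['v'].tolist())                # [1.0, nan]

# replace() upcasts the column to object and stores a genuine Python None,
# which serializes as SQL NULL:
fixed = df.replace({np.nan: None})
print(fixed['v'].tolist())               # [1.0, None]
print(fixed['v'].dtype)                  # object
```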
gh_patches_debug_28834
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-1837
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug]: ART Trier Germany collecting no more Data ### I Have A Problem With: A specific source ### What's Your Problem ART Trier Germany collecting no more Data. It worked till yesterday. I think they have a new homepage. The Calender is now empty, only one Entry on February 26th: A.R.T. Wichtiger Hinweis! The link (https://www.art-trier.de/cms/abfuhrtermine-1002.html) in the Description for ART Trier doesn't work anymore. Get a 404 Error Page. Ver. 1.45.1 ### Source (if relevant) art_trier_de ### Logs ```Shell no relevant logs ``` ### Relevant Configuration ```YAML - name: art_trier_de args: district: "Fellerich" zip_code: "54456" ``` ### Checklist Source Error - [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration) - [X] Checked that the website of your service provider is still working - [ ] Tested my attributes on the service provider website (if possible) - [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version) ### Checklist Sensor Error - [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used) ### Required - [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been. - [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate. </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py] 1 import contextlib 2 from datetime import datetime 3 from typing import Optional 4 from urllib.parse import quote 5 6 import requests 7 from waste_collection_schedule import Collection # type: ignore[attr-defined] 8 from waste_collection_schedule.service.ICS import ICS 9 10 TITLE = "ART Trier" 11 DESCRIPTION = "Source for waste collection of ART Trier." 12 URL = "https://www.art-trier.de" 13 TEST_CASES = { 14 "Trier": { 15 "zip_code": "54296", 16 "district": "Stadt Trier, Universitätsring", 17 }, # # https://www.art-trier.de/ics-feed/54296_trier_universitaetsring_1-1800.ics 18 "Schweich": { 19 "zip_code": "54338", 20 "district": "Schweich (inkl. 
Issel)", 21 }, # https://www.art-trier.de/ics-feed/54338_schweich_inkl_issel_1-1800.ics 22 "Dreis": { 23 "zip_code": "54518", 24 "district": "Dreis", 25 }, # https://www.art-trier.de/ics-feed/54518_dreis_1-1800.ics 26 "Wittlich Marktplatz": { 27 "zip_code": "54516", 28 "district": "Wittlich, Marktplatz", 29 }, # https://www.art-trier.de/ics-feed/54516_wittlich_marktplatz_1-1800.ics 30 "Wittlich Wengerohr": { 31 "zip_code": "54516", 32 "district": "Wittlich-Wengerohr", 33 }, # https://www.art-trier.de/ics-feed/54516_wittlich%2Dwengerohr_1-1800.ics 34 } 35 36 API_URL = "https://www.art-trier.de/ics-feed" 37 REMINDER_DAY = ( 38 "0" # The calendar event should be on the same day as the waste collection 39 ) 40 REMINDER_TIME = "0600" # The calendar event should start on any hour of the correct day, so this does not matter much 41 ICON_MAP = { 42 "Altpapier": "mdi:package-variant", 43 "Restmüll": "mdi:trash-can", 44 "Gelber Sack": "mdi:recycle", 45 } 46 SPECIAL_CHARS = str.maketrans( 47 { 48 " ": "_", 49 "ä": "ae", 50 "ü": "ue", 51 "ö": "oe", 52 "ß": "ss", 53 "(": None, 54 ")": None, 55 ",": None, 56 ".": None, 57 } 58 ) 59 60 61 class Source: 62 def __init__(self, district: str, zip_code: str): 63 self._district = quote( 64 district.lower().removeprefix("stadt ").translate(SPECIAL_CHARS).strip() 65 ) 66 self._zip_code = zip_code 67 self._ics = ICS(regex=r"^A.R.T. Abfuhrtermin: (.*)", split_at=r" & ") 68 69 def fetch(self): 70 url = f"{API_URL}/{self._zip_code}_{self._district}_{REMINDER_DAY}-{REMINDER_TIME}.ics" 71 72 res = requests.get(url) 73 res.raise_for_status() 74 75 schedule = self._ics.convert(res.text) 76 77 return [ 78 Collection(date=entry[0], t=entry[1], icon=ICON_MAP.get(entry[1])) 79 for entry in schedule 80 ] 81 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py @@ -1,13 +1,11 @@ -import contextlib -from datetime import datetime -from typing import Optional +import logging from urllib.parse import quote import requests from waste_collection_schedule import Collection # type: ignore[attr-defined] from waste_collection_schedule.service.ICS import ICS -TITLE = "ART Trier" +TITLE = "ART Trier (Depreciated)" DESCRIPTION = "Source for waste collection of ART Trier." URL = "https://www.art-trier.de" TEST_CASES = { @@ -56,6 +54,7 @@ ".": None, } ) +LOGGER = logging.getLogger(__name__) class Source: @@ -67,7 +66,11 @@ self._ics = ICS(regex=r"^A.R.T. Abfuhrtermin: (.*)", split_at=r" & ") def fetch(self): - url = f"{API_URL}/{self._zip_code}_{self._district}_{REMINDER_DAY}-{REMINDER_TIME}.ics" + LOGGER.warning( + "The ART Trier source is deprecated and might not work with all addresses anymore." + " Please use the ICS instead: https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/ics/art_trier_de.md" + ) + url = f"{API_URL}/{self._zip_code}:{self._district}::@{REMINDER_DAY}-{REMINDER_TIME}.ics" res = requests.get(url) res.raise_for_status()
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py\n@@ -1,13 +1,11 @@\n-import contextlib\n-from datetime import datetime\n-from typing import Optional\n+import logging\n from urllib.parse import quote\n \n import requests\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n from waste_collection_schedule.service.ICS import ICS\n \n-TITLE = \"ART Trier\"\n+TITLE = \"ART Trier (Depreciated)\"\n DESCRIPTION = \"Source for waste collection of ART Trier.\"\n URL = \"https://www.art-trier.de\"\n TEST_CASES = {\n@@ -56,6 +54,7 @@\n \".\": None,\n }\n )\n+LOGGER = logging.getLogger(__name__)\n \n \n class Source:\n@@ -67,7 +66,11 @@\n self._ics = ICS(regex=r\"^A.R.T. Abfuhrtermin: (.*)\", split_at=r\" & \")\n \n def fetch(self):\n- url = f\"{API_URL}/{self._zip_code}_{self._district}_{REMINDER_DAY}-{REMINDER_TIME}.ics\"\n+ LOGGER.warning(\n+ \"The ART Trier source is deprecated and might not work with all addresses anymore.\"\n+ \" Please use the ICS instead: https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/ics/art_trier_de.md\"\n+ )\n+ url = f\"{API_URL}/{self._zip_code}:{self._district}::@{REMINDER_DAY}-{REMINDER_TIME}.ics\"\n \n res = requests.get(url)\n res.raise_for_status()\n", "issue": "[Bug]: ART Trier Germany collecting no more Data\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nART Trier Germany collecting no more Data. It worked till yesterday. I think they have a new homepage.\r\nThe Calender is now empty, only one Entry on February 26th: A.R.T. Wichtiger Hinweis!\r\nThe link (https://www.art-trier.de/cms/abfuhrtermine-1002.html) in the Description for ART Trier doesn't work anymore. Get a 404 Error Page.\r\n\r\nVer. 
1.45.1\n\n### Source (if relevant)\n\nart_trier_de\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\n- name: art_trier_de\r\n args:\r\n district: \"Fellerich\"\r\n zip_code: \"54456\"\n```\n\n\n### Checklist Source Error\n\n- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [ ] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import contextlib\nfrom datetime import datetime\nfrom typing import Optional\nfrom urllib.parse import quote\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"ART Trier\"\nDESCRIPTION = \"Source for waste collection of ART Trier.\"\nURL = \"https://www.art-trier.de\"\nTEST_CASES = {\n \"Trier\": {\n \"zip_code\": \"54296\",\n \"district\": \"Stadt Trier, Universit\u00e4tsring\",\n }, # # https://www.art-trier.de/ics-feed/54296_trier_universitaetsring_1-1800.ics\n \"Schweich\": {\n \"zip_code\": \"54338\",\n \"district\": \"Schweich (inkl. Issel)\",\n }, # https://www.art-trier.de/ics-feed/54338_schweich_inkl_issel_1-1800.ics\n \"Dreis\": {\n \"zip_code\": \"54518\",\n \"district\": \"Dreis\",\n }, # https://www.art-trier.de/ics-feed/54518_dreis_1-1800.ics\n \"Wittlich Marktplatz\": {\n \"zip_code\": \"54516\",\n \"district\": \"Wittlich, Marktplatz\",\n }, # https://www.art-trier.de/ics-feed/54516_wittlich_marktplatz_1-1800.ics\n \"Wittlich Wengerohr\": {\n \"zip_code\": \"54516\",\n \"district\": \"Wittlich-Wengerohr\",\n }, # https://www.art-trier.de/ics-feed/54516_wittlich%2Dwengerohr_1-1800.ics\n}\n\nAPI_URL = \"https://www.art-trier.de/ics-feed\"\nREMINDER_DAY = (\n \"0\" # The calendar event should be on the same day as the waste collection\n)\nREMINDER_TIME = \"0600\" # The calendar event should start on any hour of the correct day, so this does not matter much\nICON_MAP = {\n \"Altpapier\": \"mdi:package-variant\",\n \"Restm\u00fcll\": \"mdi:trash-can\",\n \"Gelber Sack\": \"mdi:recycle\",\n}\nSPECIAL_CHARS = str.maketrans(\n {\n \" \": \"_\",\n \"\u00e4\": \"ae\",\n \"\u00fc\": \"ue\",\n \"\u00f6\": \"oe\",\n \"\u00df\": \"ss\",\n \"(\": None,\n \")\": None,\n \",\": None,\n \".\": None,\n }\n)\n\n\nclass Source:\n def __init__(self, district: str, zip_code: str):\n self._district = quote(\n district.lower().removeprefix(\"stadt \").translate(SPECIAL_CHARS).strip()\n )\n self._zip_code = zip_code\n self._ics = ICS(regex=r\"^A.R.T. 
Abfuhrtermin: (.*)\", split_at=r\" & \")\n\n def fetch(self):\n url = f\"{API_URL}/{self._zip_code}_{self._district}_{REMINDER_DAY}-{REMINDER_TIME}.ics\"\n\n res = requests.get(url)\n res.raise_for_status()\n\n schedule = self._ics.convert(res.text)\n\n return [\n Collection(date=entry[0], t=entry[1], icon=ICON_MAP.get(entry[1]))\n for entry in schedule\n ]\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/art_trier_de.py"}]}
1,912
402
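For reference, the district normalisation from the source above can be exercised on its own; the behavioural change in the golden diff is confined to the feed URL shape plus a deprecation warning. A sketch (needs Python 3.9+ for `str.removeprefix`):

```python
from urllib.parse import quote

SPECIAL_CHARS = str.maketrans({
    " ": "_", "ä": "ae", "ü": "ue", "ö": "oe", "ß": "ss",
    "(": None, ")": None, ",": None, ".": None,
})


def district_slug(district: str) -> str:
    # Same normalisation as Source.__init__ above.
    return quote(district.lower().removeprefix("stadt ").translate(SPECIAL_CHARS).strip())


slug = district_slug("Stadt Trier, Universitätsring")
print(slug)  # trier_universitaetsring

# Feed URL before the site relaunch (now 404) vs. the shape the patch moves to,
# using the default reminder settings (day 0, 06:00) from the source:
old_url = f"https://www.art-trier.de/ics-feed/54296_{slug}_0-0600.ics"
new_url = f"https://www.art-trier.de/ics-feed/54296:{slug}::@0-0600.ics"
print(old_url)
print(new_url)
```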
gh_patches_debug_9267
rasdani/github-patches
git_diff
pre-commit__pre-commit-1480
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> random.shuffle's random= argument got deprecated Related issue: [bpo-40465](https://bugs.python.org/issue40465). ``` black..................................................................../home/isidentical/.venv/lib/python3.10/site-packages/pre_commit/languages/helpers.py:95: DeprecationWarning: The *random* parameter to shuffle() has been deprecated since Python 3.9 and will be removed in a subsequent version. random.shuffle(seq, random=fixed_random.random) Passed ``` </issue> <code> [start of pre_commit/languages/helpers.py] 1 import multiprocessing 2 import os 3 import random 4 from typing import Any 5 from typing import List 6 from typing import Optional 7 from typing import overload 8 from typing import Sequence 9 from typing import Tuple 10 from typing import TYPE_CHECKING 11 12 import pre_commit.constants as C 13 from pre_commit.hook import Hook 14 from pre_commit.prefix import Prefix 15 from pre_commit.util import cmd_output_b 16 from pre_commit.xargs import xargs 17 18 if TYPE_CHECKING: 19 from typing import NoReturn 20 21 FIXED_RANDOM_SEED = 1542676186 22 23 24 def run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None: 25 cmd_output_b(*cmd, cwd=prefix.prefix_dir) 26 27 28 @overload 29 def environment_dir(d: None, language_version: str) -> None: ... 30 @overload 31 def environment_dir(d: str, language_version: str) -> str: ... 32 33 34 def environment_dir(d: Optional[str], language_version: str) -> Optional[str]: 35 if d is None: 36 return None 37 else: 38 return f'{d}-{language_version}' 39 40 41 def assert_version_default(binary: str, version: str) -> None: 42 if version != C.DEFAULT: 43 raise AssertionError( 44 f'For now, pre-commit requires system-installed {binary}', 45 ) 46 47 48 def assert_no_additional_deps( 49 lang: str, 50 additional_deps: Sequence[str], 51 ) -> None: 52 if additional_deps: 53 raise AssertionError( 54 f'For now, pre-commit does not support ' 55 f'additional_dependencies for {lang}', 56 ) 57 58 59 def basic_get_default_version() -> str: 60 return C.DEFAULT 61 62 63 def basic_healthy(prefix: Prefix, language_version: str) -> bool: 64 return True 65 66 67 def no_install( 68 prefix: Prefix, 69 version: str, 70 additional_dependencies: Sequence[str], 71 ) -> 'NoReturn': 72 raise AssertionError('This type is not installable') 73 74 75 def target_concurrency(hook: Hook) -> int: 76 if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ: 77 return 1 78 else: 79 # Travis appears to have a bunch of CPUs, but we can't use them all. 80 if 'TRAVIS' in os.environ: 81 return 2 82 else: 83 try: 84 return multiprocessing.cpu_count() 85 except NotImplementedError: 86 return 1 87 88 89 def _shuffled(seq: Sequence[str]) -> List[str]: 90 """Deterministically shuffle""" 91 fixed_random = random.Random() 92 fixed_random.seed(FIXED_RANDOM_SEED, version=1) 93 94 seq = list(seq) 95 random.shuffle(seq, random=fixed_random.random) 96 return seq 97 98 99 def run_xargs( 100 hook: Hook, 101 cmd: Tuple[str, ...], 102 file_args: Sequence[str], 103 **kwargs: Any, 104 ) -> Tuple[int, bytes]: 105 # Shuffle the files so that they more evenly fill out the xargs partitions, 106 # but do it deterministically in case a hook cares about ordering. 
107 file_args = _shuffled(file_args) 108 kwargs['target_concurrency'] = target_concurrency(hook) 109 return xargs(cmd, file_args, **kwargs) 110 [end of pre_commit/languages/helpers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py --- a/pre_commit/languages/helpers.py +++ b/pre_commit/languages/helpers.py @@ -18,7 +18,7 @@ if TYPE_CHECKING: from typing import NoReturn -FIXED_RANDOM_SEED = 1542676186 +FIXED_RANDOM_SEED = 1542676187 def run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None: @@ -92,7 +92,7 @@ fixed_random.seed(FIXED_RANDOM_SEED, version=1) seq = list(seq) - random.shuffle(seq, random=fixed_random.random) + fixed_random.shuffle(seq) return seq
{"golden_diff": "diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py\n--- a/pre_commit/languages/helpers.py\n+++ b/pre_commit/languages/helpers.py\n@@ -18,7 +18,7 @@\n if TYPE_CHECKING:\n from typing import NoReturn\n \n-FIXED_RANDOM_SEED = 1542676186\n+FIXED_RANDOM_SEED = 1542676187\n \n \n def run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None:\n@@ -92,7 +92,7 @@\n fixed_random.seed(FIXED_RANDOM_SEED, version=1)\n \n seq = list(seq)\n- random.shuffle(seq, random=fixed_random.random)\n+ fixed_random.shuffle(seq)\n return seq\n", "issue": "random.shuffle's random= argument got deprecated\nRelated issue: [bpo-40465](https://bugs.python.org/issue40465).\r\n```\r\nblack..................................................................../home/isidentical/.venv/lib/python3.10/site-packages/pre_commit/languages/helpers.py:95: DeprecationWarning: The *random* parameter to shuffle() has been deprecated\r\nsince Python 3.9 and will be removed in a subsequent version.\r\n random.shuffle(seq, random=fixed_random.random)\r\nPassed\r\n```\r\n\r\n\n", "before_files": [{"content": "import multiprocessing\nimport os\nimport random\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\nfrom typing import overload\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import TYPE_CHECKING\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.xargs import xargs\n\nif TYPE_CHECKING:\n from typing import NoReturn\n\nFIXED_RANDOM_SEED = 1542676186\n\n\ndef run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None:\n cmd_output_b(*cmd, cwd=prefix.prefix_dir)\n\n\n@overload\ndef environment_dir(d: None, language_version: str) -> None: ...\n@overload\ndef environment_dir(d: str, language_version: str) -> str: ...\n\n\ndef environment_dir(d: Optional[str], language_version: str) -> Optional[str]:\n if d is None:\n return None\n else:\n return f'{d}-{language_version}'\n\n\ndef assert_version_default(binary: str, version: str) -> None:\n if version != C.DEFAULT:\n raise AssertionError(\n f'For now, pre-commit requires system-installed {binary}',\n )\n\n\ndef assert_no_additional_deps(\n lang: str,\n additional_deps: Sequence[str],\n) -> None:\n if additional_deps:\n raise AssertionError(\n f'For now, pre-commit does not support '\n f'additional_dependencies for {lang}',\n )\n\n\ndef basic_get_default_version() -> str:\n return C.DEFAULT\n\n\ndef basic_healthy(prefix: Prefix, language_version: str) -> bool:\n return True\n\n\ndef no_install(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> 'NoReturn':\n raise AssertionError('This type is not installable')\n\n\ndef target_concurrency(hook: Hook) -> int:\n if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:\n return 1\n else:\n # Travis appears to have a bunch of CPUs, but we can't use them all.\n if 'TRAVIS' in os.environ:\n return 2\n else:\n try:\n return multiprocessing.cpu_count()\n except NotImplementedError:\n return 1\n\n\ndef _shuffled(seq: Sequence[str]) -> List[str]:\n \"\"\"Deterministically shuffle\"\"\"\n fixed_random = random.Random()\n fixed_random.seed(FIXED_RANDOM_SEED, version=1)\n\n seq = list(seq)\n random.shuffle(seq, random=fixed_random.random)\n return seq\n\n\ndef run_xargs(\n hook: Hook,\n cmd: Tuple[str, ...],\n file_args: Sequence[str],\n **kwargs: Any,\n) -> Tuple[int, bytes]:\n # Shuffle the files so that 
they more evenly fill out the xargs partitions,\n # but do it deterministically in case a hook cares about ordering.\n file_args = _shuffled(file_args)\n kwargs['target_concurrency'] = target_concurrency(hook)\n return xargs(cmd, file_args, **kwargs)\n", "path": "pre_commit/languages/helpers.py"}]}
1,555
174
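The fix itself is mechanical — call `shuffle` on the private `Random` instance instead of passing its `random` method to the module-level function — but note that the diff also bumps `FIXED_RANDOM_SEED`, presumably because the instance method draws from the generator differently and therefore yields a different (still deterministic) permutation:

```python
import random

FIXED_RANDOM_SEED = 1542676187   # value after the patch


def _shuffled(seq):
    """Deterministic shuffle without the deprecated random= parameter."""
    fixed_random = random.Random()
    fixed_random.seed(FIXED_RANDOM_SEED, version=1)
    seq = list(seq)
    fixed_random.shuffle(seq)    # instance method: no DeprecationWarning on 3.9+
    return seq


print(_shuffled(['a.py', 'b.py', 'c.py', 'd.py']))
print(_shuffled(['a.py', 'b.py', 'c.py', 'd.py']))   # same order on every run
```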
gh_patches_debug_20264
rasdani/github-patches
git_diff
svthalia__concrexit-3089
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Admin sales shift API should also return total_paid_revenue ### Is your feature request related to a problem? Please describe. The current admin sales shift api route only gives the total_revenue for a shift, but this might contain unpaid orders. We don't want those in certain scoreboards, like for the rag week. ### Describe the solution you'd like Add `total_paid_revenue` ### Motivation ### Describe alternatives you've considered ### Additional context </issue> <code> [start of website/sales/api/v2/admin/serializers/shift.py] 1 from rest_framework import serializers 2 3 from sales.models.product import ProductListItem 4 from sales.models.shift import Shift 5 6 7 class ProductListItemSerializer(serializers.ModelSerializer): 8 """Serializer for product list items.""" 9 10 class Meta: 11 model = ProductListItem 12 fields = ("name", "price", "age_restricted") 13 read_only_fields = ("name", "price", "age_restricted") 14 15 name = serializers.SerializerMethodField("_name") 16 age_restricted = serializers.SerializerMethodField("_age_restricted") 17 18 def _name(self, instance): 19 return instance.product.name 20 21 def _age_restricted(self, instance): 22 return instance.product.age_restricted 23 24 25 class ShiftSerializer(serializers.ModelSerializer): 26 """Serializer for shifts.""" 27 28 class Meta: 29 model = Shift 30 fields = ( 31 "pk", 32 "title", 33 "locked", 34 "active", 35 "start", 36 "end", 37 "products", 38 "total_revenue", 39 "num_orders", 40 "product_sales", 41 ) 42 43 total_revenue = serializers.DecimalField( 44 max_digits=10, decimal_places=2, min_value=0, read_only=True 45 ) 46 47 products = ProductListItemSerializer( 48 source="product_list.product_items", many=True, read_only=True 49 ) 50 51 title = serializers.SerializerMethodField("_get_title") 52 53 def _get_title(self, instance): 54 return instance.title 55 56 product_sales = serializers.JSONField() 57 [end of website/sales/api/v2/admin/serializers/shift.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/sales/api/v2/admin/serializers/shift.py b/website/sales/api/v2/admin/serializers/shift.py --- a/website/sales/api/v2/admin/serializers/shift.py +++ b/website/sales/api/v2/admin/serializers/shift.py @@ -1,5 +1,6 @@ from rest_framework import serializers +from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer from sales.models.product import ProductListItem from sales.models.shift import Shift @@ -36,13 +37,13 @@ "end", "products", "total_revenue", + "total_revenue_paid", "num_orders", "product_sales", ) - total_revenue = serializers.DecimalField( - max_digits=10, decimal_places=2, min_value=0, read_only=True - ) + total_revenue = PaymentAmountSerializer(min_value=0, read_only=True) + total_revenue_paid = PaymentAmountSerializer(min_value=0, read_only=True) products = ProductListItemSerializer( source="product_list.product_items", many=True, read_only=True
{"golden_diff": "diff --git a/website/sales/api/v2/admin/serializers/shift.py b/website/sales/api/v2/admin/serializers/shift.py\n--- a/website/sales/api/v2/admin/serializers/shift.py\n+++ b/website/sales/api/v2/admin/serializers/shift.py\n@@ -1,5 +1,6 @@\n from rest_framework import serializers\n \n+from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\n from sales.models.product import ProductListItem\n from sales.models.shift import Shift\n \n@@ -36,13 +37,13 @@\n \"end\",\n \"products\",\n \"total_revenue\",\n+ \"total_revenue_paid\",\n \"num_orders\",\n \"product_sales\",\n )\n \n- total_revenue = serializers.DecimalField(\n- max_digits=10, decimal_places=2, min_value=0, read_only=True\n- )\n+ total_revenue = PaymentAmountSerializer(min_value=0, read_only=True)\n+ total_revenue_paid = PaymentAmountSerializer(min_value=0, read_only=True)\n \n products = ProductListItemSerializer(\n source=\"product_list.product_items\", many=True, read_only=True\n", "issue": "Admin sales shift API should also return total_paid_revenue\n### Is your feature request related to a problem? Please describe.\r\nThe current admin sales shift api route only gives the total_revenue for a shift, but this might contain unpaid orders. We don't want those in certain scoreboards, like for the rag week.\r\n\r\n### Describe the solution you'd like\r\nAdd `total_paid_revenue`\r\n\r\n### Motivation\r\n\r\n### Describe alternatives you've considered\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom sales.models.product import ProductListItem\nfrom sales.models.shift import Shift\n\n\nclass ProductListItemSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for product list items.\"\"\"\n\n class Meta:\n model = ProductListItem\n fields = (\"name\", \"price\", \"age_restricted\")\n read_only_fields = (\"name\", \"price\", \"age_restricted\")\n\n name = serializers.SerializerMethodField(\"_name\")\n age_restricted = serializers.SerializerMethodField(\"_age_restricted\")\n\n def _name(self, instance):\n return instance.product.name\n\n def _age_restricted(self, instance):\n return instance.product.age_restricted\n\n\nclass ShiftSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for shifts.\"\"\"\n\n class Meta:\n model = Shift\n fields = (\n \"pk\",\n \"title\",\n \"locked\",\n \"active\",\n \"start\",\n \"end\",\n \"products\",\n \"total_revenue\",\n \"num_orders\",\n \"product_sales\",\n )\n\n total_revenue = serializers.DecimalField(\n max_digits=10, decimal_places=2, min_value=0, read_only=True\n )\n\n products = ProductListItemSerializer(\n source=\"product_list.product_items\", many=True, read_only=True\n )\n\n title = serializers.SerializerMethodField(\"_get_title\")\n\n def _get_title(self, instance):\n return instance.title\n\n product_sales = serializers.JSONField()\n", "path": "website/sales/api/v2/admin/serializers/shift.py"}]}
1,059
257
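A hedged sketch of what computing the paid-only total could look like. The real patch (above) reuses the project's `PaymentAmountSerializer` for both fields; the ORM names below (`orders`, `payment`, `total_amount`) are assumptions for illustration and not necessarily concrexit's actual schema:

```python
from django.db.models import Sum
from rest_framework import serializers


def total_revenue_paid(shift):
    # Assumed relations: Shift.orders, Order.payment (nullable FK),
    # Order.total_amount -- illustrative only.
    paid_orders = shift.orders.filter(payment__isnull=False)
    return paid_orders.aggregate(total=Sum("total_amount"))["total"] or 0


class ShiftSerializerSketch(serializers.Serializer):
    # Stand-ins for the PaymentAmountSerializer fields used in the patch:
    total_revenue = serializers.DecimalField(
        max_digits=10, decimal_places=2, read_only=True
    )
    total_revenue_paid = serializers.DecimalField(
        max_digits=10, decimal_places=2, read_only=True
    )
```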
gh_patches_debug_20662
rasdani/github-patches
git_diff
lightly-ai__lightly-758
You will be provided with a partial code base and an issue statement explaining a problem to resolve.

<issue>
Unusual error on lightly-1.2.12
Getting this error:

```
Epoch 19: 100% 430/430 [17:42<00:00, 2.47s/it, loss=2.05, v_num=0]
Best model is stored at: /content/lightly_outputs/2022-04-04/12-01-48/lightly_epoch_18.ckpt
########## Starting to embed your dataset.

Error executing job with overrides: ['token=min', 'dataset_id=mine', 'input_dir=/content/drive/MyDrive/data/mine/', 'trainer.max_epochs=20']
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/lightly/cli/lightly_cli.py", line 114, in lightly_cli
    return _lightly_cli(cfg)
  File "/usr/local/lib/python3.7/dist-packages/lightly/cli/lightly_cli.py", line 60, in _lightly_cli
    embeddings = _embed_cli(cfg, is_cli_call)
  File "/usr/local/lib/python3.7/dist-packages/lightly/cli/embed_cli.py", line 83, in _embed_cli
    embeddings, labels, filenames = encoder.embed(dataloader, device=device)
  File "/usr/local/lib/python3.7/dist-packages/lightly/embedding/embedding.py", line 113, in embed
    total=len(dataloader.dataset),
AttributeError: 'BackgroundGenerator' object has no attribute 'dataset'

Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
```

There are jpgs in `/content/drive/MyDrive/data/mine/`
Token/dataset_ide correct
</issue>

<code>
[start of lightly/embedding/embedding.py]
1 """ Embedding Strategies """
2 
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5 
6 import time
7 from typing import List, Union, Tuple
8 
9 import numpy as np
10 import torch
11 import lightly
12 from lightly.embedding._base import BaseEmbedding
13 from tqdm import tqdm
14 
15 from lightly.utils.reordering import sort_items_by_keys
16 
17 if lightly._is_prefetch_generator_available():
18     from prefetch_generator import BackgroundGenerator
19 
20 
21 class SelfSupervisedEmbedding(BaseEmbedding):
22     """Implementation of self-supervised embedding models.
23 
24     Implements an embedding strategy based on self-supervised learning. A
25     model backbone, self-supervised criterion, optimizer, and dataloader are
26     passed to the constructor. The embedding itself is a pytorch-lightning
27     module.
28 
29     The implementation is based on contrastive learning.
30 
31     * SimCLR: https://arxiv.org/abs/2002.05709
32     * MoCo: https://arxiv.org/abs/1911.05722
33     * SimSiam: https://arxiv.org/abs/2011.10566
34 
35     Attributes:
36         model:
37             A backbone convolutional network with a projection head.
38         criterion:
39             A contrastive loss function.
40         optimizer:
41             A PyTorch optimizer.
42         dataloader:
43             A torchvision dataloader.
44         scheduler:
45             A PyTorch learning rate scheduler.
46 
47     Examples:
48         >>> # define a model, criterion, optimizer, and dataloader above
49         >>> import lightly.embedding as embedding
50         >>> encoder = SelfSupervisedEmbedding(
51         >>>     model,
52         >>>     criterion,
53         >>>     optimizer,
54         >>>     dataloader,
55         >>> )
56         >>> # train the self-supervised embedding with default settings
57         >>> encoder.train_embedding()
58         >>> # pass pytorch-lightning trainer arguments as kwargs
59         >>> encoder.train_embedding(max_epochs=10)
60 
61     """
62 
63     def __init__(
64         self,
65         model: torch.nn.Module,
66         criterion: torch.nn.Module,
67         optimizer: torch.optim.Optimizer,
68         dataloader: torch.utils.data.DataLoader,
69         scheduler=None,
70     ):
71 
72         super(SelfSupervisedEmbedding, self).__init__(
73             model, criterion, optimizer, dataloader, scheduler
74         )
75 
76     def embed(self,
77               dataloader: torch.utils.data.DataLoader,
78               device: torch.device = None
79               ) -> Tuple[np.ndarray, np.ndarray, List[str]]:
80         """Embeds images in a vector space.
81 
82         Args:
83             dataloader:
84                 A PyTorch dataloader.
85             device:
86                 Selected device (`cpu`, `cuda`, see PyTorch documentation)
87 
88         Returns:
89             Tuple of (embeddings, labels, filenames) ordered by the
90             samples in the dataset of the dataloader.
91                 embeddings:
92                     Embedding of shape (n_samples, embedding_feature_size).
93                     One embedding for each sample.
94                 labels:
95                     Labels of shape (n_samples, ).
96                 filenames:
97                     The filenames from dataloader.dataset.get_filenames().
98 
99 
100         Examples:
101             >>> # embed images in vector space
102             >>> embeddings, labels, fnames = encoder.embed(dataloader)
103 
104         """
105 
106         self.model.eval()
107         embeddings, labels, filenames = None, None, []
108 
109         if lightly._is_prefetch_generator_available():
110             dataloader = BackgroundGenerator(dataloader, max_prefetch=3)
111 
112         pbar = tqdm(
113             total=len(dataloader.dataset),
114             unit='imgs'
115         )
116 
117         efficiency = 0.0
118         embeddings = []
119         labels = []
120         with torch.no_grad():
121 
122             start_timepoint = time.time()
123             for (image_batch, label_batch, filename_batch) in dataloader:
124 
125                 batch_size = image_batch.shape[0]
126 
127                 # the following 2 lines are needed to prevent a file handler leak,
128                 # see https://github.com/lightly-ai/lightly/pull/676
129                 image_batch = image_batch.to(device)
130                 label_batch = label_batch.clone()
131 
132                 filenames += [*filename_batch]
133 
134                 prepared_timepoint = time.time()
135 
136                 embedding_batch = self.model.backbone(image_batch)
137                 embedding_batch = embedding_batch.detach().reshape(batch_size, -1)
138 
139                 embeddings.append(embedding_batch)
140                 labels.append(label_batch)
141 
142                 finished_timepoint = time.time()
143 
144                 data_loading_time = prepared_timepoint - start_timepoint
145                 inference_time = finished_timepoint - prepared_timepoint
146                 total_batch_time = data_loading_time + inference_time
147 
148                 efficiency = inference_time / total_batch_time
149                 pbar.set_description("Compute efficiency: {:.2f}".format(efficiency))
150                 start_timepoint = time.time()
151 
152                 pbar.update(batch_size)
153 
154             embeddings = torch.cat(embeddings, 0)
155             labels = torch.cat(labels, 0)
156 
157         embeddings = embeddings.cpu().numpy()
158         labels = labels.cpu().numpy()
159 
160         sorted_filenames = dataloader.dataset.get_filenames()
161         sorted_embeddings = sort_items_by_keys(
162             filenames, embeddings, sorted_filenames
163         )
164         sorted_labels = sort_items_by_keys(
165             filenames, labels, sorted_filenames
166         )
167         embeddings = np.stack(sorted_embeddings)
168         labels = np.stack(sorted_labels)
169 
170         return embeddings, labels, sorted_filenames
171 
[end of lightly/embedding/embedding.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/lightly/embedding/embedding.py b/lightly/embedding/embedding.py
--- a/lightly/embedding/embedding.py
+++ b/lightly/embedding/embedding.py
@@ -106,11 +106,12 @@
         self.model.eval()
         embeddings, labels, filenames = None, None, []
 
+        dataset = dataloader.dataset
         if lightly._is_prefetch_generator_available():
             dataloader = BackgroundGenerator(dataloader, max_prefetch=3)
 
         pbar = tqdm(
-            total=len(dataloader.dataset),
+            total=len(dataset),
             unit='imgs'
         )
 
@@ -157,7 +158,7 @@
         embeddings = embeddings.cpu().numpy()
         labels = labels.cpu().numpy()
 
-        sorted_filenames = dataloader.dataset.get_filenames()
+        sorted_filenames = dataset.get_filenames()
         sorted_embeddings = sort_items_by_keys(
             filenames, embeddings, sorted_filenames
         )
{"golden_diff": "diff --git a/lightly/embedding/embedding.py b/lightly/embedding/embedding.py\n--- a/lightly/embedding/embedding.py\n+++ b/lightly/embedding/embedding.py\n@@ -106,11 +106,12 @@\n self.model.eval()\n embeddings, labels, filenames = None, None, []\n \n+ dataset = dataloader.dataset\n if lightly._is_prefetch_generator_available():\n dataloader = BackgroundGenerator(dataloader, max_prefetch=3)\n \n pbar = tqdm(\n- total=len(dataloader.dataset),\n+ total=len(dataset),\n unit='imgs'\n )\n \n@@ -157,7 +158,7 @@\n embeddings = embeddings.cpu().numpy()\n labels = labels.cpu().numpy()\n \n- sorted_filenames = dataloader.dataset.get_filenames()\n+ sorted_filenames = dataset.get_filenames()\n sorted_embeddings = sort_items_by_keys(\n filenames, embeddings, sorted_filenames\n )\n", "issue": "Unusual error on lightly-1.2.12\nGetting this error:\r\n\r\n```\r\nEpoch 19: 100% 430/430 [17:42<00:00, 2.47s/it, loss=2.05, v_num=0]\r\nBest model is stored at: /content/lightly_outputs/2022-04-04/12-01-48/lightly_epoch_18.ckpt\r\n########## Starting to embed your dataset.\r\n\r\nError executing job with overrides: ['token=min', 'dataset_id=mine', 'input_dir=/content/drive/MyDrive/data/mine/', 'trainer.max_epochs=20']\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/dist-packages/lightly/cli/lightly_cli.py\", line 114, in lightly_cli\r\n return _lightly_cli(cfg)\r\n File \"/usr/local/lib/python3.7/dist-packages/lightly/cli/lightly_cli.py\", line 60, in _lightly_cli\r\n embeddings = _embed_cli(cfg, is_cli_call)\r\n File \"/usr/local/lib/python3.7/dist-packages/lightly/cli/embed_cli.py\", line 83, in _embed_cli\r\n embeddings, labels, filenames = encoder.embed(dataloader, device=device)\r\n File \"/usr/local/lib/python3.7/dist-packages/lightly/embedding/embedding.py\", line 113, in embed\r\n total=len(dataloader.dataset),\r\nAttributeError: 'BackgroundGenerator' object has no attribute 'dataset'\r\n\r\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\r\n```\r\n\r\nThere are jpgs in `/content/drive/MyDrive/data/mine/`\r\nToken/dataset_ide correct\n", "before_files": [{"content": "\"\"\" Embedding Strategies \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport time\nfrom typing import List, Union, Tuple\n\nimport numpy as np\nimport torch\nimport lightly\nfrom lightly.embedding._base import BaseEmbedding\nfrom tqdm import tqdm\n\nfrom lightly.utils.reordering import sort_items_by_keys\n\nif lightly._is_prefetch_generator_available():\n from prefetch_generator import BackgroundGenerator\n\n\nclass SelfSupervisedEmbedding(BaseEmbedding):\n \"\"\"Implementation of self-supervised embedding models.\n\n Implements an embedding strategy based on self-supervised learning. A\n model backbone, self-supervised criterion, optimizer, and dataloader are\n passed to the constructor. 
The embedding itself is a pytorch-lightning\n module.\n\n The implementation is based on contrastive learning.\n\n * SimCLR: https://arxiv.org/abs/2002.05709\n * MoCo: https://arxiv.org/abs/1911.05722\n * SimSiam: https://arxiv.org/abs/2011.10566\n\n Attributes:\n model:\n A backbone convolutional network with a projection head.\n criterion:\n A contrastive loss function.\n optimizer:\n A PyTorch optimizer.\n dataloader:\n A torchvision dataloader.\n scheduler:\n A PyTorch learning rate scheduler.\n\n Examples:\n >>> # define a model, criterion, optimizer, and dataloader above\n >>> import lightly.embedding as embedding\n >>> encoder = SelfSupervisedEmbedding(\n >>> model,\n >>> criterion,\n >>> optimizer,\n >>> dataloader,\n >>> )\n >>> #\u00a0train the self-supervised embedding with default settings\n >>> encoder.train_embedding()\n >>> #\u00a0pass pytorch-lightning trainer arguments as kwargs\n >>> encoder.train_embedding(max_epochs=10)\n\n \"\"\"\n\n def __init__(\n self,\n model: torch.nn.Module,\n criterion: torch.nn.Module,\n optimizer: torch.optim.Optimizer,\n dataloader: torch.utils.data.DataLoader,\n scheduler=None,\n ):\n\n super(SelfSupervisedEmbedding, self).__init__(\n model, criterion, optimizer, dataloader, scheduler\n )\n\n def embed(self,\n dataloader: torch.utils.data.DataLoader,\n device: torch.device = None\n ) -> Tuple[np.ndarray, np.ndarray, List[str]]:\n \"\"\"Embeds images in a vector space.\n\n Args:\n dataloader:\n A PyTorch dataloader.\n device:\n Selected device (`cpu`, `cuda`, see PyTorch documentation)\n\n Returns:\n Tuple of (embeddings, labels, filenames) ordered by the\n samples in the dataset of the dataloader.\n embeddings:\n Embedding of shape (n_samples, embedding_feature_size).\n One embedding for each sample.\n labels:\n Labels of shape (n_samples, ).\n filenames:\n The filenames from dataloader.dataset.get_filenames().\n\n\n Examples:\n >>> # embed images in vector space\n >>> embeddings, labels, fnames = encoder.embed(dataloader)\n\n \"\"\"\n\n self.model.eval()\n embeddings, labels, filenames = None, None, []\n\n if lightly._is_prefetch_generator_available():\n dataloader = BackgroundGenerator(dataloader, max_prefetch=3)\n \n pbar = tqdm(\n total=len(dataloader.dataset),\n unit='imgs'\n )\n\n efficiency = 0.0\n embeddings = []\n labels = []\n with torch.no_grad():\n\n start_timepoint = time.time()\n for (image_batch, label_batch, filename_batch) in dataloader:\n\n batch_size = image_batch.shape[0]\n\n # the following 2 lines are needed to prevent a file handler leak,\n # see https://github.com/lightly-ai/lightly/pull/676\n image_batch = image_batch.to(device)\n label_batch = label_batch.clone()\n\n filenames += [*filename_batch]\n\n prepared_timepoint = time.time()\n\n embedding_batch = self.model.backbone(image_batch)\n embedding_batch = embedding_batch.detach().reshape(batch_size, -1)\n\n embeddings.append(embedding_batch)\n labels.append(label_batch)\n\n finished_timepoint = time.time()\n\n data_loading_time = prepared_timepoint - start_timepoint\n inference_time = finished_timepoint - prepared_timepoint\n total_batch_time = data_loading_time + inference_time\n\n efficiency = inference_time / total_batch_time\n pbar.set_description(\"Compute efficiency: {:.2f}\".format(efficiency))\n start_timepoint = time.time()\n\n pbar.update(batch_size)\n\n embeddings = torch.cat(embeddings, 0)\n labels = torch.cat(labels, 0)\n\n embeddings = embeddings.cpu().numpy()\n labels = labels.cpu().numpy()\n\n sorted_filenames = dataloader.dataset.get_filenames()\n 
sorted_embeddings = sort_items_by_keys(\n filenames, embeddings, sorted_filenames\n )\n sorted_labels = sort_items_by_keys(\n filenames, labels, sorted_filenames\n )\n embeddings = np.stack(sorted_embeddings)\n labels = np.stack(sorted_labels)\n\n return embeddings, labels, sorted_filenames\n", "path": "lightly/embedding/embedding.py"}]}
2,471
209
gh_patches_debug_24892
rasdani/github-patches
git_diff
deepset-ai__haystack-1284
You will be provided with a partial code base and an issue statement explaining a problem to resolve.

<issue>
Crawler does not write JSON, but serializes the result dict to a string written in a .json file
First of all, great work on Haystack! It’s an incredible library and I really enjoy playing around with it!

I noticed an odd behavior of the Crawler as in this example:

```python
from haystack.connector import Crawler

crawler = Crawler()
# crawl Haystack docs, i.e. all pages that include haystack.deepset.ai/docs/
docs = crawler.crawl(urls=["https://haystack.deepset.ai/docs/latest/get_startedmd"],
                     output_dir="crawled_files",
                     filter_urls= ["haystack\.deepset\.ai\/docs\/"])
```

The resulting file looks like this:

```
{'meta': {'url': 'https://haystack.deepset.ai/docs/latest/get_startedmd'}, 'text': 'Knowledge...
```

which is not valid JSON, as described in the docs. The Crawler rather simply serializes the data dict to string and writes it into a JSON file.

A working import to load the result in a next step looks like this:

```python
import ast

# docs[0] being the first result from the crawl run
with open(docs[0], 'r', encoding='utf-8') as f:
    result = ast.literal_eval(f.read())
```

instead of `json.read( ... )`.

As I don’t have the overview of the entire lib and how the created text files are used across the different pipelines, I am hesitant to use propose a solution. So I am raising this as a slightly odd behavior for now. Happy to provide a fix though given guidance from other developers.
</issue>

<code>
[start of haystack/connector/crawler.py]
1 import logging
2 import re
3 from pathlib import Path
4 from urllib.parse import urlparse
5 from typing import List, Any, Optional, Dict, Tuple, Union
6 from haystack.schema import Document, BaseComponent
7 logger = logging.getLogger(__name__)
8 
9 
10 class Crawler(BaseComponent):
11     """
12     Crawl texts from a website so that we can use them later in Haystack as a corpus for search / question answering etc.
13 
14     **Example:**
15     ```python
16     |    from haystack.connector import Crawler
17     |
18     |    crawler = Crawler()
19     |    # crawl Haystack docs, i.e. all pages that include haystack.deepset.ai/docs/
20     |    docs = crawler.crawl(urls=["https://haystack.deepset.ai/docs/latest/get_startedmd"],
21     |                         output_dir="crawled_files",
22     |                         filter_urls= ["haystack\.deepset\.ai\/docs\/"])
23     ```
24     """
25 
26     outgoing_edges = 1
27 
28     def __init__(self, output_dir: str, urls: Optional[List[str]] = None, crawler_depth: int = 1,
29                  filter_urls: Optional[List] = None, overwrite_existing_files=True):
30         """
31         Init object with basic params for crawling (can be overwritten later).
32 
33         :param output_dir: Path for the directory to store files
34         :param urls: List of http(s) address(es) (can also be supplied later when calling crawl())
35         :param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
36             0: Only initial list of urls
37             1: Follow links found on the initial URLs (but no further)
38         :param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
39             All URLs not matching at least one of the regular expressions will be dropped.
40         :param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
41         """
42         try:
43             from webdriver_manager.chrome import ChromeDriverManager
44         except ImportError:
45             raise ImportError("Can't find package `webdriver-manager` \n"
46                               "You can install it via `pip install webdriver-manager`")
47 
48         try:
49             from selenium import webdriver
50         except ImportError:
51             raise ImportError("Can't find package `selenium` \n"
52                               "You can install it via `pip install selenium`")
53 
54         options = webdriver.chrome.options.Options()
55         options.add_argument('--headless')
56         self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
57         self.urls = urls
58         self.output_dir = output_dir
59         self.crawler_depth = crawler_depth
60         self.filter_urls = filter_urls
61         self.overwrite_existing_files = overwrite_existing_files
62 
63     def crawl(self, output_dir: Union[str, Path, None] = None,
64               urls: Optional[List[str]] = None,
65               crawler_depth: Optional[int] = None,
66               filter_urls: Optional[List] = None,
67               overwrite_existing_files: Optional[bool] = None) -> List[Path]:
68         """
69         Craw URL(s), extract the text from the HTML, create a Haystack Document object out of it and save it (one JSON
70         file per URL, including text and basic meta data).
71         You can optionally specify via `filter_urls` to only crawl URLs that match a certain pattern.
72         All parameters are optional here and only meant to overwrite instance attributes at runtime.
73         If no parameters are provided to this method, the instance attributes that were passed during __init__ will be used.
74 
75         :param output_dir: Path for the directory to store files
76         :param urls: List of http addresses or single http address
77         :param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
78             0: Only initial list of urls
79             1: Follow links found on the initial URLs (but no further)
80         :param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
81             All URLs not matching at least one of the regular expressions will be dropped.
82         :param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
83 
84         :return: List of paths where the crawled webpages got stored
85         """
86         # use passed params or fallback to instance attributes
87         urls = urls or self.urls
88         if urls is None:
89             raise ValueError("Got no urls to crawl. Set `urls` to a list of URLs in __init__(), crawl() or run(). `")
90         output_dir = output_dir or self.output_dir
91         filter_urls = filter_urls or self.filter_urls
92         if overwrite_existing_files is None:
93             overwrite_existing_files = self.overwrite_existing_files
94         if crawler_depth is None:
95             crawler_depth = self.crawler_depth
96 
97         output_dir = Path(output_dir)
98         if not output_dir.exists():
99             output_dir.mkdir(parents=True)
100 
101         is_not_empty = len(list(output_dir.rglob("*"))) > 0
102         if is_not_empty and not overwrite_existing_files:
103             logger.info(
104                 f"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data."
105             )
106             return []
107         else:
108             logger.info(f"Fetching from {urls} to `{output_dir}`")
109 
110         filepaths = []
111 
112         sub_links: Dict[str, List] = {}
113 
114         # don't go beyond the initial list of urls
115         if crawler_depth == 0:
116             filepaths += self._write_to_files(urls, output_dir=output_dir)
117         # follow one level of sublinks
118         elif crawler_depth == 1:
119             for url_ in urls:
120                 existed_links: List = list(sum(list(sub_links.values()), []))
121                 sub_links[url_] = list(self._extract_sublinks_from_url(base_url=url_, filter_urls=filter_urls,
122                                                                        existed_links=existed_links))
123             for url in sub_links:
124                 filepaths += self._write_to_files(sub_links[url], output_dir=output_dir, base_url=url)
125 
126         return filepaths
127 
128     def _write_to_files(self, urls: List[str], output_dir: Path, base_url: str = None) -> List[Path]:
129         paths = []
130         for link in urls:
131             logger.info(f"writing contents from `{link}`")
132             self.driver.get(link)
133             el = self.driver.find_element_by_tag_name('body')
134             text = el.text
135 
136             link_split_values = link.replace('https://', '').split('/')
137             file_name = f"{'_'.join(link_split_values)}.json"
138             file_path = output_dir / file_name
139 
140             data = {}
141             data['meta'] = {'url': link}
142             if base_url:
143                 data['meta']['base_url'] = base_url
144             data['text'] = text
145             with open(file_path, 'w', encoding='utf-8') as f:
146                 f.write(str(data))
147             paths.append(file_path)
148 
149         return paths
150 
151     def run(self, output_dir: Union[str, Path, None] = None, urls: Optional[List[str]] = None,  # type: ignore
152             crawler_depth: Optional[int] = None, filter_urls: Optional[List] = None,  # type: ignore
153             overwrite_existing_files: Optional[bool] = None, **kwargs) -> Tuple[Dict, str]:  # type: ignore
154         """
155         Method to be executed when the Crawler is used as a Node within a Haystack pipeline.
156 
157         :param output_dir: Path for the directory to store files
158         :param urls: List of http addresses or single http address
159         :param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
160             0: Only initial list of urls
161             1: Follow links found on the initial URLs (but no further)
162         :param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
163             All URLs not matching at least one of the regular expressions will be dropped.
164         :param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
165 
166         :return: Tuple({"paths": List of filepaths, ...}, Name of output edge)
167         """
168 
169         filepaths = self.crawl(urls=urls, output_dir=output_dir, crawler_depth=crawler_depth, filter_urls=filter_urls,
170                                overwrite_existing_files=overwrite_existing_files)
171         results = {"paths": filepaths}
172         results.update(**kwargs)
173         return results, "output_1"
174 
175     @staticmethod
176     def _is_internal_url(base_url: str, sub_link: str) -> bool:
177         base_url_ = urlparse(base_url)
178         sub_link_ = urlparse(sub_link)
179         return base_url_.scheme == sub_link_.scheme and base_url_.netloc == sub_link_.netloc
180 
181     @staticmethod
182     def _is_inpage_navigation(base_url: str, sub_link: str) -> bool:
183         base_url_ = urlparse(base_url)
184         sub_link_ = urlparse(sub_link)
185         return base_url_.path == sub_link_.path and base_url_.netloc == sub_link_.netloc
186 
187     def _extract_sublinks_from_url(self, base_url: str,
188                                    filter_urls: Optional[List] = None,
189                                    existed_links: List = None) -> set:
190         self.driver.get(base_url)
191         a_elements = self.driver.find_elements_by_tag_name('a')
192         sub_links = set()
193         if not (existed_links and base_url in existed_links):
194             if filter_urls:
195                 if re.compile('|'.join(filter_urls)).search(base_url):
196                     sub_links.add(base_url)
197 
198         for i in a_elements:
199             sub_link = i.get_attribute('href')
200             if not (existed_links and sub_link in existed_links):
201                 if self._is_internal_url(base_url=base_url, sub_link=sub_link) \
202                         and (not self._is_inpage_navigation(base_url=base_url, sub_link=sub_link)):
203                     if filter_urls:
204                         if re.compile('|'.join(filter_urls)).search(sub_link):
205                             sub_links.add(sub_link)
206                     else:
207                         sub_links.add(sub_link)
208 
209         return sub_links
[end of haystack/connector/crawler.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/haystack/connector/crawler.py b/haystack/connector/crawler.py
--- a/haystack/connector/crawler.py
+++ b/haystack/connector/crawler.py
@@ -1,3 +1,4 @@
+import json
 import logging
 import re
 from pathlib import Path
@@ -15,10 +16,9 @@
     ```python
     |    from haystack.connector import Crawler
     |
-    |    crawler = Crawler()
+    |    crawler = Crawler(output_dir="crawled_files")
     |    # crawl Haystack docs, i.e. all pages that include haystack.deepset.ai/docs/
     |    docs = crawler.crawl(urls=["https://haystack.deepset.ai/docs/latest/get_startedmd"],
-    |                         output_dir="crawled_files",
     |                         filter_urls= ["haystack\.deepset\.ai\/docs\/"])
     ```
     """
@@ -143,7 +143,7 @@
                 data['meta']['base_url'] = base_url
             data['text'] = text
             with open(file_path, 'w', encoding='utf-8') as f:
-                f.write(str(data))
+                json.dump(data, f)
             paths.append(file_path)
 
         return paths
{"golden_diff": "diff --git a/haystack/connector/crawler.py b/haystack/connector/crawler.py\n--- a/haystack/connector/crawler.py\n+++ b/haystack/connector/crawler.py\n@@ -1,3 +1,4 @@\n+import json\n import logging\n import re\n from pathlib import Path\n@@ -15,10 +16,9 @@\n ```python\n | from haystack.connector import Crawler\n |\n- | crawler = Crawler()\n+ | crawler = Crawler(output_dir=\"crawled_files\")\n | # crawl Haystack docs, i.e. all pages that include haystack.deepset.ai/docs/\n | docs = crawler.crawl(urls=[\"https://haystack.deepset.ai/docs/latest/get_startedmd\"],\n- | output_dir=\"crawled_files\",\n | filter_urls= [\"haystack\\.deepset\\.ai\\/docs\\/\"])\n ```\n \"\"\"\n@@ -143,7 +143,7 @@\n data['meta']['base_url'] = base_url\n data['text'] = text\n with open(file_path, 'w', encoding='utf-8') as f:\n- f.write(str(data))\n+ json.dump(data, f)\n paths.append(file_path)\n \n return paths\n", "issue": "Crawler does not write JSON, but serializes the result dict to a string written in a .json file\nFirst of all, great work on Haystack! It\u2019s an incredible library and I really enjoy playing around with it!\r\n\r\nI noticed an odd behavior of the Crawler as in this example:\r\n\r\n```python\r\nfrom haystack.connector import Crawler\r\n\r\ncrawler = Crawler()\r\n# crawl Haystack docs, i.e. all pages that include haystack.deepset.ai/docs/\r\ndocs = crawler.crawl(urls=[\"https://haystack.deepset.ai/docs/latest/get_startedmd\"],\r\n output_dir=\"crawled_files\",\r\n filter_urls= [\"haystack\\.deepset\\.ai\\/docs\\/\"])\r\n```\r\n\r\nThe resulting file looks like this:\r\n\r\n```\r\n{'meta': {'url': 'https://haystack.deepset.ai/docs/latest/get_startedmd'}, 'text': 'Knowledge...\r\n```\r\n\r\nwhich is not valid JSON, as described in the docs. The Crawler rather simply serializes the data dict to string and writes it into a JSON file.\r\n\r\nA working import to load the result in a next step looks like this:\r\n\r\n```python\r\nimport ast\r\n\r\n# docs[0] being the first result from the crawl run\r\nwith open(docs[0], 'r', encoding='utf-8') as f:\r\n result = ast.literal_eval(f.read())\r\n```\r\n\r\ninstead of `json.read( ... )`.\r\n\r\nAs I don\u2019t have the overview of the entire lib and how the created text files are used across the different pipelines, I am hesitant to use propose a solution. So I am raising this as a slightly odd behavior for now. Happy to provide a fix though given guidance from other developers.\n", "before_files": [{"content": "import logging\nimport re\nfrom pathlib import Path\nfrom urllib.parse import urlparse\nfrom typing import List, Any, Optional, Dict, Tuple, Union\nfrom haystack.schema import Document, BaseComponent\nlogger = logging.getLogger(__name__)\n\n\nclass Crawler(BaseComponent):\n \"\"\"\n Crawl texts from a website so that we can use them later in Haystack as a corpus for search / question answering etc.\n\n **Example:**\n ```python\n | from haystack.connector import Crawler\n |\n | crawler = Crawler()\n | # crawl Haystack docs, i.e. 
all pages that include haystack.deepset.ai/docs/\n | docs = crawler.crawl(urls=[\"https://haystack.deepset.ai/docs/latest/get_startedmd\"],\n | output_dir=\"crawled_files\",\n | filter_urls= [\"haystack\\.deepset\\.ai\\/docs\\/\"])\n ```\n \"\"\"\n\n outgoing_edges = 1\n\n def __init__(self, output_dir: str, urls: Optional[List[str]] = None, crawler_depth: int = 1,\n filter_urls: Optional[List] = None, overwrite_existing_files=True):\n \"\"\"\n Init object with basic params for crawling (can be overwritten later).\n\n :param output_dir: Path for the directory to store files\n :param urls: List of http(s) address(es) (can also be supplied later when calling crawl())\n :param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:\n 0: Only initial list of urls\n 1: Follow links found on the initial URLs (but no further)\n :param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.\n All URLs not matching at least one of the regular expressions will be dropped.\n :param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content\n \"\"\"\n try:\n from webdriver_manager.chrome import ChromeDriverManager\n except ImportError:\n raise ImportError(\"Can't find package `webdriver-manager` \\n\"\n \"You can install it via `pip install webdriver-manager`\")\n\n try:\n from selenium import webdriver\n except ImportError:\n raise ImportError(\"Can't find package `selenium` \\n\"\n \"You can install it via `pip install selenium`\")\n\n options = webdriver.chrome.options.Options()\n options.add_argument('--headless')\n self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n self.urls = urls\n self.output_dir = output_dir\n self.crawler_depth = crawler_depth\n self.filter_urls = filter_urls\n self.overwrite_existing_files = overwrite_existing_files\n\n def crawl(self, output_dir: Union[str, Path, None] = None,\n urls: Optional[List[str]] = None,\n crawler_depth: Optional[int] = None,\n filter_urls: Optional[List] = None,\n overwrite_existing_files: Optional[bool] = None) -> List[Path]:\n \"\"\"\n Craw URL(s), extract the text from the HTML, create a Haystack Document object out of it and save it (one JSON\n file per URL, including text and basic meta data).\n You can optionally specify via `filter_urls` to only crawl URLs that match a certain pattern.\n All parameters are optional here and only meant to overwrite instance attributes at runtime.\n If no parameters are provided to this method, the instance attributes that were passed during __init__ will be used.\n\n :param output_dir: Path for the directory to store files\n :param urls: List of http addresses or single http address\n :param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:\n 0: Only initial list of urls\n 1: Follow links found on the initial URLs (but no further)\n :param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.\n All URLs not matching at least one of the regular expressions will be dropped.\n :param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content\n\n :return: List of paths where the crawled webpages got stored\n \"\"\"\n # use passed params or fallback to instance attributes\n urls = urls or self.urls\n if urls is None:\n raise ValueError(\"Got no urls to crawl. Set `urls` to a list of URLs in __init__(), crawl() or run(). 
`\")\n output_dir = output_dir or self.output_dir\n filter_urls = filter_urls or self.filter_urls\n if overwrite_existing_files is None:\n overwrite_existing_files = self.overwrite_existing_files\n if crawler_depth is None:\n crawler_depth = self.crawler_depth\n\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir(parents=True)\n\n is_not_empty = len(list(output_dir.rglob(\"*\"))) > 0\n if is_not_empty and not overwrite_existing_files:\n logger.info(\n f\"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data.\"\n )\n return []\n else:\n logger.info(f\"Fetching from {urls} to `{output_dir}`\")\n\n filepaths = []\n\n sub_links: Dict[str, List] = {}\n\n # don't go beyond the initial list of urls\n if crawler_depth == 0:\n filepaths += self._write_to_files(urls, output_dir=output_dir)\n # follow one level of sublinks\n elif crawler_depth == 1:\n for url_ in urls:\n existed_links: List = list(sum(list(sub_links.values()), []))\n sub_links[url_] = list(self._extract_sublinks_from_url(base_url=url_, filter_urls=filter_urls,\n existed_links=existed_links))\n for url in sub_links:\n filepaths += self._write_to_files(sub_links[url], output_dir=output_dir, base_url=url)\n\n return filepaths\n\n def _write_to_files(self, urls: List[str], output_dir: Path, base_url: str = None) -> List[Path]:\n paths = []\n for link in urls:\n logger.info(f\"writing contents from `{link}`\")\n self.driver.get(link)\n el = self.driver.find_element_by_tag_name('body')\n text = el.text\n\n link_split_values = link.replace('https://', '').split('/')\n file_name = f\"{'_'.join(link_split_values)}.json\"\n file_path = output_dir / file_name\n\n data = {}\n data['meta'] = {'url': link}\n if base_url:\n data['meta']['base_url'] = base_url\n data['text'] = text\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(str(data))\n paths.append(file_path)\n\n return paths\n\n def run(self, output_dir: Union[str, Path, None] = None, urls: Optional[List[str]] = None, # type: ignore\n crawler_depth: Optional[int] = None, filter_urls: Optional[List] = None, # type: ignore\n overwrite_existing_files: Optional[bool] = None, **kwargs) -> Tuple[Dict, str]: # type: ignore\n \"\"\"\n Method to be executed when the Crawler is used as a Node within a Haystack pipeline.\n\n :param output_dir: Path for the directory to store files\n :param urls: List of http addresses or single http address\n :param crawler_depth: How many sublinks to follow from the initial list of URLs. 
Current options:\n 0: Only initial list of urls\n 1: Follow links found on the initial URLs (but no further)\n :param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.\n All URLs not matching at least one of the regular expressions will be dropped.\n :param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content\n\n :return: Tuple({\"paths\": List of filepaths, ...}, Name of output edge)\n \"\"\"\n\n filepaths = self.crawl(urls=urls, output_dir=output_dir, crawler_depth=crawler_depth, filter_urls=filter_urls,\n overwrite_existing_files=overwrite_existing_files)\n results = {\"paths\": filepaths}\n results.update(**kwargs)\n return results, \"output_1\"\n\n @staticmethod\n def _is_internal_url(base_url: str, sub_link: str) -> bool:\n base_url_ = urlparse(base_url)\n sub_link_ = urlparse(sub_link)\n return base_url_.scheme == sub_link_.scheme and base_url_.netloc == sub_link_.netloc\n\n @staticmethod\n def _is_inpage_navigation(base_url: str, sub_link: str) -> bool:\n base_url_ = urlparse(base_url)\n sub_link_ = urlparse(sub_link)\n return base_url_.path == sub_link_.path and base_url_.netloc == sub_link_.netloc\n\n def _extract_sublinks_from_url(self, base_url: str,\n filter_urls: Optional[List] = None,\n existed_links: List = None) -> set:\n self.driver.get(base_url)\n a_elements = self.driver.find_elements_by_tag_name('a')\n sub_links = set()\n if not (existed_links and base_url in existed_links):\n if filter_urls:\n if re.compile('|'.join(filter_urls)).search(base_url):\n sub_links.add(base_url)\n\n for i in a_elements:\n sub_link = i.get_attribute('href')\n if not (existed_links and sub_link in existed_links):\n if self._is_internal_url(base_url=base_url, sub_link=sub_link) \\\n and (not self._is_inpage_navigation(base_url=base_url, sub_link=sub_link)):\n if filter_urls:\n if re.compile('|'.join(filter_urls)).search(sub_link):\n sub_links.add(sub_link)\n else:\n sub_links.add(sub_link)\n\n return sub_links\n", "path": "haystack/connector/crawler.py"}]}
3,535
280
gh_patches_debug_40218
rasdani/github-patches
git_diff
sopel-irc__sopel-927
You will be provided with a partial code base and an issue statement explaining a problem to resolve.

<issue>
Remove feedparser dependency
The weather module needlessly uses `feedparser` for some things, which adds a needless (python3-incompatible) dependency. It should be done with straight XML processing, instead.
</issue>

<code>
[start of sopel/modules/weather.py]
1 # coding=utf8
2 """
3 weather.py - Sopel Yahoo! Weather Module
4 Copyright 2008, Sean B. Palmer, inamidst.com
5 Copyright 2012, Edward Powell, embolalia.net
6 Licensed under the Eiffel Forum License 2.
7 
8 http://sopel.chat
9 """
10 from __future__ import unicode_literals
11 
12 from sopel import web
13 from sopel.module import commands, example, NOLIMIT
14 
15 import feedparser
16 import xmltodict
17 
18 
19 def woeid_search(query):
20     """
21     Find the first Where On Earth ID for the given query. Result is the etree
22     node for the result, so that location data can still be retrieved. Returns
23     None if there is no result, or the woeid field is empty.
24     """
25     query = 'q=select * from geo.placefinder where text="%s"' % query
26     body = web.get('http://query.yahooapis.com/v1/public/yql?' + query,
27                    dont_decode=True)
28     parsed = xmltodict.parse(body).get('query')
29     results = parsed.get('results')
30     if results is None or results.get('Result') is None:
31         return None
32     if type(results.get('Result')) is list:
33         return results.get('Result')[0]
34     return results.get('Result')
35 
36 
37 def get_cover(parsed):
38     try:
39         condition = parsed.entries[0]['yweather_condition']
40     except KeyError:
41         return 'unknown'
42     text = condition['text']
43     # code = int(condition['code'])
44     # TODO parse code to get those little icon thingies.
45     return text
46 
47 
48 def get_temp(parsed):
49     try:
50         condition = parsed.entries[0]['yweather_condition']
51         temp = int(condition['temp'])
52     except (KeyError, ValueError):
53         return 'unknown'
54     f = round((temp * 1.8) + 32, 2)
55     return (u'%d\u00B0C (%d\u00B0F)' % (temp, f))
56 
57 
58 def get_humidity(parsed):
59     try:
60         humidity = parsed['feed']['yweather_atmosphere']['humidity']
61     except (KeyError, ValueError):
62         return 'unknown'
63     return "Humidity: %s%%" % humidity
64 
65 
66 def get_wind(parsed):
67     try:
68         wind_data = parsed['feed']['yweather_wind']
69         kph = float(wind_data['speed'])
70         m_s = float(round(kph / 3.6, 1))
71         speed = int(round(kph / 1.852, 0))
72         degrees = int(wind_data['direction'])
73     except (KeyError, ValueError):
74         return 'unknown'
75 
76     if speed < 1:
77         description = 'Calm'
78     elif speed < 4:
79         description = 'Light air'
80     elif speed < 7:
81         description = 'Light breeze'
82     elif speed < 11:
83         description = 'Gentle breeze'
84     elif speed < 16:
85         description = 'Moderate breeze'
86     elif speed < 22:
87         description = 'Fresh breeze'
88     elif speed < 28:
89         description = 'Strong breeze'
90     elif speed < 34:
91         description = 'Near gale'
92     elif speed < 41:
93         description = 'Gale'
94     elif speed < 48:
95         description = 'Strong gale'
96     elif speed < 56:
97         description = 'Storm'
98     elif speed < 64:
99         description = 'Violent storm'
100     else:
101         description = 'Hurricane'
102 
103     if (degrees <= 22.5) or (degrees > 337.5):
104         degrees = u'\u2193'
105     elif (degrees > 22.5) and (degrees <= 67.5):
106         degrees = u'\u2199'
107     elif (degrees > 67.5) and (degrees <= 112.5):
108         degrees = u'\u2190'
109     elif (degrees > 112.5) and (degrees <= 157.5):
110         degrees = u'\u2196'
111     elif (degrees > 157.5) and (degrees <= 202.5):
112         degrees = u'\u2191'
113     elif (degrees > 202.5) and (degrees <= 247.5):
114         degrees = u'\u2197'
115     elif (degrees > 247.5) and (degrees <= 292.5):
116         degrees = u'\u2192'
117     elif (degrees > 292.5) and (degrees <= 337.5):
118         degrees = u'\u2198'
119 
120     return description + ' ' + str(m_s) + 'm/s (' + degrees + ')'
121 
122 
123 @commands('weather', 'wea')
124 @example('.weather London')
125 def weather(bot, trigger):
126     """.weather location - Show the weather at the given location."""
127 
128     location = trigger.group(2)
129     woeid = ''
130     if not location:
131         woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
132         if not woeid:
133             return bot.msg(trigger.sender, "I don't know where you live. " +
134                            'Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.')
135     else:
136         location = location.strip()
137         woeid = bot.db.get_nick_value(location, 'woeid')
138         if woeid is None:
139             first_result = woeid_search(location)
140             if first_result is not None:
141                 woeid = first_result.get('woeid')
142 
143     if not woeid:
144         return bot.reply("I don't know where that is.")
145 
146     query = web.urlencode({'w': woeid, 'u': 'c'})
147     url = 'http://weather.yahooapis.com/forecastrss?' + query
148     parsed = feedparser.parse(url)
149     location = parsed['feed']['title']
150 
151     cover = get_cover(parsed)
152     temp = get_temp(parsed)
153     humidity = get_humidity(parsed)
154     wind = get_wind(parsed)
155     bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))
156 
157 
158 @commands('setlocation', 'setwoeid')
159 @example('.setlocation Columbus, OH')
160 def update_woeid(bot, trigger):
161     """Set your default weather location."""
162     if not trigger.group(2):
163         bot.reply('Give me a location, like "Washington, DC" or "London".')
164         return NOLIMIT
165 
166     first_result = woeid_search(trigger.group(2))
167     if first_result is None:
168         return bot.reply("I don't know where that is.")
169 
170     woeid = first_result.get('woeid')
171 
172     bot.db.set_nick_value(trigger.nick, 'woeid', woeid)
173 
174     neighborhood = first_result.get('neighborhood').text or ''
175     if neighborhood:
176         neighborhood += ','
177     city = first_result.get('city') or ''
178     state = first_result.get('state') or ''
179     country = first_result.get('country') or ''
180     uzip = first_result.get('uzip') or ''
181     bot.reply('I now have you at WOEID %s (%s %s, %s, %s %s.)' %
182               (woeid, neighborhood, city, state, country, uzip))
[end of sopel/modules/weather.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/sopel/modules/weather.py b/sopel/modules/weather.py
--- a/sopel/modules/weather.py
+++ b/sopel/modules/weather.py
@@ -12,7 +12,6 @@
 from sopel import web
 from sopel.module import commands, example, NOLIMIT
 
-import feedparser
 import xmltodict
 
 
@@ -36,10 +35,10 @@
 
 def get_cover(parsed):
     try:
-        condition = parsed.entries[0]['yweather_condition']
+        condition = parsed['channel']['item']['yweather:condition']
     except KeyError:
         return 'unknown'
-    text = condition['text']
+    text = condition['@text']
     # code = int(condition['code'])
     # TODO parse code to get those little icon thingies.
     return text
@@ -47,8 +46,8 @@
 
 def get_temp(parsed):
     try:
-        condition = parsed.entries[0]['yweather_condition']
-        temp = int(condition['temp'])
+        condition = parsed['channel']['item']['yweather:condition']
+        temp = int(condition['@temp'])
     except (KeyError, ValueError):
         return 'unknown'
     f = round((temp * 1.8) + 32, 2)
@@ -57,7 +56,7 @@
 
 def get_humidity(parsed):
     try:
-        humidity = parsed['feed']['yweather_atmosphere']['humidity']
+        humidity = parsed['channel']['yweather:atmosphere']['@humidity']
     except (KeyError, ValueError):
         return 'unknown'
     return "Humidity: %s%%" % humidity
@@ -65,11 +64,11 @@
 
 def get_wind(parsed):
     try:
-        wind_data = parsed['feed']['yweather_wind']
-        kph = float(wind_data['speed'])
+        wind_data = parsed['channel']['yweather:wind']
+        kph = float(wind_data['@speed'])
         m_s = float(round(kph / 3.6, 1))
         speed = int(round(kph / 1.852, 0))
-        degrees = int(wind_data['direction'])
+        degrees = int(wind_data['@direction'])
     except (KeyError, ValueError):
         return 'unknown'
 
@@ -144,9 +143,10 @@
         return bot.reply("I don't know where that is.")
 
     query = web.urlencode({'w': woeid, 'u': 'c'})
-    url = 'http://weather.yahooapis.com/forecastrss?' + query
-    parsed = feedparser.parse(url)
-    location = parsed['feed']['title']
+    raw = web.get('http://weather.yahooapis.com/forecastrss?' + query, 
+                  dont_decode=True)
+    parsed = xmltodict.parse(raw).get('rss')
+    location = parsed.get('channel').get('title')
 
     cover = get_cover(parsed)
     temp = get_temp(parsed)
@@ -171,7 +171,7 @@
 
     bot.db.set_nick_value(trigger.nick, 'woeid', woeid)
 
-    neighborhood = first_result.get('neighborhood').text or ''
+    neighborhood = first_result.get('neighborhood') or ''
     if neighborhood:
         neighborhood += ','
     city = first_result.get('city') or ''
{"golden_diff": "diff --git a/sopel/modules/weather.py b/sopel/modules/weather.py\n--- a/sopel/modules/weather.py\n+++ b/sopel/modules/weather.py\n@@ -12,7 +12,6 @@\n from sopel import web\n from sopel.module import commands, example, NOLIMIT\n \n-import feedparser\n import xmltodict\n \n \n@@ -36,10 +35,10 @@\n \n def get_cover(parsed):\n try:\n- condition = parsed.entries[0]['yweather_condition']\n+ condition = parsed['channel']['item']['yweather:condition']\n except KeyError:\n return 'unknown'\n- text = condition['text']\n+ text = condition['@text']\n # code = int(condition['code'])\n # TODO parse code to get those little icon thingies.\n return text\n@@ -47,8 +46,8 @@\n \n def get_temp(parsed):\n try:\n- condition = parsed.entries[0]['yweather_condition']\n- temp = int(condition['temp'])\n+ condition = parsed['channel']['item']['yweather:condition']\n+ temp = int(condition['@temp'])\n except (KeyError, ValueError):\n return 'unknown'\n f = round((temp * 1.8) + 32, 2)\n@@ -57,7 +56,7 @@\n \n def get_humidity(parsed):\n try:\n- humidity = parsed['feed']['yweather_atmosphere']['humidity']\n+ humidity = parsed['channel']['yweather:atmosphere']['@humidity']\n except (KeyError, ValueError):\n return 'unknown'\n return \"Humidity: %s%%\" % humidity\n@@ -65,11 +64,11 @@\n \n def get_wind(parsed):\n try:\n- wind_data = parsed['feed']['yweather_wind']\n- kph = float(wind_data['speed'])\n+ wind_data = parsed['channel']['yweather:wind']\n+ kph = float(wind_data['@speed'])\n m_s = float(round(kph / 3.6, 1))\n speed = int(round(kph / 1.852, 0))\n- degrees = int(wind_data['direction'])\n+ degrees = int(wind_data['@direction'])\n except (KeyError, ValueError):\n return 'unknown'\n \n@@ -144,9 +143,10 @@\n return bot.reply(\"I don't know where that is.\")\n \n query = web.urlencode({'w': woeid, 'u': 'c'})\n- url = 'http://weather.yahooapis.com/forecastrss?' + query\n- parsed = feedparser.parse(url)\n- location = parsed['feed']['title']\n+ raw = web.get('http://weather.yahooapis.com/forecastrss?' + query, \n+ dont_decode=True)\n+ parsed = xmltodict.parse(raw).get('rss')\n+ location = parsed.get('channel').get('title')\n \n cover = get_cover(parsed)\n temp = get_temp(parsed)\n@@ -171,7 +171,7 @@\n \n bot.db.set_nick_value(trigger.nick, 'woeid', woeid)\n \n- neighborhood = first_result.get('neighborhood').text or ''\n+ neighborhood = first_result.get('neighborhood') or ''\n if neighborhood:\n neighborhood += ','\n city = first_result.get('city') or ''\n", "issue": "Remove feedparser dependency\nThe weather module needlessly uses `feedparser` for some things, which adds a needless (python3-incompatible) dependency. It should be done with straight XML processing, instead.\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\nweather.py - Sopel Yahoo! Weather Module\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright 2012, Edward Powell, embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom sopel import web\nfrom sopel.module import commands, example, NOLIMIT\n\nimport feedparser\nimport xmltodict\n\n\ndef woeid_search(query):\n \"\"\"\n Find the first Where On Earth ID for the given query. Result is the etree\n node for the result, so that location data can still be retrieved. Returns\n None if there is no result, or the woeid field is empty.\n \"\"\"\n query = 'q=select * from geo.placefinder where text=\"%s\"' % query\n body = web.get('http://query.yahooapis.com/v1/public/yql?' 
+ query,\n dont_decode=True)\n parsed = xmltodict.parse(body).get('query')\n results = parsed.get('results')\n if results is None or results.get('Result') is None:\n return None\n if type(results.get('Result')) is list:\n return results.get('Result')[0]\n return results.get('Result')\n\n\ndef get_cover(parsed):\n try:\n condition = parsed.entries[0]['yweather_condition']\n except KeyError:\n return 'unknown'\n text = condition['text']\n # code = int(condition['code'])\n # TODO parse code to get those little icon thingies.\n return text\n\n\ndef get_temp(parsed):\n try:\n condition = parsed.entries[0]['yweather_condition']\n temp = int(condition['temp'])\n except (KeyError, ValueError):\n return 'unknown'\n f = round((temp * 1.8) + 32, 2)\n return (u'%d\\u00B0C (%d\\u00B0F)' % (temp, f))\n\n\ndef get_humidity(parsed):\n try:\n humidity = parsed['feed']['yweather_atmosphere']['humidity']\n except (KeyError, ValueError):\n return 'unknown'\n return \"Humidity: %s%%\" % humidity\n\n\ndef get_wind(parsed):\n try:\n wind_data = parsed['feed']['yweather_wind']\n kph = float(wind_data['speed'])\n m_s = float(round(kph / 3.6, 1))\n speed = int(round(kph / 1.852, 0))\n degrees = int(wind_data['direction'])\n except (KeyError, ValueError):\n return 'unknown'\n\n if speed < 1:\n description = 'Calm'\n elif speed < 4:\n description = 'Light air'\n elif speed < 7:\n description = 'Light breeze'\n elif speed < 11:\n description = 'Gentle breeze'\n elif speed < 16:\n description = 'Moderate breeze'\n elif speed < 22:\n description = 'Fresh breeze'\n elif speed < 28:\n description = 'Strong breeze'\n elif speed < 34:\n description = 'Near gale'\n elif speed < 41:\n description = 'Gale'\n elif speed < 48:\n description = 'Strong gale'\n elif speed < 56:\n description = 'Storm'\n elif speed < 64:\n description = 'Violent storm'\n else:\n description = 'Hurricane'\n\n if (degrees <= 22.5) or (degrees > 337.5):\n degrees = u'\\u2193'\n elif (degrees > 22.5) and (degrees <= 67.5):\n degrees = u'\\u2199'\n elif (degrees > 67.5) and (degrees <= 112.5):\n degrees = u'\\u2190'\n elif (degrees > 112.5) and (degrees <= 157.5):\n degrees = u'\\u2196'\n elif (degrees > 157.5) and (degrees <= 202.5):\n degrees = u'\\u2191'\n elif (degrees > 202.5) and (degrees <= 247.5):\n degrees = u'\\u2197'\n elif (degrees > 247.5) and (degrees <= 292.5):\n degrees = u'\\u2192'\n elif (degrees > 292.5) and (degrees <= 337.5):\n degrees = u'\\u2198'\n\n return description + ' ' + str(m_s) + 'm/s (' + degrees + ')'\n\n\n@commands('weather', 'wea')\n@example('.weather London')\ndef weather(bot, trigger):\n \"\"\".weather location - Show the weather at the given location.\"\"\"\n\n location = trigger.group(2)\n woeid = ''\n if not location:\n woeid = bot.db.get_nick_value(trigger.nick, 'woeid')\n if not woeid:\n return bot.msg(trigger.sender, \"I don't know where you live. \" +\n 'Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.')\n else:\n location = location.strip()\n woeid = bot.db.get_nick_value(location, 'woeid')\n if woeid is None:\n first_result = woeid_search(location)\n if first_result is not None:\n woeid = first_result.get('woeid')\n\n if not woeid:\n return bot.reply(\"I don't know where that is.\")\n\n query = web.urlencode({'w': woeid, 'u': 'c'})\n url = 'http://weather.yahooapis.com/forecastrss?' 
+ query\n parsed = feedparser.parse(url)\n location = parsed['feed']['title']\n\n cover = get_cover(parsed)\n temp = get_temp(parsed)\n humidity = get_humidity(parsed)\n wind = get_wind(parsed)\n bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))\n\n\n@commands('setlocation', 'setwoeid')\n@example('.setlocation Columbus, OH')\ndef update_woeid(bot, trigger):\n \"\"\"Set your default weather location.\"\"\"\n if not trigger.group(2):\n bot.reply('Give me a location, like \"Washington, DC\" or \"London\".')\n return NOLIMIT\n\n first_result = woeid_search(trigger.group(2))\n if first_result is None:\n return bot.reply(\"I don't know where that is.\")\n\n woeid = first_result.get('woeid')\n\n bot.db.set_nick_value(trigger.nick, 'woeid', woeid)\n\n neighborhood = first_result.get('neighborhood').text or ''\n if neighborhood:\n neighborhood += ','\n city = first_result.get('city') or ''\n state = first_result.get('state') or ''\n country = first_result.get('country') or ''\n uzip = first_result.get('uzip') or ''\n bot.reply('I now have you at WOEID %s (%s %s, %s, %s %s.)' %\n (woeid, neighborhood, city, state, country, uzip))\n", "path": "sopel/modules/weather.py"}]}
2,662
748
gh_patches_debug_8616
rasdani/github-patches
git_diff
googleapis__google-api-python-client-1271
You will be provided with a partial code base and an issue statement explaining a problem to resolve.

<issue>
Remove duplicate docs generation
In `synth.py` we have a `nox` session to generate the docs [here](https://github.com/googleapis/google-api-python-client/blob/master/synth.py#L36). The same python script is running as part of the Github action in #1187, so we should remove the `docs` session from `synth.py` and `noxfile.py`.
</issue>

<code>
[start of noxfile.py]
1 
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 #      http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 
16 import sys
17 
18 import nox
19 
20 test_dependencies = [
21     "django>=2.0.0",
22     "google-auth",
23     "google-auth-httplib2",
24     "mox",
25     "parameterized",
26     "pyopenssl",
27     "pytest",
28     "pytest-cov",
29     "webtest",
30     "coverage",
31     "unittest2",
32     "mock",
33 ]
34 
35 
36 @nox.session(python=["3.7"])
37 def lint(session):
38     session.install("flake8")
39     session.run(
40         "flake8",
41         "googleapiclient",
42         "tests",
43         "--count",
44         "--select=E9,F63,F7,F82",
45         "--show-source",
46         "--statistics",
47     )
48 
49 
50 @nox.session(python=["3.6", "3.7", "3.8", "3.9"])
51 @nox.parametrize(
52     "oauth2client",
53     [
54         "oauth2client<2dev",
55         "oauth2client>=2,<=3dev",
56         "oauth2client>=3,<=4dev",
57         "oauth2client>=4,<=5dev",
58     ],
59 )
60 def unit(session, oauth2client):
61     session.install(*test_dependencies)
62     session.install(oauth2client)
63     session.install('.')
64 
65     # Run py.test against the unit tests.
66     session.run(
67         "py.test",
68         "--quiet",
69         "--cov=googleapiclient",
70         "--cov=tests",
71         "--cov-append",
72         "--cov-config=.coveragerc",
73         "--cov-report=",
74         "--cov-fail-under=85",
75         "tests",
76         *session.posargs,
77     )
78 
79 
80 @nox.session(python="3.6")
81 def docs(session):
82     session.install('.')
83     session.run("python", "describe.py")
[end of noxfile.py]
[start of synth.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #     http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 
15 import synthtool as s
16 from synthtool import gcp
17 
18 
19 common = gcp.CommonTemplates()
20 
21 # ----------------------------------------------------------------------------
22 # Add templated files
23 # ----------------------------------------------------------------------------
24 templated_files = common.py_library()
25 
26 # Copy kokoro configs.
27 # Docs are excluded as repo docs cannot currently be generated using sphinx.
28 s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])
29 
30 # Also move issue templates
31 s.move(templated_files / '.github')
32 
33 # ----------------------------------------------------------------------------
34 # Generate docs
35 # ----------------------------------------------------------------------------
36 s.shell.run(["nox", "-s", "docs"], hide_output=False)
37 
[end of synth.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/noxfile.py b/noxfile.py --- a/noxfile.py +++ b/noxfile.py @@ -75,9 +75,3 @@ "tests", *session.posargs, ) - - [email protected](python="3.6") -def docs(session): - session.install('.') - session.run("python", "describe.py") \ No newline at end of file diff --git a/synth.py b/synth.py --- a/synth.py +++ b/synth.py @@ -29,8 +29,3 @@ # Also move issue templates s.move(templated_files / '.github') - -# ---------------------------------------------------------------------------- -# Generate docs -# ---------------------------------------------------------------------------- -s.shell.run(["nox", "-s", "docs"], hide_output=False)
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -75,9 +75,3 @@\n \"tests\",\n *session.posargs,\n )\n-\n-\[email protected](python=\"3.6\")\n-def docs(session):\n- session.install('.')\n- session.run(\"python\", \"describe.py\")\n\\ No newline at end of file\ndiff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -29,8 +29,3 @@\n \n # Also move issue templates\n s.move(templated_files / '.github')\n-\n-# ----------------------------------------------------------------------------\n-# Generate docs\n-# ----------------------------------------------------------------------------\n-s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "issue": "Remove duplicate docs generation\nIn `synth.py` we have a `nox` session to generate the docs [here](https://github.com/googleapis/google-api-python-client/blob/master/synth.py#L36). The same python script is running as part of the Github action in #1187, so we should remove the `docs` session from `synth.py` and `noxfile.py`.\n", "before_files": [{"content": "\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport nox\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"unittest2\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n session.install(*test_dependencies)\n session.install(oauth2client)\n session.install('.')\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=\"3.6\")\ndef docs(session):\n session.install('.')\n session.run(\"python\", \"describe.py\")", "path": "noxfile.py"}, {"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\n\n# Also move issue templates\ns.move(templated_files / '.github')\n\n# ----------------------------------------------------------------------------\n# Generate docs\n# ----------------------------------------------------------------------------\ns.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "path": "synth.py"}]}
1,620
174
gh_patches_debug_9906
rasdani/github-patches
git_diff
freedomofpress__securedrop-4927
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[1.1.0-rc4] "Unable to create virtualenv. Check network settings and try again"
(Tested on a Tails 3.16 Admin Workstation by checking out 1.1.0-rc4 tag, without updating my servers.)

As expected, running `securedrop-admin` commands triggered the "run setup" step. However, the `securedrop-admin setup` step itself did not complete successfully; it went pretty far along but finally failed with this error:

"Unable to create virtualenv. Check network settings and try again"

Tor seems to be working fine. Possibly intermittent issues but good to warn users about and have mitigation instructions if it is likely to arise during updates.
</issue>
<code>
[start of admin/bootstrap.py]
1 # -*- mode: python; coding: utf-8 -*-
2 #
3 # Copyright (C) 2013-2018 Freedom of the Press Foundation & al
4 # Copyright (C) 2018 Loic Dachary <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19
20 import argparse
21 import logging
22 import os
23 import shutil
24 import subprocess
25 import sys
26
27 sdlog = logging.getLogger(__name__)
28
29 DIR = os.path.dirname(os.path.realpath(__file__))
30 VENV_DIR = os.path.join(DIR, ".venv3")
31
32
33 def setup_logger(verbose=False):
34     """ Configure logging handler """
35     # Set default level on parent
36     sdlog.setLevel(logging.DEBUG)
37     level = logging.DEBUG if verbose else logging.INFO
38
39     stdout = logging.StreamHandler(sys.stdout)
40     stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
41     stdout.setLevel(level)
42     sdlog.addHandler(stdout)
43
44
45 def run_command(command):
46     """
47     Wrapper function to display stdout for running command,
48     similar to how shelling out in a Bash script displays rolling output.
49
50     Yields a list of the stdout from the `command`, and raises a
51     CalledProcessError if `command` returns non-zero.
52 """ 53 popen = subprocess.Popen(command, 54 stdout=subprocess.PIPE, 55 stderr=subprocess.STDOUT) 56 for stdout_line in iter(popen.stdout.readline, b""): 57 yield stdout_line 58 popen.stdout.close() 59 return_code = popen.wait() 60 if return_code: 61 raise subprocess.CalledProcessError(return_code, command) 62 63 64 def is_tails(): 65 try: 66 id = subprocess.check_output('lsb_release --id --short', 67 shell=True).strip() 68 except subprocess.CalledProcessError: 69 id = None 70 71 # dirty hack to unreliably detect Tails 4.0~beta2 72 if id == b'Debian': 73 if os.uname()[1] == 'amnesia': 74 id = 'Tails' 75 76 return id == 'Tails' 77 78 79 def clean_up_tails3_venv(virtualenv_dir=VENV_DIR): 80 """ 81 Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is 82 based on Debian Buster and uses libpython3.7. This means that the Tails 3.x 83 virtualenv will not work under Tails 4.x, and will need to be destroyed and 84 rebuilt. We can detect if the version of libpython is 3.5 in the 85 admin/.venv3/ folder, and delete it if that's the case. This will ensure a 86 smooth upgrade from Tails 3.x to Tails 4.x. 87 """ 88 if is_tails(): 89 try: 90 dist = subprocess.check_output('lsb_release --codename --short', 91 shell=True).strip() 92 except subprocess.CalledProcessError: 93 dist = None 94 95 # tails4 is based on buster 96 if dist == b'buster': 97 python_lib_path = os.path.join(virtualenv_dir, "lib/python3.5") 98 if os.path.exists(os.path.join(python_lib_path)): 99 sdlog.info( 100 "Tails 3 Python 3 virtualenv detected. " 101 "Removing it." 102 ) 103 shutil.rmtree(virtualenv_dir) 104 sdlog.info("Tails 3 Python 3 virtualenv deleted.") 105 106 107 def checkenv(args): 108 clean_up_tails3_venv(VENV_DIR) 109 if not os.path.exists(os.path.join(VENV_DIR, "bin/activate")): 110 sdlog.error('Please run "securedrop-admin setup".') 111 sys.exit(1) 112 113 114 def maybe_torify(): 115 if is_tails(): 116 return ['torify'] 117 else: 118 return [] 119 120 121 def install_apt_dependencies(args): 122 """ 123 Install apt dependencies in Tails. In order to install Ansible in 124 a virtualenv, first there are a number of Python prerequisites. 125 """ 126 sdlog.info("Installing SecureDrop Admin dependencies") 127 sdlog.info(("You'll be prompted for the temporary Tails admin password," 128 " which was set on Tails login screen")) 129 130 apt_command = ['sudo', 'su', '-c', 131 "apt-get update && \ 132 apt-get -q -o=Dpkg::Use-Pty=0 install -y \ 133 python3-virtualenv \ 134 python3-yaml \ 135 python3-pip \ 136 ccontrol \ 137 virtualenv \ 138 libffi-dev \ 139 libssl-dev \ 140 libpython3-dev", 141 ] 142 143 try: 144 # Print command results in real-time, to keep Admin apprised 145 # of progress during long-running command. 146 for output_line in run_command(apt_command): 147 print(output_line.decode('utf-8').rstrip()) 148 except subprocess.CalledProcessError: 149 # Tails supports apt persistence, which was used by SecureDrop 150 # under Tails 2.x. If updates are being applied, don't try to pile 151 # on with more apt requests. 152 sdlog.error(("Failed to install apt dependencies. Check network" 153 " connection and try again.")) 154 raise 155 156 157 def envsetup(args): 158 """Installs Admin tooling required for managing SecureDrop. 
Specifically: 159 160 * updates apt-cache 161 * installs apt packages for Python virtualenv 162 * creates virtualenv 163 * installs pip packages inside virtualenv 164 165 The virtualenv is created within the Persistence volume in Tails, so that 166 Ansible is available to the Admin on subsequent boots without requiring 167 installation of packages again. 168 """ 169 # clean up tails 3.x venv when migrating to tails 4.x 170 clean_up_tails3_venv(VENV_DIR) 171 172 # virtualenv doesnt exist? Install dependencies and create 173 if not os.path.exists(VENV_DIR): 174 175 install_apt_dependencies(args) 176 177 # Technically you can create a virtualenv from within python 178 # but pip can only be run over tor on tails, and debugging that 179 # along with instaling a third-party dependency is not worth 180 # the effort here. 181 sdlog.info("Setting up virtualenv") 182 try: 183 sdlog.debug(subprocess.check_output( 184 maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR], 185 stderr=subprocess.STDOUT)) 186 except subprocess.CalledProcessError as e: 187 sdlog.debug(e.output) 188 sdlog.error(("Unable to create virtualenv. Check network settings" 189 " and try again.")) 190 raise 191 else: 192 sdlog.info("Virtualenv already exists, not creating") 193 194 install_pip_dependencies(args) 195 if os.path.exists(os.path.join(DIR, 'setup.py')): 196 install_pip_self(args) 197 198 sdlog.info("Finished installing SecureDrop dependencies") 199 200 201 def install_pip_self(args): 202 pip_install_cmd = [ 203 os.path.join(VENV_DIR, 'bin', 'pip3'), 204 'install', '-e', DIR 205 ] 206 try: 207 subprocess.check_output(maybe_torify() + pip_install_cmd, 208 stderr=subprocess.STDOUT) 209 except subprocess.CalledProcessError as e: 210 sdlog.debug(e.output) 211 sdlog.error("Unable to install self, run with -v for more information") 212 raise 213 214 215 def install_pip_dependencies(args, pip_install_cmd=[ 216 os.path.join(VENV_DIR, 'bin', 'pip3'), 217 'install', 218 # Specify requirements file. 219 '-r', os.path.join(DIR, 'requirements.txt'), 220 '--require-hashes', 221 # Make sure to upgrade packages only if necessary. 222 '-U', '--upgrade-strategy', 'only-if-needed', 223 ]): 224 """ 225 Install Python dependencies via pip into virtualenv. 226 """ 227 228 sdlog.info("Checking Python dependencies for securedrop-admin") 229 try: 230 pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd, 231 stderr=subprocess.STDOUT) 232 except subprocess.CalledProcessError as e: 233 sdlog.debug(e.output) 234 sdlog.error(("Failed to install pip dependencies. Check network" 235 " connection and try again.")) 236 raise 237 238 sdlog.debug(pip_output) 239 if "Successfully installed" in str(pip_output): 240 sdlog.info("Python dependencies for securedrop-admin upgraded") 241 else: 242 sdlog.info("Python dependencies for securedrop-admin are up-to-date") 243 244 245 def parse_argv(argv): 246 parser = argparse.ArgumentParser() 247 parser.add_argument('-v', action='store_true', default=False, 248 help="Increase verbosity on output") 249 parser.set_defaults(func=envsetup) 250 251 subparsers = parser.add_subparsers() 252 253 envsetup_parser = subparsers.add_parser( 254 'envsetup', 255 help='Set up the admin virtualenv.' 256 ) 257 envsetup_parser.set_defaults(func=envsetup) 258 259 checkenv_parser = subparsers.add_parser( 260 'checkenv', 261 help='Check that the admin virtualenv is properly set up.' 
262 ) 263 checkenv_parser.set_defaults(func=checkenv) 264 265 return parser.parse_args(argv) 266 267 268 if __name__ == "__main__": 269 args = parse_argv(sys.argv[1:]) 270 setup_logger(args.v) 271 272 try: 273 args.func(args) 274 except Exception: 275 sys.exit(1) 276 else: 277 sys.exit(0) 278 [end of admin/bootstrap.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/admin/bootstrap.py b/admin/bootstrap.py --- a/admin/bootstrap.py +++ b/admin/bootstrap.py @@ -64,12 +64,12 @@ def is_tails(): try: id = subprocess.check_output('lsb_release --id --short', - shell=True).strip() + shell=True).decode('utf-8').strip() except subprocess.CalledProcessError: id = None # dirty hack to unreliably detect Tails 4.0~beta2 - if id == b'Debian': + if id == 'Debian': if os.uname()[1] == 'amnesia': id = 'Tails'
{"golden_diff": "diff --git a/admin/bootstrap.py b/admin/bootstrap.py\n--- a/admin/bootstrap.py\n+++ b/admin/bootstrap.py\n@@ -64,12 +64,12 @@\n def is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n- shell=True).strip()\n+ shell=True).decode('utf-8').strip()\n except subprocess.CalledProcessError:\n id = None\n \n # dirty hack to unreliably detect Tails 4.0~beta2\n- if id == b'Debian':\n+ if id == 'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n", "issue": "[1.1.0-rc4] \"Unable to create virtualenv. Check network settings and try again\"\n(Tested on a Tails 3.16 Admin Workstation by checking out 1.1.0-rc4 tag, without updating my servers.)\r\n\r\nAs expected, running `securedrop-admin` commands triggered the \"run setup\" step. However, the `securedrop-admin setup` step itself did not complete successfully; it went pretty far along but finally failed with this error:\r\n\r\n\"Unable to create virtualenv. Check network settings and try again\"\r\n\r\nTor seems to be working fine. Possibly intermittent issues but good to warn users about and have mitigation instructions if it is likely to arise during updates.\n[1.1.0-rc4] \"Unable to create virtualenv. Check network settings and try again\"\n(Tested on a Tails 3.16 Admin Workstation by checking out 1.1.0-rc4 tag, without updating my servers.)\r\n\r\nAs expected, running `securedrop-admin` commands triggered the \"run setup\" step. However, the `securedrop-admin setup` step itself did not complete successfully; it went pretty far along but finally failed with this error:\r\n\r\n\"Unable to create virtualenv. Check network settings and try again\"\r\n\r\nTor seems to be working fine. Possibly intermittent issues but good to warn users about and have mitigation instructions if it is likely to arise during updates.\n", "before_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nsdlog = logging.getLogger(__name__)\n\nDIR = os.path.dirname(os.path.realpath(__file__))\nVENV_DIR = os.path.join(DIR, \".venv3\")\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef run_command(command):\n \"\"\"\n Wrapper function to display stdout for running command,\n similar to how shelling out in a Bash script displays rolling output.\n\n Yields a list of the stdout from the `command`, and raises a\n CalledProcessError if `command` returns non-zero.\n \"\"\"\n popen = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n for stdout_line in iter(popen.stdout.readline, b\"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)\n\n\ndef is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n\n # dirty hack to unreliably detect Tails 4.0~beta2\n if id == b'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n\n return id == 'Tails'\n\n\ndef clean_up_tails3_venv(virtualenv_dir=VENV_DIR):\n \"\"\"\n Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is\n based on Debian Buster and uses libpython3.7. This means that the Tails 3.x\n virtualenv will not work under Tails 4.x, and will need to be destroyed and\n rebuilt. We can detect if the version of libpython is 3.5 in the\n admin/.venv3/ folder, and delete it if that's the case. This will ensure a\n smooth upgrade from Tails 3.x to Tails 4.x.\n \"\"\"\n if is_tails():\n try:\n dist = subprocess.check_output('lsb_release --codename --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n dist = None\n\n # tails4 is based on buster\n if dist == b'buster':\n python_lib_path = os.path.join(virtualenv_dir, \"lib/python3.5\")\n if os.path.exists(os.path.join(python_lib_path)):\n sdlog.info(\n \"Tails 3 Python 3 virtualenv detected. \"\n \"Removing it.\"\n )\n shutil.rmtree(virtualenv_dir)\n sdlog.info(\"Tails 3 Python 3 virtualenv deleted.\")\n\n\ndef checkenv(args):\n clean_up_tails3_venv(VENV_DIR)\n if not os.path.exists(os.path.join(VENV_DIR, \"bin/activate\")):\n sdlog.error('Please run \"securedrop-admin setup\".')\n sys.exit(1)\n\n\ndef maybe_torify():\n if is_tails():\n return ['torify']\n else:\n return []\n\n\ndef install_apt_dependencies(args):\n \"\"\"\n Install apt dependencies in Tails. 
In order to install Ansible in\n a virtualenv, first there are a number of Python prerequisites.\n \"\"\"\n sdlog.info(\"Installing SecureDrop Admin dependencies\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n\n apt_command = ['sudo', 'su', '-c',\n \"apt-get update && \\\n apt-get -q -o=Dpkg::Use-Pty=0 install -y \\\n python3-virtualenv \\\n python3-yaml \\\n python3-pip \\\n ccontrol \\\n virtualenv \\\n libffi-dev \\\n libssl-dev \\\n libpython3-dev\",\n ]\n\n try:\n # Print command results in real-time, to keep Admin apprised\n # of progress during long-running command.\n for output_line in run_command(apt_command):\n print(output_line.decode('utf-8').rstrip())\n except subprocess.CalledProcessError:\n # Tails supports apt persistence, which was used by SecureDrop\n # under Tails 2.x. If updates are being applied, don't try to pile\n # on with more apt requests.\n sdlog.error((\"Failed to install apt dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n\ndef envsetup(args):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n\n * updates apt-cache\n * installs apt packages for Python virtualenv\n * creates virtualenv\n * installs pip packages inside virtualenv\n\n The virtualenv is created within the Persistence volume in Tails, so that\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n # clean up tails 3.x venv when migrating to tails 4.x\n clean_up_tails3_venv(VENV_DIR)\n\n # virtualenv doesnt exist? Install dependencies and create\n if not os.path.exists(VENV_DIR):\n\n install_apt_dependencies(args)\n\n # Technically you can create a virtualenv from within python\n # but pip can only be run over tor on tails, and debugging that\n # along with instaling a third-party dependency is not worth\n # the effort here.\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. 
Check network settings\"\n \" and try again.\"))\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n\n install_pip_dependencies(args)\n if os.path.exists(os.path.join(DIR, 'setup.py')):\n install_pip_self(args)\n\n sdlog.info(\"Finished installing SecureDrop dependencies\")\n\n\ndef install_pip_self(args):\n pip_install_cmd = [\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install', '-e', DIR\n ]\n try:\n subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error(\"Unable to install self, run with -v for more information\")\n raise\n\n\ndef install_pip_dependencies(args, pip_install_cmd=[\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install',\n # Specify requirements file.\n '-r', os.path.join(DIR, 'requirements.txt'),\n '--require-hashes',\n # Make sure to upgrade packages only if necessary.\n '-U', '--upgrade-strategy', 'only-if-needed',\n]):\n \"\"\"\n Install Python dependencies via pip into virtualenv.\n \"\"\"\n\n sdlog.info(\"Checking Python dependencies for securedrop-admin\")\n try:\n pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Failed to install pip dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n sdlog.debug(pip_output)\n if \"Successfully installed\" in str(pip_output):\n sdlog.info(\"Python dependencies for securedrop-admin upgraded\")\n else:\n sdlog.info(\"Python dependencies for securedrop-admin are up-to-date\")\n\n\ndef parse_argv(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n\n subparsers = parser.add_subparsers()\n\n envsetup_parser = subparsers.add_parser(\n 'envsetup',\n help='Set up the admin virtualenv.'\n )\n envsetup_parser.set_defaults(func=envsetup)\n\n checkenv_parser = subparsers.add_parser(\n 'checkenv',\n help='Check that the admin virtualenv is properly set up.'\n )\n checkenv_parser.set_defaults(func=checkenv)\n\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n\n try:\n args.func(args)\n except Exception:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "admin/bootstrap.py"}]}
3,705
152
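The golden diff above turns on a Python 3 detail: `subprocess.check_output` returns `bytes`, and a `bytes` value never compares equal to a `str`. On Tails 3.x, where `lsb_release` prints `Tails`, the pre-patch `is_tails()` therefore returned `False`, so the virtualenv step ran without `torify`, which is consistent with the reported network failure. A minimal standalone sketch of the pitfall (it assumes a machine with `lsb_release` installed; the printed values are for a box that reports `Tails`):

```python
import subprocess

# check_output returns bytes on Python 3; bytes never equal a str, so the
# pre-patch comparison silently failed on every distribution.
raw = subprocess.check_output('lsb_release --id --short', shell=True).strip()
print(raw == 'Tails')                    # False: b'Tails' != 'Tails'
print(raw.decode('utf-8') == 'Tails')    # True once decoded, as in the patch
```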
gh_patches_debug_18679
rasdani/github-patches
git_diff
python-discord__bot-1108
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reddit cog does not escape markdown in post titles Discord markdown in reddit post titles is left unhandled and can sometimes break the links: ![redditpydisbot](https://i.imgur.com/ut0gzFQ.png) For the basic markdown passing it through d.py's `escape_markdown` should work, but from a quick look I haven't found a way to escape brackets in post titles, which breaks the text links. A replacement with similar unicode chars is an option </issue> <code> [start of bot/cogs/reddit.py] 1 import asyncio 2 import logging 3 import random 4 import textwrap 5 from collections import namedtuple 6 from datetime import datetime, timedelta 7 from typing import List 8 9 from aiohttp import BasicAuth, ClientError 10 from discord import Colour, Embed, TextChannel 11 from discord.ext.commands import Cog, Context, group 12 from discord.ext.tasks import loop 13 14 from bot.bot import Bot 15 from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks 16 from bot.converters import Subreddit 17 from bot.decorators import with_role 18 from bot.pagination import LinePaginator 19 from bot.utils.messages import sub_clyde 20 21 log = logging.getLogger(__name__) 22 23 AccessToken = namedtuple("AccessToken", ["token", "expires_at"]) 24 25 26 class Reddit(Cog): 27 """Track subreddit posts and show detailed statistics about them.""" 28 29 HEADERS = {"User-Agent": "python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)"} 30 URL = "https://www.reddit.com" 31 OAUTH_URL = "https://oauth.reddit.com" 32 MAX_RETRIES = 3 33 34 def __init__(self, bot: Bot): 35 self.bot = bot 36 37 self.webhook = None 38 self.access_token = None 39 self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret) 40 41 bot.loop.create_task(self.init_reddit_ready()) 42 self.auto_poster_loop.start() 43 44 def cog_unload(self) -> None: 45 """Stop the loop task and revoke the access token when the cog is unloaded.""" 46 self.auto_poster_loop.cancel() 47 if self.access_token and self.access_token.expires_at > datetime.utcnow(): 48 asyncio.create_task(self.revoke_access_token()) 49 50 async def init_reddit_ready(self) -> None: 51 """Sets the reddit webhook when the cog is loaded.""" 52 await self.bot.wait_until_guild_available() 53 if not self.webhook: 54 self.webhook = await self.bot.fetch_webhook(Webhooks.reddit) 55 56 @property 57 def channel(self) -> TextChannel: 58 """Get the #reddit channel object from the bot's cache.""" 59 return self.bot.get_channel(Channels.reddit) 60 61 async def get_access_token(self) -> None: 62 """ 63 Get a Reddit API OAuth2 access token and assign it to self.access_token. 64 65 A token is valid for 1 hour. There will be MAX_RETRIES to get a token, after which the cog 66 will be unloaded and a ClientError raised if retrieval was still unsuccessful. 67 """ 68 for i in range(1, self.MAX_RETRIES + 1): 69 response = await self.bot.http_session.post( 70 url=f"{self.URL}/api/v1/access_token", 71 headers=self.HEADERS, 72 auth=self.client_auth, 73 data={ 74 "grant_type": "client_credentials", 75 "duration": "temporary" 76 } 77 ) 78 79 if response.status == 200 and response.content_type == "application/json": 80 content = await response.json() 81 expiration = int(content["expires_in"]) - 60 # Subtract 1 minute for leeway. 
82 self.access_token = AccessToken( 83 token=content["access_token"], 84 expires_at=datetime.utcnow() + timedelta(seconds=expiration) 85 ) 86 87 log.debug(f"New token acquired; expires on UTC {self.access_token.expires_at}") 88 return 89 else: 90 log.debug( 91 f"Failed to get an access token: " 92 f"status {response.status} & content type {response.content_type}; " 93 f"retrying ({i}/{self.MAX_RETRIES})" 94 ) 95 96 await asyncio.sleep(3) 97 98 self.bot.remove_cog(self.qualified_name) 99 raise ClientError("Authentication with the Reddit API failed. Unloading the cog.") 100 101 async def revoke_access_token(self) -> None: 102 """ 103 Revoke the OAuth2 access token for the Reddit API. 104 105 For security reasons, it's good practice to revoke the token when it's no longer being used. 106 """ 107 response = await self.bot.http_session.post( 108 url=f"{self.URL}/api/v1/revoke_token", 109 headers=self.HEADERS, 110 auth=self.client_auth, 111 data={ 112 "token": self.access_token.token, 113 "token_type_hint": "access_token" 114 } 115 ) 116 117 if response.status == 204 and response.content_type == "application/json": 118 self.access_token = None 119 else: 120 log.warning(f"Unable to revoke access token: status {response.status}.") 121 122 async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]: 123 """A helper method to fetch a certain amount of Reddit posts at a given route.""" 124 # Reddit's JSON responses only provide 25 posts at most. 125 if not 25 >= amount > 0: 126 raise ValueError("Invalid amount of subreddit posts requested.") 127 128 # Renew the token if necessary. 129 if not self.access_token or self.access_token.expires_at < datetime.utcnow(): 130 await self.get_access_token() 131 132 url = f"{self.OAUTH_URL}/{route}" 133 for _ in range(self.MAX_RETRIES): 134 response = await self.bot.http_session.get( 135 url=url, 136 headers={**self.HEADERS, "Authorization": f"bearer {self.access_token.token}"}, 137 params=params 138 ) 139 if response.status == 200 and response.content_type == 'application/json': 140 # Got appropriate response - process and return. 141 content = await response.json() 142 posts = content["data"]["children"] 143 return posts[:amount] 144 145 await asyncio.sleep(3) 146 147 log.debug(f"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}") 148 return list() # Failed to get appropriate response within allowed number of retries. 149 150 async def get_top_posts(self, subreddit: Subreddit, time: str = "all", amount: int = 5) -> Embed: 151 """ 152 Get the top amount of posts for a given subreddit within a specified timeframe. 153 154 A time of "all" will get posts from all time, "day" will get top daily posts and "week" will get the top 155 weekly posts. 156 157 The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most. 158 """ 159 embed = Embed(description="") 160 161 posts = await self.fetch_posts( 162 route=f"{subreddit}/top", 163 amount=amount, 164 params={"t": time} 165 ) 166 167 if not posts: 168 embed.title = random.choice(ERROR_REPLIES) 169 embed.colour = Colour.red() 170 embed.description = ( 171 "Sorry! We couldn't find any posts from that subreddit. " 172 "If this problem persists, please let us know." 
173 ) 174 175 return embed 176 177 for post in posts: 178 data = post["data"] 179 180 text = data["selftext"] 181 if text: 182 text = textwrap.shorten(text, width=128, placeholder="...") 183 text += "\n" # Add newline to separate embed info 184 185 ups = data["ups"] 186 comments = data["num_comments"] 187 author = data["author"] 188 189 title = textwrap.shorten(data["title"], width=64, placeholder="...") 190 link = self.URL + data["permalink"] 191 192 embed.description += ( 193 f"**[{title}]({link})**\n" 194 f"{text}" 195 f"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\n\n" 196 ) 197 198 embed.colour = Colour.blurple() 199 return embed 200 201 @loop() 202 async def auto_poster_loop(self) -> None: 203 """Post the top 5 posts daily, and the top 5 posts weekly.""" 204 # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter 205 now = datetime.utcnow() 206 tomorrow = now + timedelta(days=1) 207 midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0) 208 seconds_until = (midnight_tomorrow - now).total_seconds() 209 210 await asyncio.sleep(seconds_until) 211 212 await self.bot.wait_until_guild_available() 213 if not self.webhook: 214 await self.bot.fetch_webhook(Webhooks.reddit) 215 216 if datetime.utcnow().weekday() == 0: 217 await self.top_weekly_posts() 218 # if it's a monday send the top weekly posts 219 220 for subreddit in RedditConfig.subreddits: 221 top_posts = await self.get_top_posts(subreddit=subreddit, time="day") 222 username = sub_clyde(f"{subreddit} Top Daily Posts") 223 message = await self.webhook.send(username=username, embed=top_posts, wait=True) 224 225 if message.channel.is_news(): 226 await message.publish() 227 228 async def top_weekly_posts(self) -> None: 229 """Post a summary of the top posts.""" 230 for subreddit in RedditConfig.subreddits: 231 # Send and pin the new weekly posts. 232 top_posts = await self.get_top_posts(subreddit=subreddit, time="week") 233 username = sub_clyde(f"{subreddit} Top Weekly Posts") 234 message = await self.webhook.send(wait=True, username=username, embed=top_posts) 235 236 if subreddit.lower() == "r/python": 237 if not self.channel: 238 log.warning("Failed to get #reddit channel to remove pins in the weekly loop.") 239 return 240 241 # Remove the oldest pins so that only 12 remain at most. 
242 pins = await self.channel.pins() 243 244 while len(pins) >= 12: 245 await pins[-1].unpin() 246 del pins[-1] 247 248 await message.pin() 249 250 if message.channel.is_news(): 251 await message.publish() 252 253 @group(name="reddit", invoke_without_command=True) 254 async def reddit_group(self, ctx: Context) -> None: 255 """View the top posts from various subreddits.""" 256 await ctx.send_help(ctx.command) 257 258 @reddit_group.command(name="top") 259 async def top_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None: 260 """Send the top posts of all time from a given subreddit.""" 261 async with ctx.typing(): 262 embed = await self.get_top_posts(subreddit=subreddit, time="all") 263 264 await ctx.send(content=f"Here are the top {subreddit} posts of all time!", embed=embed) 265 266 @reddit_group.command(name="daily") 267 async def daily_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None: 268 """Send the top posts of today from a given subreddit.""" 269 async with ctx.typing(): 270 embed = await self.get_top_posts(subreddit=subreddit, time="day") 271 272 await ctx.send(content=f"Here are today's top {subreddit} posts!", embed=embed) 273 274 @reddit_group.command(name="weekly") 275 async def weekly_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None: 276 """Send the top posts of this week from a given subreddit.""" 277 async with ctx.typing(): 278 embed = await self.get_top_posts(subreddit=subreddit, time="week") 279 280 await ctx.send(content=f"Here are this week's top {subreddit} posts!", embed=embed) 281 282 @with_role(*STAFF_ROLES) 283 @reddit_group.command(name="subreddits", aliases=("subs",)) 284 async def subreddits_command(self, ctx: Context) -> None: 285 """Send a paginated embed of all the subreddits we're relaying.""" 286 embed = Embed() 287 embed.title = "Relayed subreddits." 288 embed.colour = Colour.blurple() 289 290 await LinePaginator.paginate( 291 RedditConfig.subreddits, 292 ctx, embed, 293 footer_text="Use the reddit commands along with these to view their posts.", 294 empty=False, 295 max_lines=15 296 ) 297 298 299 def setup(bot: Bot) -> None: 300 """Load the Reddit cog.""" 301 if not RedditConfig.secret or not RedditConfig.client_id: 302 log.error("Credentials not provided, cog not loaded.") 303 return 304 bot.add_cog(Reddit(bot)) 305 [end of bot/cogs/reddit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bot/cogs/reddit.py b/bot/cogs/reddit.py --- a/bot/cogs/reddit.py +++ b/bot/cogs/reddit.py @@ -10,6 +10,7 @@ from discord import Colour, Embed, TextChannel from discord.ext.commands import Cog, Context, group from discord.ext.tasks import loop +from discord.utils import escape_markdown from bot.bot import Bot from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks @@ -187,6 +188,8 @@ author = data["author"] title = textwrap.shorten(data["title"], width=64, placeholder="...") + # Normal brackets interfere with Markdown. + title = escape_markdown(title).replace("[", "⦋").replace("]", "⦌") link = self.URL + data["permalink"] embed.description += (
{"golden_diff": "diff --git a/bot/cogs/reddit.py b/bot/cogs/reddit.py\n--- a/bot/cogs/reddit.py\n+++ b/bot/cogs/reddit.py\n@@ -10,6 +10,7 @@\n from discord import Colour, Embed, TextChannel\n from discord.ext.commands import Cog, Context, group\n from discord.ext.tasks import loop\n+from discord.utils import escape_markdown\n \n from bot.bot import Bot\n from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks\n@@ -187,6 +188,8 @@\n author = data[\"author\"]\n \n title = textwrap.shorten(data[\"title\"], width=64, placeholder=\"...\")\n+ # Normal brackets interfere with Markdown.\n+ title = escape_markdown(title).replace(\"[\", \"\u298b\").replace(\"]\", \"\u298c\")\n link = self.URL + data[\"permalink\"]\n \n embed.description += (\n", "issue": "Reddit cog does not escape markdown in post titles\nDiscord markdown in reddit post titles is left unhandled and can sometimes break the links:\r\n![redditpydisbot](https://i.imgur.com/ut0gzFQ.png)\r\n\r\nFor the basic markdown passing it through d.py's `escape_markdown` should work, but from a quick look I haven't found a way to escape brackets in post titles, which breaks the text links. A replacement with similar unicode chars is an option\n", "before_files": [{"content": "import asyncio\nimport logging\nimport random\nimport textwrap\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom aiohttp import BasicAuth, ClientError\nfrom discord import Colour, Embed, TextChannel\nfrom discord.ext.commands import Cog, Context, group\nfrom discord.ext.tasks import loop\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks\nfrom bot.converters import Subreddit\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\nfrom bot.utils.messages import sub_clyde\n\nlog = logging.getLogger(__name__)\n\nAccessToken = namedtuple(\"AccessToken\", [\"token\", \"expires_at\"])\n\n\nclass Reddit(Cog):\n \"\"\"Track subreddit posts and show detailed statistics about them.\"\"\"\n\n HEADERS = {\"User-Agent\": \"python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)\"}\n URL = \"https://www.reddit.com\"\n OAUTH_URL = \"https://oauth.reddit.com\"\n MAX_RETRIES = 3\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.webhook = None\n self.access_token = None\n self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret)\n\n bot.loop.create_task(self.init_reddit_ready())\n self.auto_poster_loop.start()\n\n def cog_unload(self) -> None:\n \"\"\"Stop the loop task and revoke the access token when the cog is unloaded.\"\"\"\n self.auto_poster_loop.cancel()\n if self.access_token and self.access_token.expires_at > datetime.utcnow():\n asyncio.create_task(self.revoke_access_token())\n\n async def init_reddit_ready(self) -> None:\n \"\"\"Sets the reddit webhook when the cog is loaded.\"\"\"\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)\n\n @property\n def channel(self) -> TextChannel:\n \"\"\"Get the #reddit channel object from the bot's cache.\"\"\"\n return self.bot.get_channel(Channels.reddit)\n\n async def get_access_token(self) -> None:\n \"\"\"\n Get a Reddit API OAuth2 access token and assign it to self.access_token.\n\n A token is valid for 1 hour. 
There will be MAX_RETRIES to get a token, after which the cog\n will be unloaded and a ClientError raised if retrieval was still unsuccessful.\n \"\"\"\n for i in range(1, self.MAX_RETRIES + 1):\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/access_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"grant_type\": \"client_credentials\",\n \"duration\": \"temporary\"\n }\n )\n\n if response.status == 200 and response.content_type == \"application/json\":\n content = await response.json()\n expiration = int(content[\"expires_in\"]) - 60 # Subtract 1 minute for leeway.\n self.access_token = AccessToken(\n token=content[\"access_token\"],\n expires_at=datetime.utcnow() + timedelta(seconds=expiration)\n )\n\n log.debug(f\"New token acquired; expires on UTC {self.access_token.expires_at}\")\n return\n else:\n log.debug(\n f\"Failed to get an access token: \"\n f\"status {response.status} & content type {response.content_type}; \"\n f\"retrying ({i}/{self.MAX_RETRIES})\"\n )\n\n await asyncio.sleep(3)\n\n self.bot.remove_cog(self.qualified_name)\n raise ClientError(\"Authentication with the Reddit API failed. Unloading the cog.\")\n\n async def revoke_access_token(self) -> None:\n \"\"\"\n Revoke the OAuth2 access token for the Reddit API.\n\n For security reasons, it's good practice to revoke the token when it's no longer being used.\n \"\"\"\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/revoke_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"token\": self.access_token.token,\n \"token_type_hint\": \"access_token\"\n }\n )\n\n if response.status == 204 and response.content_type == \"application/json\":\n self.access_token = None\n else:\n log.warning(f\"Unable to revoke access token: status {response.status}.\")\n\n async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:\n \"\"\"A helper method to fetch a certain amount of Reddit posts at a given route.\"\"\"\n # Reddit's JSON responses only provide 25 posts at most.\n if not 25 >= amount > 0:\n raise ValueError(\"Invalid amount of subreddit posts requested.\")\n\n # Renew the token if necessary.\n if not self.access_token or self.access_token.expires_at < datetime.utcnow():\n await self.get_access_token()\n\n url = f\"{self.OAUTH_URL}/{route}\"\n for _ in range(self.MAX_RETRIES):\n response = await self.bot.http_session.get(\n url=url,\n headers={**self.HEADERS, \"Authorization\": f\"bearer {self.access_token.token}\"},\n params=params\n )\n if response.status == 200 and response.content_type == 'application/json':\n # Got appropriate response - process and return.\n content = await response.json()\n posts = content[\"data\"][\"children\"]\n return posts[:amount]\n\n await asyncio.sleep(3)\n\n log.debug(f\"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}\")\n return list() # Failed to get appropriate response within allowed number of retries.\n\n async def get_top_posts(self, subreddit: Subreddit, time: str = \"all\", amount: int = 5) -> Embed:\n \"\"\"\n Get the top amount of posts for a given subreddit within a specified timeframe.\n\n A time of \"all\" will get posts from all time, \"day\" will get top daily posts and \"week\" will get the top\n weekly posts.\n\n The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most.\n \"\"\"\n embed = Embed(description=\"\")\n\n posts = await self.fetch_posts(\n 
route=f\"{subreddit}/top\",\n amount=amount,\n params={\"t\": time}\n )\n\n if not posts:\n embed.title = random.choice(ERROR_REPLIES)\n embed.colour = Colour.red()\n embed.description = (\n \"Sorry! We couldn't find any posts from that subreddit. \"\n \"If this problem persists, please let us know.\"\n )\n\n return embed\n\n for post in posts:\n data = post[\"data\"]\n\n text = data[\"selftext\"]\n if text:\n text = textwrap.shorten(text, width=128, placeholder=\"...\")\n text += \"\\n\" # Add newline to separate embed info\n\n ups = data[\"ups\"]\n comments = data[\"num_comments\"]\n author = data[\"author\"]\n\n title = textwrap.shorten(data[\"title\"], width=64, placeholder=\"...\")\n link = self.URL + data[\"permalink\"]\n\n embed.description += (\n f\"**[{title}]({link})**\\n\"\n f\"{text}\"\n f\"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\\n\\n\"\n )\n\n embed.colour = Colour.blurple()\n return embed\n\n @loop()\n async def auto_poster_loop(self) -> None:\n \"\"\"Post the top 5 posts daily, and the top 5 posts weekly.\"\"\"\n # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter\n now = datetime.utcnow()\n tomorrow = now + timedelta(days=1)\n midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0)\n seconds_until = (midnight_tomorrow - now).total_seconds()\n\n await asyncio.sleep(seconds_until)\n\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n await self.bot.fetch_webhook(Webhooks.reddit)\n\n if datetime.utcnow().weekday() == 0:\n await self.top_weekly_posts()\n # if it's a monday send the top weekly posts\n\n for subreddit in RedditConfig.subreddits:\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n username = sub_clyde(f\"{subreddit} Top Daily Posts\")\n message = await self.webhook.send(username=username, embed=top_posts, wait=True)\n\n if message.channel.is_news():\n await message.publish()\n\n async def top_weekly_posts(self) -> None:\n \"\"\"Post a summary of the top posts.\"\"\"\n for subreddit in RedditConfig.subreddits:\n # Send and pin the new weekly posts.\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n username = sub_clyde(f\"{subreddit} Top Weekly Posts\")\n message = await self.webhook.send(wait=True, username=username, embed=top_posts)\n\n if subreddit.lower() == \"r/python\":\n if not self.channel:\n log.warning(\"Failed to get #reddit channel to remove pins in the weekly loop.\")\n return\n\n # Remove the oldest pins so that only 12 remain at most.\n pins = await self.channel.pins()\n\n while len(pins) >= 12:\n await pins[-1].unpin()\n del pins[-1]\n\n await message.pin()\n\n if message.channel.is_news():\n await message.publish()\n\n @group(name=\"reddit\", invoke_without_command=True)\n async def reddit_group(self, ctx: Context) -> None:\n \"\"\"View the top posts from various subreddits.\"\"\"\n await ctx.send_help(ctx.command)\n\n @reddit_group.command(name=\"top\")\n async def top_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of all time from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"all\")\n\n await ctx.send(content=f\"Here are the top {subreddit} posts of all time!\", embed=embed)\n\n @reddit_group.command(name=\"daily\")\n async def daily_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of today from a given 
subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n\n await ctx.send(content=f\"Here are today's top {subreddit} posts!\", embed=embed)\n\n @reddit_group.command(name=\"weekly\")\n async def weekly_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of this week from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n\n await ctx.send(content=f\"Here are this week's top {subreddit} posts!\", embed=embed)\n\n @with_role(*STAFF_ROLES)\n @reddit_group.command(name=\"subreddits\", aliases=(\"subs\",))\n async def subreddits_command(self, ctx: Context) -> None:\n \"\"\"Send a paginated embed of all the subreddits we're relaying.\"\"\"\n embed = Embed()\n embed.title = \"Relayed subreddits.\"\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n RedditConfig.subreddits,\n ctx, embed,\n footer_text=\"Use the reddit commands along with these to view their posts.\",\n empty=False,\n max_lines=15\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Reddit cog.\"\"\"\n if not RedditConfig.secret or not RedditConfig.client_id:\n log.error(\"Credentials not provided, cog not loaded.\")\n return\n bot.add_cog(Reddit(bot))\n", "path": "bot/cogs/reddit.py"}]}
4,088
212
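The fix in the record above needs two steps because `discord.utils.escape_markdown` backslash-escapes characters such as `*`, `_` and backticks but leaves square brackets untouched, and an unescaped `]` in a title closes the surrounding `[title](permalink)` link early. The patch therefore escapes the ordinary markdown first and then swaps the brackets for the look-alike characters U+298B/U+298C. A minimal sketch of the same transformation outside the cog (the helper name is invented for illustration):

```python
from discord.utils import escape_markdown

def link_safe_title(title: str) -> str:
    # Escape *, _, `, ~ etc., then replace brackets with visually similar
    # unicode characters so they cannot terminate the markdown link.
    return escape_markdown(title).replace("[", "⦋").replace("]", "⦌")

print(link_safe_title("[P] my *first* project"))
# -> ⦋P⦌ my \*first\* project
```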
gh_patches_debug_5299
rasdani/github-patches
git_diff
open-mmlab__mmdetection-7147
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Confusion matrix error While trying to generate the confusion matrix with this command: ``` python tools/analysis_tools/confusion_matrix.py ./work_dirs/perception-types--D06-01-2022--T09-23-45/perception-types.py results.pkl ./temp --show ``` I ran into this error: ``` Traceback (most recent call last): File "tools/analysis_tools/confusion_matrix.py", line 261, in <module> main() File "tools/analysis_tools/confusion_matrix.py", line 257, in main show=args.show) File "tools/analysis_tools/confusion_matrix.py", line 210, in plot_confusion_matrix '{}%'.format(int(confusion_matrix[i, j])), ValueError: cannot convert float NaN to integer ``` Would appreciate any help or suggestions! Thanks </issue> <code> [start of tools/analysis_tools/confusion_matrix.py] 1 import argparse 2 import os 3 4 import matplotlib.pyplot as plt 5 import mmcv 6 import numpy as np 7 from matplotlib.ticker import MultipleLocator 8 from mmcv import Config, DictAction 9 from mmcv.ops import nms 10 11 from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps 12 from mmdet.datasets import build_dataset 13 14 15 def parse_args(): 16 parser = argparse.ArgumentParser( 17 description='Generate confusion matrix from detection results') 18 parser.add_argument('config', help='test config file path') 19 parser.add_argument( 20 'prediction_path', help='prediction path where test .pkl result') 21 parser.add_argument( 22 'save_dir', help='directory where confusion matrix will be saved') 23 parser.add_argument( 24 '--show', action='store_true', help='show confusion matrix') 25 parser.add_argument( 26 '--color-theme', 27 default='plasma', 28 help='theme of the matrix color map') 29 parser.add_argument( 30 '--score-thr', 31 type=float, 32 default=0.3, 33 help='score threshold to filter detection bboxes') 34 parser.add_argument( 35 '--tp-iou-thr', 36 type=float, 37 default=0.5, 38 help='IoU threshold to be considered as matched') 39 parser.add_argument( 40 '--nms-iou-thr', 41 type=float, 42 default=None, 43 help='nms IoU threshold, only applied when users want to change the' 44 'nms IoU threshold.') 45 parser.add_argument( 46 '--cfg-options', 47 nargs='+', 48 action=DictAction, 49 help='override some settings in the used config, the key-value pair ' 50 'in xxx=yyy format will be merged into config file. If the value to ' 51 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 52 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 53 'Note that the quotation marks are necessary and that no white space ' 54 'is allowed.') 55 args = parser.parse_args() 56 return args 57 58 59 def calculate_confusion_matrix(dataset, 60 results, 61 score_thr=0, 62 nms_iou_thr=None, 63 tp_iou_thr=0.5): 64 """Calculate the confusion matrix. 65 66 Args: 67 dataset (Dataset): Test or val dataset. 68 results (list[ndarray]): A list of detection results in each image. 69 score_thr (float|optional): Score threshold to filter bboxes. 70 Default: 0. 71 nms_iou_thr (float|optional): nms IoU threshold, the detection results 72 have done nms in the detector, only applied when users want to 73 change the nms IoU threshold. Default: None. 74 tp_iou_thr (float|optional): IoU threshold to be considered as matched. 75 Default: 0.5. 
76 """ 77 num_classes = len(dataset.CLASSES) 78 confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1]) 79 assert len(dataset) == len(results) 80 prog_bar = mmcv.ProgressBar(len(results)) 81 for idx, per_img_res in enumerate(results): 82 if isinstance(per_img_res, tuple): 83 res_bboxes, _ = per_img_res 84 else: 85 res_bboxes = per_img_res 86 ann = dataset.get_ann_info(idx) 87 gt_bboxes = ann['bboxes'] 88 labels = ann['labels'] 89 analyze_per_img_dets(confusion_matrix, gt_bboxes, labels, res_bboxes, 90 score_thr, tp_iou_thr, nms_iou_thr) 91 prog_bar.update() 92 return confusion_matrix 93 94 95 def analyze_per_img_dets(confusion_matrix, 96 gt_bboxes, 97 gt_labels, 98 result, 99 score_thr=0, 100 tp_iou_thr=0.5, 101 nms_iou_thr=None): 102 """Analyze detection results on each image. 103 104 Args: 105 confusion_matrix (ndarray): The confusion matrix, 106 has shape (num_classes + 1, num_classes + 1). 107 gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4). 108 gt_labels (ndarray): Ground truth labels, has shape (num_gt). 109 result (ndarray): Detection results, has shape 110 (num_classes, num_bboxes, 5). 111 score_thr (float): Score threshold to filter bboxes. 112 Default: 0. 113 tp_iou_thr (float): IoU threshold to be considered as matched. 114 Default: 0.5. 115 nms_iou_thr (float|optional): nms IoU threshold, the detection results 116 have done nms in the detector, only applied when users want to 117 change the nms IoU threshold. Default: None. 118 """ 119 true_positives = np.zeros_like(gt_labels) 120 for det_label, det_bboxes in enumerate(result): 121 if nms_iou_thr: 122 det_bboxes, _ = nms( 123 det_bboxes[:, :4], 124 det_bboxes[:, -1], 125 nms_iou_thr, 126 score_threshold=score_thr) 127 ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes) 128 for i, det_bbox in enumerate(det_bboxes): 129 score = det_bbox[4] 130 det_match = 0 131 if score >= score_thr: 132 for j, gt_label in enumerate(gt_labels): 133 if ious[i, j] >= tp_iou_thr: 134 det_match += 1 135 if gt_label == det_label: 136 true_positives[j] += 1 # TP 137 confusion_matrix[gt_label, det_label] += 1 138 if det_match == 0: # BG FP 139 confusion_matrix[-1, det_label] += 1 140 for num_tp, gt_label in zip(true_positives, gt_labels): 141 if num_tp == 0: # FN 142 confusion_matrix[gt_label, -1] += 1 143 144 145 def plot_confusion_matrix(confusion_matrix, 146 labels, 147 save_dir=None, 148 show=True, 149 title='Normalized Confusion Matrix', 150 color_theme='plasma'): 151 """Draw confusion matrix with matplotlib. 152 153 Args: 154 confusion_matrix (ndarray): The confusion matrix. 155 labels (list[str]): List of class names. 156 save_dir (str|optional): If set, save the confusion matrix plot to the 157 given path. Default: None. 158 show (bool): Whether to show the plot. Default: True. 159 title (str): Title of the plot. Default: `Normalized Confusion Matrix`. 160 color_theme (str): Theme of the matrix color map. Default: `plasma`. 
161 """ 162 # normalize the confusion matrix 163 per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis] 164 confusion_matrix = \ 165 confusion_matrix.astype(np.float32) / per_label_sums * 100 166 167 num_classes = len(labels) 168 fig, ax = plt.subplots( 169 figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180) 170 cmap = plt.get_cmap(color_theme) 171 im = ax.imshow(confusion_matrix, cmap=cmap) 172 plt.colorbar(mappable=im, ax=ax) 173 174 title_font = {'weight': 'bold', 'size': 12} 175 ax.set_title(title, fontdict=title_font) 176 label_font = {'size': 10} 177 plt.ylabel('Ground Truth Label', fontdict=label_font) 178 plt.xlabel('Prediction Label', fontdict=label_font) 179 180 # draw locator 181 xmajor_locator = MultipleLocator(1) 182 xminor_locator = MultipleLocator(0.5) 183 ax.xaxis.set_major_locator(xmajor_locator) 184 ax.xaxis.set_minor_locator(xminor_locator) 185 ymajor_locator = MultipleLocator(1) 186 yminor_locator = MultipleLocator(0.5) 187 ax.yaxis.set_major_locator(ymajor_locator) 188 ax.yaxis.set_minor_locator(yminor_locator) 189 190 # draw grid 191 ax.grid(True, which='minor', linestyle='-') 192 193 # draw label 194 ax.set_xticks(np.arange(num_classes)) 195 ax.set_yticks(np.arange(num_classes)) 196 ax.set_xticklabels(labels) 197 ax.set_yticklabels(labels) 198 199 ax.tick_params( 200 axis='x', bottom=False, top=True, labelbottom=False, labeltop=True) 201 plt.setp( 202 ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor') 203 204 # draw confution matrix value 205 for i in range(num_classes): 206 for j in range(num_classes): 207 ax.text( 208 j, 209 i, 210 '{}%'.format(int(confusion_matrix[i, j])), 211 ha='center', 212 va='center', 213 color='w', 214 size=7) 215 216 ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1 217 218 fig.tight_layout() 219 if save_dir is not None: 220 plt.savefig( 221 os.path.join(save_dir, 'confusion_matrix.png'), format='png') 222 if show: 223 plt.show() 224 225 226 def main(): 227 args = parse_args() 228 229 cfg = Config.fromfile(args.config) 230 if args.cfg_options is not None: 231 cfg.merge_from_dict(args.cfg_options) 232 233 results = mmcv.load(args.prediction_path) 234 assert isinstance(results, list) 235 if isinstance(results[0], list): 236 pass 237 elif isinstance(results[0], tuple): 238 results = [result[0] for result in results] 239 else: 240 raise TypeError('invalid type of prediction results') 241 242 if isinstance(cfg.data.test, dict): 243 cfg.data.test.test_mode = True 244 elif isinstance(cfg.data.test, list): 245 for ds_cfg in cfg.data.test: 246 ds_cfg.test_mode = True 247 dataset = build_dataset(cfg.data.test) 248 249 confusion_matrix = calculate_confusion_matrix(dataset, results, 250 args.score_thr, 251 args.nms_iou_thr, 252 args.tp_iou_thr) 253 plot_confusion_matrix( 254 confusion_matrix, 255 dataset.CLASSES + ('background', ), 256 save_dir=args.save_dir, 257 show=args.show) 258 259 260 if __name__ == '__main__': 261 main() 262 [end of tools/analysis_tools/confusion_matrix.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/analysis_tools/confusion_matrix.py b/tools/analysis_tools/confusion_matrix.py --- a/tools/analysis_tools/confusion_matrix.py +++ b/tools/analysis_tools/confusion_matrix.py @@ -207,7 +207,10 @@ ax.text( j, i, - '{}%'.format(int(confusion_matrix[i, j])), + '{}%'.format( + int(confusion_matrix[ + i, + j]) if not np.isnan(confusion_matrix[i, j]) else -1), ha='center', va='center', color='w',
{"golden_diff": "diff --git a/tools/analysis_tools/confusion_matrix.py b/tools/analysis_tools/confusion_matrix.py\n--- a/tools/analysis_tools/confusion_matrix.py\n+++ b/tools/analysis_tools/confusion_matrix.py\n@@ -207,7 +207,10 @@\n ax.text(\n j,\n i,\n- '{}%'.format(int(confusion_matrix[i, j])),\n+ '{}%'.format(\n+ int(confusion_matrix[\n+ i,\n+ j]) if not np.isnan(confusion_matrix[i, j]) else -1),\n ha='center',\n va='center',\n color='w',\n", "issue": "Confusion matrix error\nWhile trying to generate the confusion matrix with this command:\r\n```\r\npython tools/analysis_tools/confusion_matrix.py ./work_dirs/perception-types--D06-01-2022--T09-23-45/perception-types.py results.pkl ./temp --show\r\n```\r\nI ran into this error:\r\n```\r\nTraceback (most recent call last):\r\n File \"tools/analysis_tools/confusion_matrix.py\", line 261, in <module>\r\n main()\r\n File \"tools/analysis_tools/confusion_matrix.py\", line 257, in main\r\n show=args.show)\r\n File \"tools/analysis_tools/confusion_matrix.py\", line 210, in plot_confusion_matrix\r\n '{}%'.format(int(confusion_matrix[i, j])),\r\nValueError: cannot convert float NaN to integer\r\n```\r\nWould appreciate any help or suggestions! Thanks\r\n\n", "before_files": [{"content": "import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport mmcv\nimport numpy as np\nfrom matplotlib.ticker import MultipleLocator\nfrom mmcv import Config, DictAction\nfrom mmcv.ops import nms\n\nfrom mmdet.core.evaluation.bbox_overlaps import bbox_overlaps\nfrom mmdet.datasets import build_dataset\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Generate confusion matrix from detection results')\n parser.add_argument('config', help='test config file path')\n parser.add_argument(\n 'prediction_path', help='prediction path where test .pkl result')\n parser.add_argument(\n 'save_dir', help='directory where confusion matrix will be saved')\n parser.add_argument(\n '--show', action='store_true', help='show confusion matrix')\n parser.add_argument(\n '--color-theme',\n default='plasma',\n help='theme of the matrix color map')\n parser.add_argument(\n '--score-thr',\n type=float,\n default=0.3,\n help='score threshold to filter detection bboxes')\n parser.add_argument(\n '--tp-iou-thr',\n type=float,\n default=0.5,\n help='IoU threshold to be considered as matched')\n parser.add_argument(\n '--nms-iou-thr',\n type=float,\n default=None,\n help='nms IoU threshold, only applied when users want to change the'\n 'nms IoU threshold.')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. If the value to '\n 'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n 'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n 'Note that the quotation marks are necessary and that no white space '\n 'is allowed.')\n args = parser.parse_args()\n return args\n\n\ndef calculate_confusion_matrix(dataset,\n results,\n score_thr=0,\n nms_iou_thr=None,\n tp_iou_thr=0.5):\n \"\"\"Calculate the confusion matrix.\n\n Args:\n dataset (Dataset): Test or val dataset.\n results (list[ndarray]): A list of detection results in each image.\n score_thr (float|optional): Score threshold to filter bboxes.\n Default: 0.\n nms_iou_thr (float|optional): nms IoU threshold, the detection results\n have done nms in the detector, only applied when users want to\n change the nms IoU threshold. 
Default: None.\n tp_iou_thr (float|optional): IoU threshold to be considered as matched.\n Default: 0.5.\n \"\"\"\n num_classes = len(dataset.CLASSES)\n confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1])\n assert len(dataset) == len(results)\n prog_bar = mmcv.ProgressBar(len(results))\n for idx, per_img_res in enumerate(results):\n if isinstance(per_img_res, tuple):\n res_bboxes, _ = per_img_res\n else:\n res_bboxes = per_img_res\n ann = dataset.get_ann_info(idx)\n gt_bboxes = ann['bboxes']\n labels = ann['labels']\n analyze_per_img_dets(confusion_matrix, gt_bboxes, labels, res_bboxes,\n score_thr, tp_iou_thr, nms_iou_thr)\n prog_bar.update()\n return confusion_matrix\n\n\ndef analyze_per_img_dets(confusion_matrix,\n gt_bboxes,\n gt_labels,\n result,\n score_thr=0,\n tp_iou_thr=0.5,\n nms_iou_thr=None):\n \"\"\"Analyze detection results on each image.\n\n Args:\n confusion_matrix (ndarray): The confusion matrix,\n has shape (num_classes + 1, num_classes + 1).\n gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4).\n gt_labels (ndarray): Ground truth labels, has shape (num_gt).\n result (ndarray): Detection results, has shape\n (num_classes, num_bboxes, 5).\n score_thr (float): Score threshold to filter bboxes.\n Default: 0.\n tp_iou_thr (float): IoU threshold to be considered as matched.\n Default: 0.5.\n nms_iou_thr (float|optional): nms IoU threshold, the detection results\n have done nms in the detector, only applied when users want to\n change the nms IoU threshold. Default: None.\n \"\"\"\n true_positives = np.zeros_like(gt_labels)\n for det_label, det_bboxes in enumerate(result):\n if nms_iou_thr:\n det_bboxes, _ = nms(\n det_bboxes[:, :4],\n det_bboxes[:, -1],\n nms_iou_thr,\n score_threshold=score_thr)\n ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes)\n for i, det_bbox in enumerate(det_bboxes):\n score = det_bbox[4]\n det_match = 0\n if score >= score_thr:\n for j, gt_label in enumerate(gt_labels):\n if ious[i, j] >= tp_iou_thr:\n det_match += 1\n if gt_label == det_label:\n true_positives[j] += 1 # TP\n confusion_matrix[gt_label, det_label] += 1\n if det_match == 0: # BG FP\n confusion_matrix[-1, det_label] += 1\n for num_tp, gt_label in zip(true_positives, gt_labels):\n if num_tp == 0: # FN\n confusion_matrix[gt_label, -1] += 1\n\n\ndef plot_confusion_matrix(confusion_matrix,\n labels,\n save_dir=None,\n show=True,\n title='Normalized Confusion Matrix',\n color_theme='plasma'):\n \"\"\"Draw confusion matrix with matplotlib.\n\n Args:\n confusion_matrix (ndarray): The confusion matrix.\n labels (list[str]): List of class names.\n save_dir (str|optional): If set, save the confusion matrix plot to the\n given path. Default: None.\n show (bool): Whether to show the plot. Default: True.\n title (str): Title of the plot. Default: `Normalized Confusion Matrix`.\n color_theme (str): Theme of the matrix color map. 
Default: `plasma`.\n \"\"\"\n # normalize the confusion matrix\n per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]\n confusion_matrix = \\\n confusion_matrix.astype(np.float32) / per_label_sums * 100\n\n num_classes = len(labels)\n fig, ax = plt.subplots(\n figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180)\n cmap = plt.get_cmap(color_theme)\n im = ax.imshow(confusion_matrix, cmap=cmap)\n plt.colorbar(mappable=im, ax=ax)\n\n title_font = {'weight': 'bold', 'size': 12}\n ax.set_title(title, fontdict=title_font)\n label_font = {'size': 10}\n plt.ylabel('Ground Truth Label', fontdict=label_font)\n plt.xlabel('Prediction Label', fontdict=label_font)\n\n # draw locator\n xmajor_locator = MultipleLocator(1)\n xminor_locator = MultipleLocator(0.5)\n ax.xaxis.set_major_locator(xmajor_locator)\n ax.xaxis.set_minor_locator(xminor_locator)\n ymajor_locator = MultipleLocator(1)\n yminor_locator = MultipleLocator(0.5)\n ax.yaxis.set_major_locator(ymajor_locator)\n ax.yaxis.set_minor_locator(yminor_locator)\n\n # draw grid\n ax.grid(True, which='minor', linestyle='-')\n\n # draw label\n ax.set_xticks(np.arange(num_classes))\n ax.set_yticks(np.arange(num_classes))\n ax.set_xticklabels(labels)\n ax.set_yticklabels(labels)\n\n ax.tick_params(\n axis='x', bottom=False, top=True, labelbottom=False, labeltop=True)\n plt.setp(\n ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')\n\n # draw confution matrix value\n for i in range(num_classes):\n for j in range(num_classes):\n ax.text(\n j,\n i,\n '{}%'.format(int(confusion_matrix[i, j])),\n ha='center',\n va='center',\n color='w',\n size=7)\n\n ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1\n\n fig.tight_layout()\n if save_dir is not None:\n plt.savefig(\n os.path.join(save_dir, 'confusion_matrix.png'), format='png')\n if show:\n plt.show()\n\n\ndef main():\n args = parse_args()\n\n cfg = Config.fromfile(args.config)\n if args.cfg_options is not None:\n cfg.merge_from_dict(args.cfg_options)\n\n results = mmcv.load(args.prediction_path)\n assert isinstance(results, list)\n if isinstance(results[0], list):\n pass\n elif isinstance(results[0], tuple):\n results = [result[0] for result in results]\n else:\n raise TypeError('invalid type of prediction results')\n\n if isinstance(cfg.data.test, dict):\n cfg.data.test.test_mode = True\n elif isinstance(cfg.data.test, list):\n for ds_cfg in cfg.data.test:\n ds_cfg.test_mode = True\n dataset = build_dataset(cfg.data.test)\n\n confusion_matrix = calculate_confusion_matrix(dataset, results,\n args.score_thr,\n args.nms_iou_thr,\n args.tp_iou_thr)\n plot_confusion_matrix(\n confusion_matrix,\n dataset.CLASSES + ('background', ),\n save_dir=args.save_dir,\n show=args.show)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/analysis_tools/confusion_matrix.py"}]}
3,631
135
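The ValueError in the record above reduces to a small numpy behavior: row-normalizing the confusion matrix divides each row by its ground-truth count, a class with zero ground-truth instances therefore computes 0/0 and fills its row with NaN, and `int()` refuses NaN. The snippet below is an illustrative sketch (the toy matrix values are invented, not taken from the record) showing the failure mode and the same `np.isnan` guard the golden diff applies:

```python
import numpy as np

# A class with no ground-truth instances produces an all-zero row, so
# row normalization performs 0/0 and fills that row with NaN.
confusion_matrix = np.array([[3.0, 1.0], [0.0, 0.0]])
per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]
with np.errstate(invalid="ignore"):  # silence the 0/0 RuntimeWarning
    normalized = confusion_matrix / per_label_sums * 100

for row in normalized:
    for cell in row:
        # int(nan) raises "cannot convert float NaN to integer", the
        # exact traceback in the issue; the -1 sentinel mirrors the
        # golden diff and keeps the plotting loop alive.
        print('{}%'.format(int(cell) if not np.isnan(cell) else -1))
```

An alternative would be `np.nan_to_num` before formatting, but the sentinel keeps empty classes visible in the rendered matrix instead of silently printing them as 0%.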
gh_patches_debug_13358
rasdani/github-patches
git_diff
mindsdb__mindsdb-1311
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reject predict(POST) API call if JSON payload is not properly wrapped by WHEN **Is your feature request related to a problem? Please describe.** The MindsDB Predict API returns predictions even when the JSON payload is not properly following the specification. (https://apidocs.mindsdb.com/#acaf5684-c1bb-4df7-bae0-3a673ac1dd11) . For instance, the payload should follow this rule but the API also accepts payload without 'when' ``` --data-raw '{ "when": { "number_of_rooms": 2, "sqft": 1700 } }' ``` ``` --data-raw '{ "number_of_rooms": 2, "sqft": 1700 }' ``` In this case, the API cannot recognize input variables in the JSON request, thus return them as 'missing' in the response text. I am not sure if the prediction itself is not reliable in this case, but we might as well block such wrong API requests. **Describe the solution you'd like** Return 'error' message if 'predict when' specification is not met </issue> <code> [start of mindsdb/api/http/namespaces/predictor.py] 1 import os 2 import time 3 4 from dateutil.parser import parse as parse_datetime 5 from flask import request 6 from flask_restx import Resource, abort 7 from flask import current_app as ca 8 9 from mindsdb.utilities.log import log 10 from mindsdb.api.http.utils import http_error 11 from mindsdb.api.http.namespaces.configs.predictors import ns_conf 12 from mindsdb.api.http.namespaces.entitites.predictor_metadata import ( 13 predictor_metadata, 14 predictor_query_params, 15 upload_predictor_params, 16 put_predictor_params 17 ) 18 from mindsdb.api.http.namespaces.entitites.predictor_status import predictor_status 19 20 21 @ns_conf.route('/') 22 class PredictorList(Resource): 23 @ns_conf.doc('list_predictors') 24 @ns_conf.marshal_list_with(predictor_status, skip_none=True) 25 def get(self): 26 '''List all predictors''' 27 return request.native_interface.get_models() 28 29 30 @ns_conf.route('/custom/<name>') 31 @ns_conf.param('name', 'The predictor identifier') 32 @ns_conf.response(404, 'predictor not found') 33 class CustomPredictor(Resource): 34 @ns_conf.doc('put_custom_predictor') 35 def put(self, name): 36 try: 37 trained_status = request.json['trained_status'] 38 except Exception: 39 trained_status = 'untrained' 40 41 predictor_file = request.files['file'] 42 fpath = os.path.join(ca.config_obj.paths['tmp'], name + '.zip') 43 with open(fpath, 'wb') as f: 44 f.write(predictor_file.read()) 45 46 request.custom_models.load_model(fpath, name, trained_status) 47 48 return f'Uploaded custom model {name}' 49 50 51 @ns_conf.route('/<name>') 52 @ns_conf.param('name', 'The predictor identifier') 53 @ns_conf.response(404, 'predictor not found') 54 class Predictor(Resource): 55 @ns_conf.doc('get_predictor') 56 @ns_conf.marshal_with(predictor_metadata, skip_none=True) 57 def get(self, name): 58 try: 59 model = request.native_interface.get_model_data(name, db_fix=False) 60 except Exception as e: 61 abort(404, "") 62 63 for k in ['train_end_at', 'updated_at', 'created_at']: 64 if k in model and model[k] is not None: 65 model[k] = parse_datetime(model[k]) 66 67 return model 68 69 @ns_conf.doc('delete_predictor') 70 def delete(self, name): 71 '''Remove predictor''' 72 request.native_interface.delete_model(name) 73 74 return '', 200 75 76 @ns_conf.doc('put_predictor', params=put_predictor_params) 77 def put(self, name): 78 '''Learning new predictor''' 79 data = request.json 80 to_predict = data.get('to_predict') 81 82 try: 83 kwargs = 
data.get('kwargs') 84 except Exception: 85 kwargs = None 86 87 if type(kwargs) != type({}): 88 kwargs = {} 89 90 if 'equal_accuracy_for_all_output_categories' not in kwargs: 91 kwargs['equal_accuracy_for_all_output_categories'] = True 92 93 if 'advanced_args' not in kwargs: 94 kwargs['advanced_args'] = {} 95 96 if 'use_selfaware_model' not in kwargs['advanced_args']: 97 kwargs['advanced_args']['use_selfaware_model'] = False 98 99 try: 100 retrain = data.get('retrain') 101 if retrain in ('true', 'True'): 102 retrain = True 103 else: 104 retrain = False 105 except Exception: 106 retrain = None 107 108 ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data') 109 from_data = request.default_store.get_datasource_obj(ds_name, raw=True) 110 111 if from_data is None: 112 return {'message': f'Can not find datasource: {ds_name}'}, 400 113 114 if retrain is True: 115 original_name = name 116 name = name + '_retrained' 117 118 model_names = [x['name'] for x in request.native_interface.get_models()] 119 if name in model_names: 120 return http_error( 121 409, 122 f"Predictor '{name}' already exists", 123 f"Predictor with name '{name}' already exists. Each predictor must have unique name." 124 ) 125 126 request.native_interface.learn(name, from_data, to_predict, request.default_store.get_datasource(ds_name)['id'], kwargs=kwargs) 127 for i in range(20): 128 try: 129 # Dirty hack, we should use a messaging queue between the predictor process and this bit of the code 130 request.native_interface.get_model_data(name) 131 break 132 except Exception: 133 time.sleep(1) 134 135 if retrain is True: 136 try: 137 request.native_interface.delete_model(original_name) 138 request.native_interface.rename_model(name, original_name) 139 except Exception: 140 pass 141 142 return '', 200 143 144 145 @ns_conf.route('/<name>/learn') 146 @ns_conf.param('name', 'The predictor identifier') 147 class PredictorLearn(Resource): 148 def post(self, name): 149 data = request.json 150 to_predict = data.get('to_predict') 151 kwargs = data.get('kwargs', None) 152 153 if not isinstance(kwargs, dict): 154 kwargs = {} 155 156 if 'advanced_args' not in kwargs: 157 kwargs['advanced_args'] = {} 158 159 ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data') 160 from_data = request.default_store.get_datasource_obj(ds_name, raw=True) 161 162 request.custom_models.learn(name, from_data, to_predict, request.default_store.get_datasource(ds_name)['id'], kwargs) 163 164 return '', 200 165 166 167 @ns_conf.route('/<name>/update') 168 @ns_conf.param('name', 'Update predictor') 169 class PredictorPredict(Resource): 170 @ns_conf.doc('Update predictor') 171 def get(self, name): 172 msg = request.native_interface.update_model(name) 173 return { 174 'message': msg 175 } 176 177 178 @ns_conf.route('/<name>/predict') 179 @ns_conf.param('name', 'The predictor identifier') 180 class PredictorPredict2(Resource): 181 @ns_conf.doc('post_predictor_predict', params=predictor_query_params) 182 def post(self, name): 183 '''Queries predictor''' 184 data = request.json 185 when = data.get('when', {}) 186 format_flag = data.get('format_flag', 'explain') 187 kwargs = data.get('kwargs', {}) 188 189 if when is None: 190 return 'No data provided for the predictions', 500 191 192 results = request.native_interface.predict(name, format_flag, when_data=when, **kwargs) 193 194 return results 195 196 197 @ns_conf.route('/<name>/predict_datasource') 198 
@ns_conf.param('name', 'The predictor identifier') 199 class PredictorPredictFromDataSource(Resource): 200 @ns_conf.doc('post_predictor_predict', params=predictor_query_params) 201 def post(self, name): 202 data = request.json 203 format_flag = data.get('format_flag', 'explain') 204 kwargs = data.get('kwargs', {}) 205 206 use_raw = False 207 208 from_data = request.default_store.get_datasource_obj(data.get('data_source_name'), raw=use_raw) 209 if from_data is None: 210 abort(400, 'No valid datasource given') 211 212 results = request.native_interface.predict(name, format_flag, when_data=from_data, **kwargs) 213 return results 214 215 216 @ns_conf.route('/<name>/rename') 217 @ns_conf.param('name', 'The predictor identifier') 218 class PredictorDownload(Resource): 219 @ns_conf.doc('get_predictor_download') 220 def get(self, name): 221 '''Export predictor to file''' 222 try: 223 new_name = request.args.get('new_name') 224 request.native_interface.rename_model(name, new_name) 225 except Exception as e: 226 return str(e), 400 227 228 return f'Renamed model to {new_name}', 200 229 [end of mindsdb/api/http/namespaces/predictor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py --- a/mindsdb/api/http/namespaces/predictor.py +++ b/mindsdb/api/http/namespaces/predictor.py @@ -182,12 +182,12 @@ def post(self, name): '''Queries predictor''' data = request.json - when = data.get('when', {}) + when = data.get('when') format_flag = data.get('format_flag', 'explain') kwargs = data.get('kwargs', {}) - if when is None: - return 'No data provided for the predictions', 500 + if isinstance(when, dict) is False or len(when) == 0: + return 'No data provided for the predictions', 400 results = request.native_interface.predict(name, format_flag, when_data=when, **kwargs)
{"golden_diff": "diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py\n--- a/mindsdb/api/http/namespaces/predictor.py\n+++ b/mindsdb/api/http/namespaces/predictor.py\n@@ -182,12 +182,12 @@\n def post(self, name):\n '''Queries predictor'''\n data = request.json\n- when = data.get('when', {})\n+ when = data.get('when')\n format_flag = data.get('format_flag', 'explain')\n kwargs = data.get('kwargs', {})\n \n- if when is None:\n- return 'No data provided for the predictions', 500\n+ if isinstance(when, dict) is False or len(when) == 0:\n+ return 'No data provided for the predictions', 400\n \n results = request.native_interface.predict(name, format_flag, when_data=when, **kwargs)\n", "issue": "Reject predict(POST) API call if JSON payload is not properly wrapped by WHEN \n**Is your feature request related to a problem? Please describe.**\r\nThe MindsDB Predict API returns predictions even when the JSON payload is not properly following the specification.\r\n(https://apidocs.mindsdb.com/#acaf5684-c1bb-4df7-bae0-3a673ac1dd11) . \r\nFor instance, the payload should follow this rule but the API also accepts payload without 'when' \r\n```\r\n--data-raw '{\r\n\t\"when\": {\r\n\t\t\"number_of_rooms\": 2,\r\n\t\t\"sqft\": 1700\r\n\t}\r\n}'\r\n```\r\n```\r\n--data-raw '{\r\n\t\t\"number_of_rooms\": 2,\r\n\t\t\"sqft\": 1700\r\n}'\r\n```\r\nIn this case, the API cannot recognize input variables in the JSON request, thus return them as 'missing' in the response text. I am not sure if the prediction itself is not reliable in this case, but we might as well block such wrong API requests. \r\n\r\n**Describe the solution you'd like**\r\nReturn 'error' message if 'predict when' specification is not met \r\n\r\n\n", "before_files": [{"content": "import os\nimport time\n\nfrom dateutil.parser import parse as parse_datetime\nfrom flask import request\nfrom flask_restx import Resource, abort\nfrom flask import current_app as ca\n\nfrom mindsdb.utilities.log import log\nfrom mindsdb.api.http.utils import http_error\nfrom mindsdb.api.http.namespaces.configs.predictors import ns_conf\nfrom mindsdb.api.http.namespaces.entitites.predictor_metadata import (\n predictor_metadata,\n predictor_query_params,\n upload_predictor_params,\n put_predictor_params\n)\nfrom mindsdb.api.http.namespaces.entitites.predictor_status import predictor_status\n\n\n@ns_conf.route('/')\nclass PredictorList(Resource):\n @ns_conf.doc('list_predictors')\n @ns_conf.marshal_list_with(predictor_status, skip_none=True)\n def get(self):\n '''List all predictors'''\n return request.native_interface.get_models()\n\n\n@ns_conf.route('/custom/<name>')\n@ns_conf.param('name', 'The predictor identifier')\n@ns_conf.response(404, 'predictor not found')\nclass CustomPredictor(Resource):\n @ns_conf.doc('put_custom_predictor')\n def put(self, name):\n try:\n trained_status = request.json['trained_status']\n except Exception:\n trained_status = 'untrained'\n\n predictor_file = request.files['file']\n fpath = os.path.join(ca.config_obj.paths['tmp'], name + '.zip')\n with open(fpath, 'wb') as f:\n f.write(predictor_file.read())\n\n request.custom_models.load_model(fpath, name, trained_status)\n\n return f'Uploaded custom model {name}'\n\n\n@ns_conf.route('/<name>')\n@ns_conf.param('name', 'The predictor identifier')\n@ns_conf.response(404, 'predictor not found')\nclass Predictor(Resource):\n @ns_conf.doc('get_predictor')\n @ns_conf.marshal_with(predictor_metadata, skip_none=True)\n def get(self, name):\n try:\n model = 
request.native_interface.get_model_data(name, db_fix=False)\n except Exception as e:\n abort(404, \"\")\n\n for k in ['train_end_at', 'updated_at', 'created_at']:\n if k in model and model[k] is not None:\n model[k] = parse_datetime(model[k])\n\n return model\n\n @ns_conf.doc('delete_predictor')\n def delete(self, name):\n '''Remove predictor'''\n request.native_interface.delete_model(name)\n\n return '', 200\n\n @ns_conf.doc('put_predictor', params=put_predictor_params)\n def put(self, name):\n '''Learning new predictor'''\n data = request.json\n to_predict = data.get('to_predict')\n\n try:\n kwargs = data.get('kwargs')\n except Exception:\n kwargs = None\n\n if type(kwargs) != type({}):\n kwargs = {}\n\n if 'equal_accuracy_for_all_output_categories' not in kwargs:\n kwargs['equal_accuracy_for_all_output_categories'] = True\n\n if 'advanced_args' not in kwargs:\n kwargs['advanced_args'] = {}\n\n if 'use_selfaware_model' not in kwargs['advanced_args']:\n kwargs['advanced_args']['use_selfaware_model'] = False\n\n try:\n retrain = data.get('retrain')\n if retrain in ('true', 'True'):\n retrain = True\n else:\n retrain = False\n except Exception:\n retrain = None\n\n ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data')\n from_data = request.default_store.get_datasource_obj(ds_name, raw=True)\n\n if from_data is None:\n return {'message': f'Can not find datasource: {ds_name}'}, 400\n\n if retrain is True:\n original_name = name\n name = name + '_retrained'\n\n model_names = [x['name'] for x in request.native_interface.get_models()]\n if name in model_names:\n return http_error(\n 409,\n f\"Predictor '{name}' already exists\",\n f\"Predictor with name '{name}' already exists. Each predictor must have unique name.\"\n )\n\n request.native_interface.learn(name, from_data, to_predict, request.default_store.get_datasource(ds_name)['id'], kwargs=kwargs)\n for i in range(20):\n try:\n # Dirty hack, we should use a messaging queue between the predictor process and this bit of the code\n request.native_interface.get_model_data(name)\n break\n except Exception:\n time.sleep(1)\n\n if retrain is True:\n try:\n request.native_interface.delete_model(original_name)\n request.native_interface.rename_model(name, original_name)\n except Exception:\n pass\n\n return '', 200\n\n\n@ns_conf.route('/<name>/learn')\n@ns_conf.param('name', 'The predictor identifier')\nclass PredictorLearn(Resource):\n def post(self, name):\n data = request.json\n to_predict = data.get('to_predict')\n kwargs = data.get('kwargs', None)\n\n if not isinstance(kwargs, dict):\n kwargs = {}\n\n if 'advanced_args' not in kwargs:\n kwargs['advanced_args'] = {}\n\n ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data')\n from_data = request.default_store.get_datasource_obj(ds_name, raw=True)\n\n request.custom_models.learn(name, from_data, to_predict, request.default_store.get_datasource(ds_name)['id'], kwargs)\n\n return '', 200\n\n\n@ns_conf.route('/<name>/update')\n@ns_conf.param('name', 'Update predictor')\nclass PredictorPredict(Resource):\n @ns_conf.doc('Update predictor')\n def get(self, name):\n msg = request.native_interface.update_model(name)\n return {\n 'message': msg\n }\n\n\n@ns_conf.route('/<name>/predict')\n@ns_conf.param('name', 'The predictor identifier')\nclass PredictorPredict2(Resource):\n @ns_conf.doc('post_predictor_predict', params=predictor_query_params)\n def post(self, name):\n '''Queries predictor'''\n 
data = request.json\n when = data.get('when', {})\n format_flag = data.get('format_flag', 'explain')\n kwargs = data.get('kwargs', {})\n\n if when is None:\n return 'No data provided for the predictions', 500\n\n results = request.native_interface.predict(name, format_flag, when_data=when, **kwargs)\n\n return results\n\n\n@ns_conf.route('/<name>/predict_datasource')\n@ns_conf.param('name', 'The predictor identifier')\nclass PredictorPredictFromDataSource(Resource):\n @ns_conf.doc('post_predictor_predict', params=predictor_query_params)\n def post(self, name):\n data = request.json\n format_flag = data.get('format_flag', 'explain')\n kwargs = data.get('kwargs', {})\n\n use_raw = False\n\n from_data = request.default_store.get_datasource_obj(data.get('data_source_name'), raw=use_raw)\n if from_data is None:\n abort(400, 'No valid datasource given')\n\n results = request.native_interface.predict(name, format_flag, when_data=from_data, **kwargs)\n return results\n\n\n@ns_conf.route('/<name>/rename')\n@ns_conf.param('name', 'The predictor identifier')\nclass PredictorDownload(Resource):\n @ns_conf.doc('get_predictor_download')\n def get(self, name):\n '''Export predictor to file'''\n try:\n new_name = request.args.get('new_name')\n request.native_interface.rename_model(name, new_name)\n except Exception as e:\n return str(e), 400\n\n return f'Renamed model to {new_name}', 200\n", "path": "mindsdb/api/http/namespaces/predictor.py"}]}
3,102
215
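The golden diff above tightens the predict endpoint in two ways: `when` no longer defaults to `{}`, so a payload that skips the wrapper is detected rather than treated as an empty query, and the response code becomes 400 rather than 500 because the malformed body is a client error. Below is a minimal, framework-free sketch of the same rule; the payloads come from the issue, while the function name is invented for illustration:

```python
def validate_predict_payload(data):
    """Mirror the golden diff's check: the body must carry a non-empty
    dict under the 'when' key or the request is rejected."""
    when = data.get('when')
    if not isinstance(when, dict) or len(when) == 0:
        return 'No data provided for the predictions', 400
    return when, 200

# Properly wrapped payload: accepted.
print(validate_predict_payload({'when': {'number_of_rooms': 2, 'sqft': 1700}}))
# Bare fields without 'when': rejected up front, instead of reaching the
# predictor and coming back with every input reported as 'missing'.
print(validate_predict_payload({'number_of_rooms': 2, 'sqft': 1700}))
```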
gh_patches_debug_28517
rasdani/github-patches
git_diff
Parsl__parsl-686
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SSHChannel fails with host-based authentication Systems using host-based authentication (without a key or a password) fail with: ``` paramiko.ssh_exception.SSHException: No authentication methods available ``` Reported by @jmoon1506 </issue> <code> [start of parsl/channels/ssh/ssh.py] 1 import errno 2 import logging 3 import os 4 5 import paramiko 6 from parsl.channels.base import Channel 7 from parsl.channels.errors import * 8 from parsl.utils import RepresentationMixin 9 10 logger = logging.getLogger(__name__) 11 12 13 class SSHChannel(Channel, RepresentationMixin): 14 ''' SSH persistent channel. This enables remote execution on sites 15 accessible via ssh. It is assumed that the user has setup host keys 16 so as to ssh to the remote host. Which goes to say that the following 17 test on the commandline should work : 18 19 >>> ssh <username>@<hostname> 20 21 ''' 22 23 def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, **kwargs): 24 ''' Initialize a persistent connection to the remote system. 25 We should know at this point whether ssh connectivity is possible 26 27 Args: 28 - hostname (String) : Hostname 29 30 KWargs: 31 - username (string) : Username on remote system 32 - password (string) : Password for remote system 33 - script_dir (string) : Full path to a script dir where 34 generated scripts could be sent to. 35 - envs (dict) : A dictionary of environment variables to be set when executing commands 36 37 Raises: 38 ''' 39 40 self.hostname = hostname 41 self.username = username 42 self.password = password 43 self.kwargs = kwargs 44 self.script_dir = script_dir 45 46 self.ssh_client = paramiko.SSHClient() 47 self.ssh_client.load_system_host_keys() 48 self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 49 50 self.envs = {} 51 if envs is not None: 52 self.envs = envs 53 54 try: 55 self.ssh_client.connect( 56 hostname, 57 username=username, 58 password=password, 59 allow_agent=True 60 ) 61 t = self.ssh_client.get_transport() 62 self.sftp_client = paramiko.SFTPClient.from_transport(t) 63 64 except paramiko.BadHostKeyException as e: 65 raise BadHostKeyException(e, self.hostname) 66 67 except paramiko.AuthenticationException as e: 68 raise AuthException(e, self.hostname) 69 70 except paramiko.SSHException as e: 71 raise SSHException(e, self.hostname) 72 73 except Exception as e: 74 raise SSHException(e, self.hostname) 75 76 def prepend_envs(self, cmd, env={}): 77 env.update(self.envs) 78 79 if len(env.keys()) > 0: 80 env_vars = ' '.join(['{}={}'.format(key, value) for key, value in env.items()]) 81 return 'env {0} {1}'.format(env_vars, cmd) 82 return cmd 83 84 def execute_wait(self, cmd, walltime=2, envs={}): 85 ''' Synchronously execute a commandline string on the shell. 86 87 Args: 88 - cmd (string) : Commandline string to execute 89 - walltime (int) : walltime in seconds, this is not really used now. 90 91 Kwargs: 92 - envs (dict) : Dictionary of env variables 93 94 Returns: 95 - retcode : Return code from the execution, -1 on fail 96 - stdout : stdout string 97 - stderr : stderr string 98 99 Raises: 100 None. 
101 ''' 102 103 # Execute the command 104 stdin, stdout, stderr = self.ssh_client.exec_command( 105 self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime 106 ) 107 # Block on exit status from the command 108 exit_status = stdout.channel.recv_exit_status() 109 return exit_status, stdout.read().decode("utf-8"), stderr.read().decode("utf-8") 110 111 def execute_no_wait(self, cmd, walltime=2, envs={}): 112 ''' Execute asynchronousely without waiting for exitcode 113 114 Args: 115 - cmd (string): Commandline string to be executed on the remote side 116 - walltime (int): timeout to exec_command 117 118 KWargs: 119 - envs (dict): A dictionary of env variables 120 121 Returns: 122 - None, stdout (readable stream), stderr (readable stream) 123 124 Raises: 125 - ChannelExecFailed (reason) 126 ''' 127 128 # Execute the command 129 stdin, stdout, stderr = self.ssh_client.exec_command( 130 self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime 131 ) 132 # Block on exit status from the command 133 return None, stdout, stderr 134 135 def push_file(self, local_source, remote_dir): 136 ''' Transport a local file to a directory on a remote machine 137 138 Args: 139 - local_source (string): Path 140 - remote_dir (string): Remote path 141 142 Returns: 143 - str: Path to copied file on remote machine 144 145 Raises: 146 - BadScriptPath : if script path on the remote side is bad 147 - BadPermsScriptPath : You do not have perms to make the channel script dir 148 - FileCopyException : FileCopy failed. 149 150 ''' 151 remote_dest = remote_dir + '/' + os.path.basename(local_source) 152 153 try: 154 self.makedirs(remote_dir, exist_ok=True) 155 except IOError as e: 156 logger.exception("Pushing {0} to {1} failed".format(local_source, remote_dir)) 157 if e.errno == 2: 158 raise BadScriptPath(e, self.hostname) 159 elif e.errno == 13: 160 raise BadPermsScriptPath(e, self.hostname) 161 else: 162 logger.exception("File push failed due to SFTP client failure") 163 raise FileCopyException(e, self.hostname) 164 try: 165 self.sftp_client.put(local_source, remote_dest, confirm=True) 166 # Set perm because some systems require the script to be executable 167 self.sftp_client.chmod(remote_dest, 0o777) 168 except Exception as e: 169 logger.exception("File push from local source {} to remote destination {} failed".format( 170 local_source, remote_dest)) 171 raise FileCopyException(e, self.hostname) 172 173 return remote_dest 174 175 def pull_file(self, remote_source, local_dir): 176 ''' Transport file on the remote side to a local directory 177 178 Args: 179 - remote_source (string): remote_source 180 - local_dir (string): Local directory to copy to 181 182 183 Returns: 184 - str: Local path to file 185 186 Raises: 187 - FileExists : Name collision at local directory. 188 - FileCopyException : FileCopy failed. 189 ''' 190 191 local_dest = local_dir + '/' + os.path.basename(remote_source) 192 193 try: 194 os.makedirs(local_dir) 195 except OSError as e: 196 if e.errno != errno.EEXIST: 197 logger.exception("Failed to create script_dir: {0}".format(script_dir)) 198 raise BadScriptPath(e, self.hostname) 199 200 # Easier to check this than to waste time trying to pull file and 201 # realize there's a problem. 
202 if os.path.exists(local_dest): 203 logger.exception("Remote file copy will overwrite a local file:{0}".format(local_dest)) 204 raise FileExists(None, self.hostname, filename=local_dest) 205 206 try: 207 self.sftp_client.get(remote_source, local_dest) 208 except Exception as e: 209 logger.exception("File pull failed") 210 raise FileCopyException(e, self.hostname) 211 212 return local_dest 213 214 def close(self): 215 return self.ssh_client.close() 216 217 def isdir(self, path): 218 """Return true if the path refers to an existing directory. 219 220 Parameters 221 ---------- 222 path : str 223 Path of directory on the remote side to check. 224 """ 225 result = True 226 try: 227 self.sftp_client.lstat(path) 228 except FileNotFoundError: 229 result = False 230 231 return result 232 233 def makedirs(self, path, mode=511, exist_ok=False): 234 """Create a directory on the remote side. 235 236 If intermediate directories do not exist, they will be created. 237 238 Parameters 239 ---------- 240 path : str 241 Path of directory on the remote side to create. 242 mode : int 243 Permissions (posix-style) for the newly-created directory. 244 exist_ok : bool 245 If False, raise an OSError if the target directory already exists. 246 """ 247 if exist_ok is False and self.isdir(path): 248 raise OSError('Target directory {} already exists'.format(path)) 249 250 self.execute_wait('mkdir -p {}'.format(path)) 251 self.sftp_client.chmod(path, mode) 252 253 def abspath(self, path): 254 """Return the absolute path on the remote side. 255 256 Parameters 257 ---------- 258 path : str 259 Path for which the absolute path will be returned. 260 """ 261 return self.sftp_client.normalize(path) 262 263 @property 264 def script_dir(self): 265 return self._script_dir 266 267 @script_dir.setter 268 def script_dir(self, value): 269 self._script_dir = value 270 [end of parsl/channels/ssh/ssh.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parsl/channels/ssh/ssh.py b/parsl/channels/ssh/ssh.py --- a/parsl/channels/ssh/ssh.py +++ b/parsl/channels/ssh/ssh.py @@ -10,6 +10,12 @@ logger = logging.getLogger(__name__) +class HostAuthSSHClient(paramiko.SSHClient): + def _auth(self, username, *args): + self._transport.auth_none(username) + return + + class SSHChannel(Channel, RepresentationMixin): ''' SSH persistent channel. This enables remote execution on sites accessible via ssh. It is assumed that the user has setup host keys @@ -20,7 +26,7 @@ ''' - def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, **kwargs): + def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, host_auth=False, **kwargs): ''' Initialize a persistent connection to the remote system. We should know at this point whether ssh connectivity is possible @@ -42,8 +48,12 @@ self.password = password self.kwargs = kwargs self.script_dir = script_dir + self.host_auth = host_auth - self.ssh_client = paramiko.SSHClient() + if host_auth: + self.ssh_client = HostAuthSSHClient() + else: + self.ssh_client = paramiko.SSHClient() self.ssh_client.load_system_host_keys() self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
{"golden_diff": "diff --git a/parsl/channels/ssh/ssh.py b/parsl/channels/ssh/ssh.py\n--- a/parsl/channels/ssh/ssh.py\n+++ b/parsl/channels/ssh/ssh.py\n@@ -10,6 +10,12 @@\n logger = logging.getLogger(__name__)\n \n \n+class HostAuthSSHClient(paramiko.SSHClient):\n+ def _auth(self, username, *args):\n+ self._transport.auth_none(username)\n+ return\n+\n+\n class SSHChannel(Channel, RepresentationMixin):\n ''' SSH persistent channel. This enables remote execution on sites\n accessible via ssh. It is assumed that the user has setup host keys\n@@ -20,7 +26,7 @@\n \n '''\n \n- def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, **kwargs):\n+ def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, host_auth=False, **kwargs):\n ''' Initialize a persistent connection to the remote system.\n We should know at this point whether ssh connectivity is possible\n \n@@ -42,8 +48,12 @@\n self.password = password\n self.kwargs = kwargs\n self.script_dir = script_dir\n+ self.host_auth = host_auth\n \n- self.ssh_client = paramiko.SSHClient()\n+ if host_auth:\n+ self.ssh_client = HostAuthSSHClient()\n+ else:\n+ self.ssh_client = paramiko.SSHClient()\n self.ssh_client.load_system_host_keys()\n self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n", "issue": "SSHChannel fails with host-based authentication\nSystems using host-based authentication (without a key or a password) fail with:\r\n```\r\nparamiko.ssh_exception.SSHException: No authentication methods available\r\n```\r\n\r\nReported by @jmoon1506\n", "before_files": [{"content": "import errno\nimport logging\nimport os\n\nimport paramiko\nfrom parsl.channels.base import Channel\nfrom parsl.channels.errors import *\nfrom parsl.utils import RepresentationMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass SSHChannel(Channel, RepresentationMixin):\n ''' SSH persistent channel. This enables remote execution on sites\n accessible via ssh. It is assumed that the user has setup host keys\n so as to ssh to the remote host. 
Which goes to say that the following\n test on the commandline should work :\n\n >>> ssh <username>@<hostname>\n\n '''\n\n def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, **kwargs):\n ''' Initialize a persistent connection to the remote system.\n We should know at this point whether ssh connectivity is possible\n\n Args:\n - hostname (String) : Hostname\n\n KWargs:\n - username (string) : Username on remote system\n - password (string) : Password for remote system\n - script_dir (string) : Full path to a script dir where\n generated scripts could be sent to.\n - envs (dict) : A dictionary of environment variables to be set when executing commands\n\n Raises:\n '''\n\n self.hostname = hostname\n self.username = username\n self.password = password\n self.kwargs = kwargs\n self.script_dir = script_dir\n\n self.ssh_client = paramiko.SSHClient()\n self.ssh_client.load_system_host_keys()\n self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n self.envs = {}\n if envs is not None:\n self.envs = envs\n\n try:\n self.ssh_client.connect(\n hostname,\n username=username,\n password=password,\n allow_agent=True\n )\n t = self.ssh_client.get_transport()\n self.sftp_client = paramiko.SFTPClient.from_transport(t)\n\n except paramiko.BadHostKeyException as e:\n raise BadHostKeyException(e, self.hostname)\n\n except paramiko.AuthenticationException as e:\n raise AuthException(e, self.hostname)\n\n except paramiko.SSHException as e:\n raise SSHException(e, self.hostname)\n\n except Exception as e:\n raise SSHException(e, self.hostname)\n\n def prepend_envs(self, cmd, env={}):\n env.update(self.envs)\n\n if len(env.keys()) > 0:\n env_vars = ' '.join(['{}={}'.format(key, value) for key, value in env.items()])\n return 'env {0} {1}'.format(env_vars, cmd)\n return cmd\n\n def execute_wait(self, cmd, walltime=2, envs={}):\n ''' Synchronously execute a commandline string on the shell.\n\n Args:\n - cmd (string) : Commandline string to execute\n - walltime (int) : walltime in seconds, this is not really used now.\n\n Kwargs:\n - envs (dict) : Dictionary of env variables\n\n Returns:\n - retcode : Return code from the execution, -1 on fail\n - stdout : stdout string\n - stderr : stderr string\n\n Raises:\n None.\n '''\n\n # Execute the command\n stdin, stdout, stderr = self.ssh_client.exec_command(\n self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime\n )\n # Block on exit status from the command\n exit_status = stdout.channel.recv_exit_status()\n return exit_status, stdout.read().decode(\"utf-8\"), stderr.read().decode(\"utf-8\")\n\n def execute_no_wait(self, cmd, walltime=2, envs={}):\n ''' Execute asynchronousely without waiting for exitcode\n\n Args:\n - cmd (string): Commandline string to be executed on the remote side\n - walltime (int): timeout to exec_command\n\n KWargs:\n - envs (dict): A dictionary of env variables\n\n Returns:\n - None, stdout (readable stream), stderr (readable stream)\n\n Raises:\n - ChannelExecFailed (reason)\n '''\n\n # Execute the command\n stdin, stdout, stderr = self.ssh_client.exec_command(\n self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime\n )\n # Block on exit status from the command\n return None, stdout, stderr\n\n def push_file(self, local_source, remote_dir):\n ''' Transport a local file to a directory on a remote machine\n\n Args:\n - local_source (string): Path\n - remote_dir (string): Remote path\n\n Returns:\n - str: Path to copied file on remote machine\n\n Raises:\n - BadScriptPath : if script 
path on the remote side is bad\n - BadPermsScriptPath : You do not have perms to make the channel script dir\n - FileCopyException : FileCopy failed.\n\n '''\n remote_dest = remote_dir + '/' + os.path.basename(local_source)\n\n try:\n self.makedirs(remote_dir, exist_ok=True)\n except IOError as e:\n logger.exception(\"Pushing {0} to {1} failed\".format(local_source, remote_dir))\n if e.errno == 2:\n raise BadScriptPath(e, self.hostname)\n elif e.errno == 13:\n raise BadPermsScriptPath(e, self.hostname)\n else:\n logger.exception(\"File push failed due to SFTP client failure\")\n raise FileCopyException(e, self.hostname)\n try:\n self.sftp_client.put(local_source, remote_dest, confirm=True)\n # Set perm because some systems require the script to be executable\n self.sftp_client.chmod(remote_dest, 0o777)\n except Exception as e:\n logger.exception(\"File push from local source {} to remote destination {} failed\".format(\n local_source, remote_dest))\n raise FileCopyException(e, self.hostname)\n\n return remote_dest\n\n def pull_file(self, remote_source, local_dir):\n ''' Transport file on the remote side to a local directory\n\n Args:\n - remote_source (string): remote_source\n - local_dir (string): Local directory to copy to\n\n\n Returns:\n - str: Local path to file\n\n Raises:\n - FileExists : Name collision at local directory.\n - FileCopyException : FileCopy failed.\n '''\n\n local_dest = local_dir + '/' + os.path.basename(remote_source)\n\n try:\n os.makedirs(local_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n logger.exception(\"Failed to create script_dir: {0}\".format(script_dir))\n raise BadScriptPath(e, self.hostname)\n\n # Easier to check this than to waste time trying to pull file and\n # realize there's a problem.\n if os.path.exists(local_dest):\n logger.exception(\"Remote file copy will overwrite a local file:{0}\".format(local_dest))\n raise FileExists(None, self.hostname, filename=local_dest)\n\n try:\n self.sftp_client.get(remote_source, local_dest)\n except Exception as e:\n logger.exception(\"File pull failed\")\n raise FileCopyException(e, self.hostname)\n\n return local_dest\n\n def close(self):\n return self.ssh_client.close()\n\n def isdir(self, path):\n \"\"\"Return true if the path refers to an existing directory.\n\n Parameters\n ----------\n path : str\n Path of directory on the remote side to check.\n \"\"\"\n result = True\n try:\n self.sftp_client.lstat(path)\n except FileNotFoundError:\n result = False\n\n return result\n\n def makedirs(self, path, mode=511, exist_ok=False):\n \"\"\"Create a directory on the remote side.\n\n If intermediate directories do not exist, they will be created.\n\n Parameters\n ----------\n path : str\n Path of directory on the remote side to create.\n mode : int\n Permissions (posix-style) for the newly-created directory.\n exist_ok : bool\n If False, raise an OSError if the target directory already exists.\n \"\"\"\n if exist_ok is False and self.isdir(path):\n raise OSError('Target directory {} already exists'.format(path))\n\n self.execute_wait('mkdir -p {}'.format(path))\n self.sftp_client.chmod(path, mode)\n\n def abspath(self, path):\n \"\"\"Return the absolute path on the remote side.\n\n Parameters\n ----------\n path : str\n Path for which the absolute path will be returned.\n \"\"\"\n return self.sftp_client.normalize(path)\n\n @property\n def script_dir(self):\n return self._script_dir\n\n @script_dir.setter\n def script_dir(self, value):\n self._script_dir = value\n", "path": "parsl/channels/ssh/ssh.py"}]}
3,212
363
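The parsl fix above deserves a note on mechanism: with neither a key nor a password, paramiko's stock `SSHClient` negotiation aborts with "No authentication methods available", so the golden diff subclasses the client and short-circuits negotiation with a single `auth_none` request, which hosts configured for host-based authentication accept. The sketch below restates that core; it assumes paramiko is installed, and it overrides `_auth`, a private paramiko method, so it depends on paramiko internals staying stable:

```python
import paramiko

class HostAuthSSHClient(paramiko.SSHClient):
    """SSHClient variant for hosts that authenticate the connecting
    machine itself rather than a user key or password."""

    def _auth(self, username, *args):
        # Skip key/password negotiation entirely and issue one
        # auth_none request at the transport level.
        self._transport.auth_none(username)
        return
```

In the patched channel this class is selected only when the new `host_auth=True` flag is passed, so existing key-based and password-based configurations keep the stock client.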
gh_patches_debug_4332
rasdani/github-patches
git_diff
bids-standard__pybids-517
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> replace_entities() modifies entities I guess it is by design, but replace_entities() modifies the input entities as it goes. I find any function that modifies the input values surprising, but it also means that previous path_patterns can affect the entities as they are iterated. I think the function should return a new entities with the correct entities if this is useful. However, this failing, the function definitely shouldn't modify entities unless it actually returns something other than None. </issue> <code> [start of bids/layout/writing.py] 1 ''' 2 Contains helper functions that involve writing operations. 3 ''' 4 5 import warnings 6 import os 7 import re 8 import sys 9 from ..utils import splitext, listify 10 from os.path import join, dirname, exists, islink, isabs, isdir 11 12 13 __all__ = ['replace_entities', 'build_path', 'write_contents_to_file'] 14 15 16 def replace_entities(entities, pattern): 17 """ 18 Replaces all entity names in a given pattern with the corresponding 19 values provided by entities. 20 21 Args: 22 entities (dict): A dictionary mapping entity names to entity values. 23 pattern (str): A path pattern that contains entity names denoted 24 by curly braces. Optional portions denoted by square braces. 25 For example: 'sub-{subject}/[var-{name}/]{id}.csv' 26 Accepted entity values, using regex matching, denoted within angle 27 brackets. 28 For example: 'sub-{subject<01|02>}/{task}.csv' 29 30 Returns: 31 A new string with the entity values inserted where entity names 32 were denoted in the provided pattern. 33 """ 34 ents = re.findall(r'\{(.*?)\}', pattern) 35 new_path = pattern 36 for ent in ents: 37 match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent) 38 if match is None: 39 return None 40 name, valid, default = match.groups() 41 default = default[1:] if default is not None else default 42 43 if name in entities and valid is not None: 44 ent_val = str(entities[name]) 45 if not re.match(valid[1:-1], ent_val): 46 if default is None: 47 return None 48 entities[name] = default 49 50 ent_val = entities.get(name, default) 51 if ent_val is None: 52 return None 53 new_path = new_path.replace('{%s}' % ent, str(ent_val)) 54 55 return new_path 56 57 58 def build_path(entities, path_patterns, strict=False): 59 """ 60 Constructs a path given a set of entities and a list of potential 61 filename patterns to use. 62 63 Args: 64 entities (dict): A dictionary mapping entity names to entity values. 65 path_patterns (str, list): One or more filename patterns to write 66 the file to. Entities should be represented by the name 67 surrounded by curly braces. Optional portions of the patterns 68 should be denoted by square brackets. Entities that require a 69 specific value for the pattern to match can pass them inside 70 carets. Default values can be assigned by specifying a string after 71 the pipe operator. E.g., (e.g., {type<image>|bold} would only match 72 the pattern if the entity 'type' was passed and its value is 73 "image", otherwise the default value "bold" will be used). 74 Example 1: 'sub-{subject}/[var-{name}/]{id}.csv' 75 Result 2: 'sub-01/var-SES/1045.csv' 76 strict (bool): If True, all passed entities must be matched inside a 77 pattern in order to be a valid match. If False, extra entities will 78 be ignored so long as all mandatory entities are found. 79 80 Returns: 81 A constructed path for this file based on the provided patterns. 
82 """ 83 path_patterns = listify(path_patterns) 84 85 # Loop over available patherns, return first one that matches all 86 for pattern in path_patterns: 87 # If strict, all entities must be contained in the pattern 88 if strict: 89 defined = re.findall(r'\{(.*?)(?:<[^>]+>)?\}', pattern) 90 if set(entities.keys()) - set(defined): 91 continue 92 # Iterate through the provided path patterns 93 new_path = pattern 94 optional_patterns = re.findall(r'\[(.*?)\]', pattern) 95 # First build from optional patterns if possible 96 for optional_pattern in optional_patterns: 97 optional_chunk = replace_entities(entities, optional_pattern) or '' 98 new_path = new_path.replace('[%s]' % optional_pattern, 99 optional_chunk) 100 # Replace remaining entities 101 new_path = replace_entities(entities, new_path) 102 103 if new_path: 104 return new_path 105 106 return None 107 108 109 def write_contents_to_file(path, contents=None, link_to=None, 110 content_mode='text', root=None, conflicts='fail'): 111 """ 112 Uses provided filename patterns to write contents to a new path, given 113 a corresponding entity map. 114 115 Args: 116 path (str): Destination path of the desired contents. 117 contents (str): Raw text or binary encoded string of contents to write 118 to the new path. 119 link_to (str): Optional path with which to create a symbolic link to. 120 Used as an alternative to and takes priority over the contents 121 argument. 122 content_mode (str): Either 'text' or 'binary' to indicate the writing 123 mode for the new file. Only relevant if contents is provided. 124 root (str): Optional root directory that all patterns are relative 125 to. Defaults to current working directory. 126 conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append' 127 that defines the desired action when the output path already 128 exists. 'fail' raises an exception; 'skip' does nothing; 129 'overwrite' overwrites the existing file; 'append' adds a suffix 130 to each file copy, starting with 1. Default is 'fail'. 131 """ 132 133 if root is None and not isabs(path): 134 root = os.getcwd() 135 136 if root: 137 path = join(root, path) 138 139 if exists(path) or islink(path): 140 if conflicts == 'fail': 141 msg = 'A file at path {} already exists.' 142 raise ValueError(msg.format(path)) 143 elif conflicts == 'skip': 144 msg = 'A file at path {} already exists, skipping writing file.' 145 warnings.warn(msg.format(path)) 146 return 147 elif conflicts == 'overwrite': 148 if isdir(path): 149 warnings.warn('New path is a directory, not going to ' 150 'overwrite it, skipping instead.') 151 return 152 os.remove(path) 153 elif conflicts == 'append': 154 i = 1 155 while i < sys.maxsize: 156 path_splits = splitext(path) 157 path_splits[0] = path_splits[0] + '_%d' % i 158 appended_filename = os.extsep.join(path_splits) 159 if not exists(appended_filename) and \ 160 not islink(appended_filename): 161 path = appended_filename 162 break 163 i += 1 164 else: 165 raise ValueError('Did not provide a valid conflicts parameter') 166 167 if not exists(dirname(path)): 168 os.makedirs(dirname(path)) 169 170 if link_to: 171 os.symlink(link_to, path) 172 elif contents: 173 mode = 'wb' if content_mode == 'binary' else 'w' 174 with open(path, mode) as f: 175 f.write(contents) 176 else: 177 raise ValueError('One of contents or link_to must be provided.') 178 [end of bids/layout/writing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bids/layout/writing.py b/bids/layout/writing.py --- a/bids/layout/writing.py +++ b/bids/layout/writing.py @@ -31,6 +31,7 @@ A new string with the entity values inserted where entity names were denoted in the provided pattern. """ + entities = entities.copy() # make a local copy, since dicts are mutable ents = re.findall(r'\{(.*?)\}', pattern) new_path = pattern for ent in ents:
{"golden_diff": "diff --git a/bids/layout/writing.py b/bids/layout/writing.py\n--- a/bids/layout/writing.py\n+++ b/bids/layout/writing.py\n@@ -31,6 +31,7 @@\n A new string with the entity values inserted where entity names\n were denoted in the provided pattern.\n \"\"\"\n+ entities = entities.copy() # make a local copy, since dicts are mutable\n ents = re.findall(r'\\{(.*?)\\}', pattern)\n new_path = pattern\n for ent in ents:\n", "issue": "replace_entities() modifies entities\nI guess it is by design, but replace_entities() modifies the input entities as it goes. I find any function that modifies the input values surprising, but it also means that previous path_patterns can affect the entities as they are iterated.\r\n\r\nI think the function should return a new entities with the correct entities if this is useful. However, this failing, the function definitely shouldn't modify entities unless it actually returns something other than None.\n", "before_files": [{"content": "'''\nContains helper functions that involve writing operations.\n'''\n\nimport warnings\nimport os\nimport re\nimport sys\nfrom ..utils import splitext, listify\nfrom os.path import join, dirname, exists, islink, isabs, isdir\n\n\n__all__ = ['replace_entities', 'build_path', 'write_contents_to_file']\n\n\ndef replace_entities(entities, pattern):\n \"\"\"\n Replaces all entity names in a given pattern with the corresponding\n values provided by entities.\n\n Args:\n entities (dict): A dictionary mapping entity names to entity values.\n pattern (str): A path pattern that contains entity names denoted\n by curly braces. Optional portions denoted by square braces.\n For example: 'sub-{subject}/[var-{name}/]{id}.csv'\n Accepted entity values, using regex matching, denoted within angle\n brackets.\n For example: 'sub-{subject<01|02>}/{task}.csv'\n\n Returns:\n A new string with the entity values inserted where entity names\n were denoted in the provided pattern.\n \"\"\"\n ents = re.findall(r'\\{(.*?)\\}', pattern)\n new_path = pattern\n for ent in ents:\n match = re.search(r'([^|<]+)(<.*?>)?(\\|.*)?', ent)\n if match is None:\n return None\n name, valid, default = match.groups()\n default = default[1:] if default is not None else default\n\n if name in entities and valid is not None:\n ent_val = str(entities[name])\n if not re.match(valid[1:-1], ent_val):\n if default is None:\n return None\n entities[name] = default\n\n ent_val = entities.get(name, default)\n if ent_val is None:\n return None\n new_path = new_path.replace('{%s}' % ent, str(ent_val))\n\n return new_path\n\n\ndef build_path(entities, path_patterns, strict=False):\n \"\"\"\n Constructs a path given a set of entities and a list of potential\n filename patterns to use.\n\n Args:\n entities (dict): A dictionary mapping entity names to entity values.\n path_patterns (str, list): One or more filename patterns to write\n the file to. Entities should be represented by the name\n surrounded by curly braces. Optional portions of the patterns\n should be denoted by square brackets. Entities that require a\n specific value for the pattern to match can pass them inside\n carets. Default values can be assigned by specifying a string after\n the pipe operator. 
E.g., (e.g., {type<image>|bold} would only match\n the pattern if the entity 'type' was passed and its value is\n \"image\", otherwise the default value \"bold\" will be used).\n Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'\n Result 2: 'sub-01/var-SES/1045.csv'\n strict (bool): If True, all passed entities must be matched inside a\n pattern in order to be a valid match. If False, extra entities will\n be ignored so long as all mandatory entities are found.\n\n Returns:\n A constructed path for this file based on the provided patterns.\n \"\"\"\n path_patterns = listify(path_patterns)\n\n # Loop over available patherns, return first one that matches all\n for pattern in path_patterns:\n # If strict, all entities must be contained in the pattern\n if strict:\n defined = re.findall(r'\\{(.*?)(?:<[^>]+>)?\\}', pattern)\n if set(entities.keys()) - set(defined):\n continue\n # Iterate through the provided path patterns\n new_path = pattern\n optional_patterns = re.findall(r'\\[(.*?)\\]', pattern)\n # First build from optional patterns if possible\n for optional_pattern in optional_patterns:\n optional_chunk = replace_entities(entities, optional_pattern) or ''\n new_path = new_path.replace('[%s]' % optional_pattern,\n optional_chunk)\n # Replace remaining entities\n new_path = replace_entities(entities, new_path)\n\n if new_path:\n return new_path\n\n return None\n\n\ndef write_contents_to_file(path, contents=None, link_to=None,\n content_mode='text', root=None, conflicts='fail'):\n \"\"\"\n Uses provided filename patterns to write contents to a new path, given\n a corresponding entity map.\n\n Args:\n path (str): Destination path of the desired contents.\n contents (str): Raw text or binary encoded string of contents to write\n to the new path.\n link_to (str): Optional path with which to create a symbolic link to.\n Used as an alternative to and takes priority over the contents\n argument.\n content_mode (str): Either 'text' or 'binary' to indicate the writing\n mode for the new file. Only relevant if contents is provided.\n root (str): Optional root directory that all patterns are relative\n to. Defaults to current working directory.\n conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'\n that defines the desired action when the output path already\n exists. 'fail' raises an exception; 'skip' does nothing;\n 'overwrite' overwrites the existing file; 'append' adds a suffix\n to each file copy, starting with 1. 
Default is 'fail'.\n \"\"\"\n\n if root is None and not isabs(path):\n root = os.getcwd()\n\n if root:\n path = join(root, path)\n\n if exists(path) or islink(path):\n if conflicts == 'fail':\n msg = 'A file at path {} already exists.'\n raise ValueError(msg.format(path))\n elif conflicts == 'skip':\n msg = 'A file at path {} already exists, skipping writing file.'\n warnings.warn(msg.format(path))\n return\n elif conflicts == 'overwrite':\n if isdir(path):\n warnings.warn('New path is a directory, not going to '\n 'overwrite it, skipping instead.')\n return\n os.remove(path)\n elif conflicts == 'append':\n i = 1\n while i < sys.maxsize:\n path_splits = splitext(path)\n path_splits[0] = path_splits[0] + '_%d' % i\n appended_filename = os.extsep.join(path_splits)\n if not exists(appended_filename) and \\\n not islink(appended_filename):\n path = appended_filename\n break\n i += 1\n else:\n raise ValueError('Did not provide a valid conflicts parameter')\n\n if not exists(dirname(path)):\n os.makedirs(dirname(path))\n\n if link_to:\n os.symlink(link_to, path)\n elif contents:\n mode = 'wb' if content_mode == 'binary' else 'w'\n with open(path, mode) as f:\n f.write(contents)\n else:\n raise ValueError('One of contents or link_to must be provided.')\n", "path": "bids/layout/writing.py"}]}
2,557
116
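The pybids fix above reduces to a single defensive `entities.copy()` at the top of `replace_entities`. Below is a minimal runnable sketch of the patched behavior, using a trimmed stand-in for the real function (only the validation and default-value logic needed for the demo is kept); it shows that a rejected value now falls back to its default without leaking the substitution into the caller's dict:

```python
import re

def replace_entities(entities, pattern):
    """Trimmed stand-in for bids.layout.writing.replace_entities (post-patch)."""
    entities = entities.copy()  # the patch: mutate only a local copy
    new_path = pattern
    for ent in re.findall(r"\{(.*?)\}", pattern):
        match = re.search(r"([^|<]+)(<.*?>)?(\|.*)?", ent)
        if match is None:
            return None
        name, valid, default = match.groups()
        default = default[1:] if default is not None else default
        if name in entities and valid is not None:
            if not re.match(valid[1:-1], str(entities[name])):
                if default is None:
                    return None
                entities[name] = default  # now touches only the local copy
        ent_val = entities.get(name, default)
        if ent_val is None:
            return None
        new_path = new_path.replace("{%s}" % ent, str(ent_val))
    return new_path

entities = {"subject": "03"}
# "03" fails the <01|02> validator, so the default "bold" is used in the path...
assert replace_entities(entities, "sub-{subject<01|02>|bold}") == "sub-bold"
# ...while the caller's dict is untouched, which is exactly what the issue asked for.
assert entities == {"subject": "03"}
print("ok")
```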
gh_patches_debug_17236
rasdani/github-patches
git_diff
pyca__cryptography-3638
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update release automation for new wheel builder Once #3636 is merged we need to update the release automation to trigger the new wheel builder and download the artifacts. </issue> <code> [start of release.py] 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 import getpass 8 import io 9 import os 10 import subprocess 11 import time 12 13 import click 14 15 from clint.textui.progress import Bar as ProgressBar 16 17 import requests 18 19 20 JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder" 21 22 23 def run(*args, **kwargs): 24 kwargs.setdefault("stderr", subprocess.STDOUT) 25 subprocess.check_output(list(args), **kwargs) 26 27 28 def wait_for_build_completed(session): 29 # Wait 20 seconds before actually checking if the build is complete, to 30 # ensure that it had time to really start. 31 time.sleep(20) 32 while True: 33 response = session.get( 34 "{0}/lastBuild/api/json/".format(JENKINS_URL), 35 headers={ 36 "Accept": "application/json", 37 } 38 ) 39 response.raise_for_status() 40 if not response.json()["building"]: 41 assert response.json()["result"] == "SUCCESS" 42 break 43 time.sleep(0.1) 44 45 46 def download_artifacts(session): 47 response = session.get( 48 "{0}/lastBuild/api/json/".format(JENKINS_URL), 49 headers={ 50 "Accept": "application/json" 51 } 52 ) 53 response.raise_for_status() 54 assert not response.json()["building"] 55 assert response.json()["result"] == "SUCCESS" 56 57 paths = [] 58 59 last_build_number = response.json()["number"] 60 for run in response.json()["runs"]: 61 if run["number"] != last_build_number: 62 print( 63 "Skipping {0} as it is not from the latest build ({1})".format( 64 run["url"], last_build_number 65 ) 66 ) 67 continue 68 69 response = session.get( 70 run["url"] + "api/json/", 71 headers={ 72 "Accept": "application/json", 73 } 74 ) 75 response.raise_for_status() 76 for artifact in response.json()["artifacts"]: 77 response = session.get( 78 "{0}artifact/{1}".format(run["url"], artifact["relativePath"]), 79 stream=True 80 ) 81 assert response.headers["content-length"] 82 print("Downloading {0}".format(artifact["fileName"])) 83 bar = ProgressBar( 84 expected_size=int(response.headers["content-length"]), 85 filled_char="=" 86 ) 87 content = io.BytesIO() 88 for data in response.iter_content(chunk_size=8192): 89 content.write(data) 90 bar.show(content.tell()) 91 assert bar.expected_size == content.tell() 92 bar.done() 93 out_path = os.path.join( 94 os.path.dirname(__file__), 95 "dist", 96 artifact["fileName"], 97 ) 98 with open(out_path, "wb") as f: 99 f.write(content.getvalue()) 100 paths.append(out_path) 101 return paths 102 103 104 @click.command() 105 @click.argument("version") 106 def release(version): 107 """ 108 ``version`` should be a string like '0.4' or '1.0'. 
109 """ 110 run("git", "tag", "-s", version, "-m", "{0} release".format(version)) 111 run("git", "push", "--tags") 112 113 run("python", "setup.py", "sdist") 114 run("python", "setup.py", "sdist", "bdist_wheel", cwd="vectors/") 115 116 run( 117 "twine", "upload", "-s", "dist/cryptography-{0}*".format(version), 118 "vectors/dist/cryptography_vectors-{0}*".format(version), shell=True 119 ) 120 121 session = requests.Session() 122 123 # This tells the CDN to delete the cached response for the URL. We do this 124 # so that the Jenkins builders will see the new sdist immediately when they 125 # go to build the wheels. 126 response = session.request( 127 "PURGE", "https://pypi.python.org/simple/cryptography/" 128 ) 129 response.raise_for_status() 130 131 username = getpass.getpass("Input the GitHub/Jenkins username: ") 132 token = getpass.getpass("Input the Jenkins token: ") 133 response = session.post( 134 "{0}/build".format(JENKINS_URL), 135 auth=requests.auth.HTTPBasicAuth( 136 username, token 137 ), 138 params={ 139 "cause": "Building wheels for {0}".format(version) 140 } 141 ) 142 response.raise_for_status() 143 wait_for_build_completed(session) 144 paths = download_artifacts(session) 145 run("twine", "upload", " ".join(paths)) 146 147 148 if __name__ == "__main__": 149 release() 150 [end of release.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/release.py b/release.py --- a/release.py +++ b/release.py @@ -17,7 +17,10 @@ import requests -JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder" +JENKINS_URL = ( + "https://ci.cryptography.io/job/cryptography-support-jobs/" + "job/wheel-builder" +) def run(*args, **kwargs): @@ -128,14 +131,11 @@ ) response.raise_for_status() - username = getpass.getpass("Input the GitHub/Jenkins username: ") token = getpass.getpass("Input the Jenkins token: ") - response = session.post( + response = session.get( "{0}/build".format(JENKINS_URL), - auth=requests.auth.HTTPBasicAuth( - username, token - ), params={ + "token": token, "cause": "Building wheels for {0}".format(version) } )
{"golden_diff": "diff --git a/release.py b/release.py\n--- a/release.py\n+++ b/release.py\n@@ -17,7 +17,10 @@\n import requests\n \n \n-JENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n+JENKINS_URL = (\n+ \"https://ci.cryptography.io/job/cryptography-support-jobs/\"\n+ \"job/wheel-builder\"\n+)\n \n \n def run(*args, **kwargs):\n@@ -128,14 +131,11 @@\n )\n response.raise_for_status()\n \n- username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n- response = session.post(\n+ response = session.get(\n \"{0}/build\".format(JENKINS_URL),\n- auth=requests.auth.HTTPBasicAuth(\n- username, token\n- ),\n params={\n+ \"token\": token,\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n", "issue": "Update release automation for new wheel builder\nOnce #3636 is merged we need to update the release automation to trigger the new wheel builder and download the artifacts.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport io\nimport os\nimport subprocess\nimport time\n\nimport click\n\nfrom clint.textui.progress import Bar as ProgressBar\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef run(*args, **kwargs):\n kwargs.setdefault(\"stderr\", subprocess.STDOUT)\n subprocess.check_output(list(args), **kwargs)\n\n\ndef wait_for_build_completed(session):\n # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n last_build_number = response.json()[\"number\"]\n for run in response.json()[\"runs\"]:\n if run[\"number\"] != last_build_number:\n print(\n \"Skipping {0} as it is not from the latest build ({1})\".format(\n run[\"url\"], last_build_number\n )\n )\n continue\n\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"]),\n stream=True\n )\n assert response.headers[\"content-length\"]\n print(\"Downloading {0}\".format(artifact[\"fileName\"]))\n bar = ProgressBar(\n expected_size=int(response.headers[\"content-length\"]),\n filled_char=\"=\"\n )\n content = io.BytesIO()\n for data in response.iter_content(chunk_size=8192):\n content.write(data)\n bar.show(content.tell())\n assert bar.expected_size == content.tell()\n bar.done()\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n 
f.write(content.getvalue())\n paths.append(out_path)\n return paths\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n run(\"python\", \"setup.py\", \"sdist\")\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n\n run(\n \"twine\", \"upload\", \"-s\", \"dist/cryptography-{0}*\".format(version),\n \"vectors/dist/cryptography_vectors-{0}*\".format(version), shell=True\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n run(\"twine\", \"upload\", \" \".join(paths))\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py"}]}
1,918
230
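The cryptography release change amounts to two substitutions: the job URL moves to the new `ci.cryptography.io` wheel-builder path, and the build trigger becomes a GET carrying a Jenkins job token as a query parameter instead of an authenticated POST. The sketch below isolates just the trigger call inside an illustrative wrapper function; the URL and parameter names are taken from the golden diff, not independently verified against the live Jenkins instance:

```python
import getpass

import requests

JENKINS_URL = (
    "https://ci.cryptography.io/job/cryptography-support-jobs/"
    "job/wheel-builder"
)

def trigger_wheel_build(version: str) -> None:
    # Remote build triggers on the new instance use a per-job token passed as
    # a query parameter, so no HTTPBasicAuth and no POST body are needed.
    session = requests.Session()
    token = getpass.getpass("Input the Jenkins token: ")
    response = session.get(
        "{0}/build".format(JENKINS_URL),
        params={
            "token": token,
            "cause": "Building wheels for {0}".format(version),
        },
    )
    response.raise_for_status()
```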
gh_patches_debug_24985
rasdani/github-patches
git_diff
comic__grand-challenge.org-2348
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Archive Serializers use `id` rather than `pk` Some of our serializers use `id` rather than `pk`, for consistency we should only use one and that should be `pk`. Check the other serializers and see if this occurs elsewhere. </issue> <code> [start of app/grandchallenge/archives/serializers.py] 1 from django.db.transaction import on_commit 2 from guardian.shortcuts import get_objects_for_user 3 from rest_framework import serializers 4 from rest_framework.fields import ReadOnlyField, URLField 5 from rest_framework.relations import HyperlinkedRelatedField 6 7 from grandchallenge.archives.models import Archive, ArchiveItem 8 from grandchallenge.archives.tasks import ( 9 start_archive_item_update_tasks, 10 update_archive_item_update_kwargs, 11 ) 12 from grandchallenge.components.serializers import ( 13 ComponentInterfaceValuePostSerializer, 14 ComponentInterfaceValueSerializer, 15 ) 16 from grandchallenge.hanging_protocols.serializers import ( 17 HangingProtocolSerializer, 18 ) 19 20 21 class ArchiveItemSerializer(serializers.ModelSerializer): 22 archive = HyperlinkedRelatedField( 23 read_only=True, view_name="api:archive-detail" 24 ) 25 values = ComponentInterfaceValueSerializer(many=True) 26 27 class Meta: 28 model = ArchiveItem 29 fields = ("id", "archive", "values") 30 31 32 class ArchiveSerializer(serializers.ModelSerializer): 33 algorithms = HyperlinkedRelatedField( 34 read_only=True, many=True, view_name="api:algorithm-detail" 35 ) 36 logo = URLField(source="logo.x20.url", read_only=True) 37 url = URLField(source="get_absolute_url", read_only=True) 38 # Include the read only name for legacy clients 39 name = ReadOnlyField() 40 hanging_protocol = HangingProtocolSerializer() 41 42 class Meta: 43 model = Archive 44 fields = ( 45 "id", 46 "name", 47 "title", 48 "algorithms", 49 "logo", 50 "description", 51 "api_url", 52 "url", 53 "hanging_protocol", 54 "view_content", 55 ) 56 57 58 class ArchiveItemPostSerializer(ArchiveItemSerializer): 59 archive = HyperlinkedRelatedField( 60 queryset=Archive.objects.none(), 61 view_name="api:archive-detail", 62 write_only=True, 63 ) 64 65 def __init__(self, *args, **kwargs): 66 super().__init__(*args, **kwargs) 67 self.fields["values"] = ComponentInterfaceValuePostSerializer( 68 many=True, context=self.context 69 ) 70 71 if "request" in self.context: 72 user = self.context["request"].user 73 74 self.fields["archive"].queryset = get_objects_for_user( 75 user, "archives.use_archive", accept_global_perms=False 76 ) 77 78 def update(self, instance, validated_data): 79 civs = validated_data.pop("values") 80 81 civ_pks_to_remove = set() 82 civ_pks_to_add = set() 83 upload_pks = {} 84 85 for civ in civs: 86 interface = civ.pop("interface", None) 87 upload_session = civ.pop("upload_session", None) 88 value = civ.pop("value", None) 89 image = civ.pop("image", None) 90 user_upload = civ.pop("user_upload", None) 91 92 update_archive_item_update_kwargs( 93 instance=instance, 94 interface=interface, 95 value=value, 96 image=image, 97 user_upload=user_upload, 98 upload_session=upload_session, 99 civ_pks_to_add=civ_pks_to_add, 100 civ_pks_to_remove=civ_pks_to_remove, 101 upload_pks=upload_pks, 102 ) 103 104 on_commit( 105 start_archive_item_update_tasks.signature( 106 kwargs={ 107 "archive_item_pk": instance.pk, 108 "civ_pks_to_add": list(civ_pks_to_add), 109 "civ_pks_to_remove": list(civ_pks_to_remove), 110 "upload_pks": upload_pks, 111 } 112 ).apply_async 113 ) 
114 115 return instance 116 [end of app/grandchallenge/archives/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/archives/serializers.py b/app/grandchallenge/archives/serializers.py --- a/app/grandchallenge/archives/serializers.py +++ b/app/grandchallenge/archives/serializers.py @@ -11,7 +11,7 @@ ) from grandchallenge.components.serializers import ( ComponentInterfaceValuePostSerializer, - ComponentInterfaceValueSerializer, + HyperlinkedComponentInterfaceValueSerializer, ) from grandchallenge.hanging_protocols.serializers import ( HangingProtocolSerializer, @@ -22,11 +22,11 @@ archive = HyperlinkedRelatedField( read_only=True, view_name="api:archive-detail" ) - values = ComponentInterfaceValueSerializer(many=True) + values = HyperlinkedComponentInterfaceValueSerializer(many=True) class Meta: model = ArchiveItem - fields = ("id", "archive", "values") + fields = ("pk", "archive", "values") class ArchiveSerializer(serializers.ModelSerializer): @@ -42,7 +42,7 @@ class Meta: model = Archive fields = ( - "id", + "pk", "name", "title", "algorithms",
{"golden_diff": "diff --git a/app/grandchallenge/archives/serializers.py b/app/grandchallenge/archives/serializers.py\n--- a/app/grandchallenge/archives/serializers.py\n+++ b/app/grandchallenge/archives/serializers.py\n@@ -11,7 +11,7 @@\n )\n from grandchallenge.components.serializers import (\n ComponentInterfaceValuePostSerializer,\n- ComponentInterfaceValueSerializer,\n+ HyperlinkedComponentInterfaceValueSerializer,\n )\n from grandchallenge.hanging_protocols.serializers import (\n HangingProtocolSerializer,\n@@ -22,11 +22,11 @@\n archive = HyperlinkedRelatedField(\n read_only=True, view_name=\"api:archive-detail\"\n )\n- values = ComponentInterfaceValueSerializer(many=True)\n+ values = HyperlinkedComponentInterfaceValueSerializer(many=True)\n \n class Meta:\n model = ArchiveItem\n- fields = (\"id\", \"archive\", \"values\")\n+ fields = (\"pk\", \"archive\", \"values\")\n \n \n class ArchiveSerializer(serializers.ModelSerializer):\n@@ -42,7 +42,7 @@\n class Meta:\n model = Archive\n fields = (\n- \"id\",\n+ \"pk\",\n \"name\",\n \"title\",\n \"algorithms\",\n", "issue": "Archive Serializers use `id` rather than `pk`\nSome of our serializers use `id` rather than `pk`, for consistency we should only use one and that should be `pk`. Check the other serializers and see if this occurs elsewhere.\n", "before_files": [{"content": "from django.db.transaction import on_commit\nfrom guardian.shortcuts import get_objects_for_user\nfrom rest_framework import serializers\nfrom rest_framework.fields import ReadOnlyField, URLField\nfrom rest_framework.relations import HyperlinkedRelatedField\n\nfrom grandchallenge.archives.models import Archive, ArchiveItem\nfrom grandchallenge.archives.tasks import (\n start_archive_item_update_tasks,\n update_archive_item_update_kwargs,\n)\nfrom grandchallenge.components.serializers import (\n ComponentInterfaceValuePostSerializer,\n ComponentInterfaceValueSerializer,\n)\nfrom grandchallenge.hanging_protocols.serializers import (\n HangingProtocolSerializer,\n)\n\n\nclass ArchiveItemSerializer(serializers.ModelSerializer):\n archive = HyperlinkedRelatedField(\n read_only=True, view_name=\"api:archive-detail\"\n )\n values = ComponentInterfaceValueSerializer(many=True)\n\n class Meta:\n model = ArchiveItem\n fields = (\"id\", \"archive\", \"values\")\n\n\nclass ArchiveSerializer(serializers.ModelSerializer):\n algorithms = HyperlinkedRelatedField(\n read_only=True, many=True, view_name=\"api:algorithm-detail\"\n )\n logo = URLField(source=\"logo.x20.url\", read_only=True)\n url = URLField(source=\"get_absolute_url\", read_only=True)\n # Include the read only name for legacy clients\n name = ReadOnlyField()\n hanging_protocol = HangingProtocolSerializer()\n\n class Meta:\n model = Archive\n fields = (\n \"id\",\n \"name\",\n \"title\",\n \"algorithms\",\n \"logo\",\n \"description\",\n \"api_url\",\n \"url\",\n \"hanging_protocol\",\n \"view_content\",\n )\n\n\nclass ArchiveItemPostSerializer(ArchiveItemSerializer):\n archive = HyperlinkedRelatedField(\n queryset=Archive.objects.none(),\n view_name=\"api:archive-detail\",\n write_only=True,\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"values\"] = ComponentInterfaceValuePostSerializer(\n many=True, context=self.context\n )\n\n if \"request\" in self.context:\n user = self.context[\"request\"].user\n\n self.fields[\"archive\"].queryset = get_objects_for_user(\n user, \"archives.use_archive\", accept_global_perms=False\n )\n\n def update(self, instance, 
validated_data):\n civs = validated_data.pop(\"values\")\n\n civ_pks_to_remove = set()\n civ_pks_to_add = set()\n upload_pks = {}\n\n for civ in civs:\n interface = civ.pop(\"interface\", None)\n upload_session = civ.pop(\"upload_session\", None)\n value = civ.pop(\"value\", None)\n image = civ.pop(\"image\", None)\n user_upload = civ.pop(\"user_upload\", None)\n\n update_archive_item_update_kwargs(\n instance=instance,\n interface=interface,\n value=value,\n image=image,\n user_upload=user_upload,\n upload_session=upload_session,\n civ_pks_to_add=civ_pks_to_add,\n civ_pks_to_remove=civ_pks_to_remove,\n upload_pks=upload_pks,\n )\n\n on_commit(\n start_archive_item_update_tasks.signature(\n kwargs={\n \"archive_item_pk\": instance.pk,\n \"civ_pks_to_add\": list(civ_pks_to_add),\n \"civ_pks_to_remove\": list(civ_pks_to_remove),\n \"upload_pks\": upload_pks,\n }\n ).apply_async\n )\n\n return instance\n", "path": "app/grandchallenge/archives/serializers.py"}]}
1,580
268
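The grand-challenge patch only renames `"id"` to `"pk"` in two `Meta.fields` tuples and swaps one nested serializer, but the issue also asks to check whether other serializers share the inconsistency. One way to do that sweep is a small AST walk over the project's `serializers.py` files; the helper below is hypothetical (its name, default root path, and output format are illustrative, not part of the grand-challenge code base):

```python
#!/usr/bin/env python3
"""Hypothetical audit helper: flag serializer `fields` tuples still using "id"."""
import ast
import pathlib

def offending_serializers(root="app"):
    for path in pathlib.Path(root).rglob("serializers.py"):
        tree = ast.parse(path.read_text())
        for node in ast.walk(tree):
            # Look for `fields = (...)` assignments anywhere in the module.
            if isinstance(node, ast.Assign):
                targets = [t.id for t in node.targets if isinstance(t, ast.Name)]
                if "fields" in targets and isinstance(node.value, (ast.Tuple, ast.List)):
                    values = [
                        e.value for e in node.value.elts
                        if isinstance(e, ast.Constant) and isinstance(e.value, str)
                    ]
                    if "id" in values:
                        yield path, node.lineno

if __name__ == "__main__":
    for path, lineno in offending_serializers():
        print(f"{path}:{lineno}: fields tuple still uses 'id'")
```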
gh_patches_debug_32080
rasdani/github-patches
git_diff
ManageIQ__integration_tests-4789
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Freeze.py screws up test running The virtualenv that is left in requirments/ dir seems to interfere with normal operations so I always need to delete it, perhaps we need some ignore somewhere or need to place it elsewhere ``` ../default/lib/python2.7/site-packages/py/_path/common.py:367: in visit for x in Visitor(fil, rec, ignore, bf, sort).gen(self): ../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen for p in self.gen(subdir): ../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen for p in self.gen(subdir): ../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen for p in self.gen(subdir): ../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen for p in self.gen(subdir): ../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen for p in self.gen(subdir): ../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen for p in self.gen(subdir): ../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen for p in self.gen(subdir): ../default/lib/python2.7/site-packages/py/_path/common.py:406: in gen if p.check(dir=1) and (rec is None or rec(p))]) ../default/lib/python2.7/site-packages/_pytest/main.py:682: in _recurse ihook = self.gethookproxy(path) ../default/lib/python2.7/site-packages/_pytest/main.py:587: in gethookproxy my_conftestmodules = pm._getconftestmodules(fspath) ../default/lib/python2.7/site-packages/_pytest/config.py:339: in _getconftestmodules mod = self._importconftest(conftestpath) ../default/lib/python2.7/site-packages/_pytest/config.py:375: in _importconftest self.consider_conftest(mod) ../default/lib/python2.7/site-packages/_pytest/config.py:398: in consider_conftest if self.register(conftestmodule, name=conftestmodule.__file__): ../default/lib/python2.7/site-packages/_pytest/config.py:250: in register ret = super(PytestPluginManager, self).register(plugin, name) ../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:371: in register hook._maybe_apply_history(hookimpl) ../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:768: in _maybe_apply_history res = self._hookexec(self, [method], kwargs) ../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:339: in _hookexec return self._inner_hookexec(hook, methods, kwargs) ../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:334: in <lambda> _MultiCall(methods, kwargs, hook.spec_opts).execute() ../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:614: in execute res = hook_impl.function(*args) requirements/temporary_venv/lib/python2.7/site-packages/tests/contrib/appengine/conftest.py:45: in pytest_configure if config.getoption('gae_sdk') is not None: ../default/lib/python2.7/site-packages/_pytest/config.py:1195: in getoption raise ValueError("no option named %r" % (name,)) E ValueError: no option named 'gae_sdk' ``` </issue> <code> [start of requirements/freeze.py] 1 #!/usr/bin/env python 2 """ 3 outputs the frozen packages 4 """ 5 import sys 6 import os 7 import argparse 8 import subprocess 9 parser = argparse.ArgumentParser(description=__doc__.strip()) 10 parser.add_argument('--venv', default='requirements/temporary_venv') 11 parser.add_argument( 12 "--template", default="requirements/template.txt",) 13 parser.add_argument( 14 "--out", default=sys.stdout, type=argparse.FileType('w'), 15 help='the file where packages should be 
written to') 16 17 18 def main(args): 19 if not os.path.isdir(args.venv): 20 subprocess.check_call([ 21 sys.executable, '-m', 'virtualenv', args.venv 22 ]) 23 subprocess.check_call([ 24 os.path.join(args.venv, 'bin/pip'), 25 'install', '-U', '-r', args.template]) 26 27 subprocess.check_call([ 28 os.path.join(args.venv, 'bin/pip'), 'freeze' 29 ], stdout=args.out) 30 31 32 if __name__ == '__main__': 33 main(parser.parse_args()) 34 [end of requirements/freeze.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/requirements/freeze.py b/requirements/freeze.py --- a/requirements/freeze.py +++ b/requirements/freeze.py @@ -2,31 +2,52 @@ """ outputs the frozen packages """ +from __future__ import print_function import sys import os import argparse import subprocess +import tempfile +import shutil parser = argparse.ArgumentParser(description=__doc__.strip()) -parser.add_argument('--venv', default='requirements/temporary_venv') +parser.add_argument('--venv', default=None) +parser.add_argument('--keep-venv', action='store_true') parser.add_argument( "--template", default="requirements/template.txt",) parser.add_argument( - "--out", default=sys.stdout, type=argparse.FileType('w'), + "--out", default=None, help='the file where packages should be written to') def main(args): - if not os.path.isdir(args.venv): + if args.venv is None: + args.venv = tempfile.mkdtemp(suffix='-miq-QE-rebuild-venv') + + try: + if not os.path.isdir(os.path.join(args.venv, 'bin')): + subprocess.check_call([ + sys.executable, '-m', 'virtualenv', args.venv + ]) subprocess.check_call([ - sys.executable, '-m', 'virtualenv', args.venv - ]) - subprocess.check_call([ - os.path.join(args.venv, 'bin/pip'), - 'install', '-U', '-r', args.template]) + os.path.join(args.venv, 'bin/pip'), + 'install', '-U', '-r', args.template]) + + if args.out is None: + subprocess.check_call([ + os.path.join(args.venv, 'bin/pip'), 'freeze' + ], stdout=sys.stdout) + else: + with open(args.out) as out: + subprocess.check_call([ + os.path.join(args.venv, 'bin/pip'), 'freeze' + ], stdout=out) - subprocess.check_call([ - os.path.join(args.venv, 'bin/pip'), 'freeze' - ], stdout=args.out) + subprocess.check_call([ + os.path.join(args.venv, 'bin/pip'), 'freeze' + ], stdout=args.out) + finally: + if not args.keep_venv: + shutil.rmtree(args.venv) if __name__ == '__main__':
{"golden_diff": "diff --git a/requirements/freeze.py b/requirements/freeze.py\n--- a/requirements/freeze.py\n+++ b/requirements/freeze.py\n@@ -2,31 +2,52 @@\n \"\"\"\n outputs the frozen packages\n \"\"\"\n+from __future__ import print_function\n import sys\n import os\n import argparse\n import subprocess\n+import tempfile\n+import shutil\n parser = argparse.ArgumentParser(description=__doc__.strip())\n-parser.add_argument('--venv', default='requirements/temporary_venv')\n+parser.add_argument('--venv', default=None)\n+parser.add_argument('--keep-venv', action='store_true')\n parser.add_argument(\n \"--template\", default=\"requirements/template.txt\",)\n parser.add_argument(\n- \"--out\", default=sys.stdout, type=argparse.FileType('w'),\n+ \"--out\", default=None,\n help='the file where packages should be written to')\n \n \n def main(args):\n- if not os.path.isdir(args.venv):\n+ if args.venv is None:\n+ args.venv = tempfile.mkdtemp(suffix='-miq-QE-rebuild-venv')\n+\n+ try:\n+ if not os.path.isdir(os.path.join(args.venv, 'bin')):\n+ subprocess.check_call([\n+ sys.executable, '-m', 'virtualenv', args.venv\n+ ])\n subprocess.check_call([\n- sys.executable, '-m', 'virtualenv', args.venv\n- ])\n- subprocess.check_call([\n- os.path.join(args.venv, 'bin/pip'),\n- 'install', '-U', '-r', args.template])\n+ os.path.join(args.venv, 'bin/pip'),\n+ 'install', '-U', '-r', args.template])\n+\n+ if args.out is None:\n+ subprocess.check_call([\n+ os.path.join(args.venv, 'bin/pip'), 'freeze'\n+ ], stdout=sys.stdout)\n+ else:\n+ with open(args.out) as out:\n+ subprocess.check_call([\n+ os.path.join(args.venv, 'bin/pip'), 'freeze'\n+ ], stdout=out)\n \n- subprocess.check_call([\n- os.path.join(args.venv, 'bin/pip'), 'freeze'\n- ], stdout=args.out)\n+ subprocess.check_call([\n+ os.path.join(args.venv, 'bin/pip'), 'freeze'\n+ ], stdout=args.out)\n+ finally:\n+ if not args.keep_venv:\n+ shutil.rmtree(args.venv)\n \n \n if __name__ == '__main__':\n", "issue": "Freeze.py screws up test running\nThe virtualenv that is left in requirments/ dir seems to interfere with normal operations so I always need to delete it, perhaps we need some ignore somewhere or need to place it elsewhere\r\n\r\n```\r\n../default/lib/python2.7/site-packages/py/_path/common.py:367: in visit\r\n for x in Visitor(fil, rec, ignore, bf, sort).gen(self):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:406: in gen\r\n if p.check(dir=1) and (rec is None or rec(p))])\r\n../default/lib/python2.7/site-packages/_pytest/main.py:682: in _recurse\r\n ihook = self.gethookproxy(path)\r\n../default/lib/python2.7/site-packages/_pytest/main.py:587: in gethookproxy\r\n my_conftestmodules = pm._getconftestmodules(fspath)\r\n../default/lib/python2.7/site-packages/_pytest/config.py:339: in _getconftestmodules\r\n mod = 
self._importconftest(conftestpath)\r\n../default/lib/python2.7/site-packages/_pytest/config.py:375: in _importconftest\r\n self.consider_conftest(mod)\r\n../default/lib/python2.7/site-packages/_pytest/config.py:398: in consider_conftest\r\n if self.register(conftestmodule, name=conftestmodule.__file__):\r\n../default/lib/python2.7/site-packages/_pytest/config.py:250: in register\r\n ret = super(PytestPluginManager, self).register(plugin, name)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:371: in register\r\n hook._maybe_apply_history(hookimpl)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:768: in _maybe_apply_history\r\n res = self._hookexec(self, [method], kwargs)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:339: in _hookexec\r\n return self._inner_hookexec(hook, methods, kwargs)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:334: in <lambda>\r\n _MultiCall(methods, kwargs, hook.spec_opts).execute()\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:614: in execute\r\n res = hook_impl.function(*args)\r\nrequirements/temporary_venv/lib/python2.7/site-packages/tests/contrib/appengine/conftest.py:45: in pytest_configure\r\n if config.getoption('gae_sdk') is not None:\r\n../default/lib/python2.7/site-packages/_pytest/config.py:1195: in getoption\r\n raise ValueError(\"no option named %r\" % (name,))\r\nE ValueError: no option named 'gae_sdk'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\noutputs the frozen packages\n\"\"\"\nimport sys\nimport os\nimport argparse\nimport subprocess\nparser = argparse.ArgumentParser(description=__doc__.strip())\nparser.add_argument('--venv', default='requirements/temporary_venv')\nparser.add_argument(\n \"--template\", default=\"requirements/template.txt\",)\nparser.add_argument(\n \"--out\", default=sys.stdout, type=argparse.FileType('w'),\n help='the file where packages should be written to')\n\n\ndef main(args):\n if not os.path.isdir(args.venv):\n subprocess.check_call([\n sys.executable, '-m', 'virtualenv', args.venv\n ])\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'),\n 'install', '-U', '-r', args.template])\n\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'), 'freeze'\n ], stdout=args.out)\n\n\nif __name__ == '__main__':\n main(parser.parse_args())\n", "path": "requirements/freeze.py"}]}
1,675
558
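The heart of the freeze.py fix is relocating the throwaway virtualenv from `requirements/temporary_venv`, inside the repository where pytest collection trips over conftest files bundled in its site-packages, to a temporary directory that is always removed. Note that the golden diff appears to retain a duplicated `pip freeze` call writing to `args.out` and opens `args.out` without write mode; the sketch below keeps only the uncontroversial tempdir-with-cleanup pattern, with illustrative paths, and assumes the `virtualenv` package is installed:

```python
import os
import shutil
import subprocess
import sys
import tempfile

def freeze(template="requirements/template.txt"):
    # Build the venv under the system temp dir, never under the repository,
    # so stray site-packages conftest.py files can't break pytest collection.
    venv = tempfile.mkdtemp(suffix="-freeze-venv")
    try:
        subprocess.check_call([sys.executable, "-m", "virtualenv", venv])
        pip = os.path.join(venv, "bin", "pip")
        subprocess.check_call([pip, "install", "-U", "-r", template])
        subprocess.check_call([pip, "freeze"], stdout=sys.stdout)
    finally:
        shutil.rmtree(venv)  # clean up even on failure; nothing is left behind

if __name__ == "__main__":
    freeze()
```

A `norecursedirs` entry in the pytest configuration would also mask the symptom, but deleting the directory addresses the root cause the traceback points at.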
gh_patches_debug_26060
rasdani/github-patches
git_diff
doccano__doccano-2099
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Broken: Importing and Exporting SequenceLabeling projects with relations How to reproduce the behaviour --------- <!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions --> <!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. --> Your Environment --------- <!-- Include details of your environment.--> * Operating System: Dockeer * Python Version Used: 3.8 * When you install doccano: 11/1/22 * How did you install doccano (Heroku button etc): docker-compose I observed issues with the UI and interacting with relation labels. I am able create a relation label between two span labels in the UI, however the relation array get exported empty when going through the Export Dataset -> JSONL(relation) path. Furthermore, issues occur when trying to import relations as well. The import dataset flow only takes one "Column Label" field. When that is set to label, all of the span label and relation label info are uploaded as metadata. ![image](https://user-images.githubusercontent.com/81196676/199328801-c98d3693-ff8f-45b3-80a3-7d916075ec56.png) If the "Column Label" field is set to "entities" the span labels are imported and only the relation label data is uploaded as metadata. ![image](https://user-images.githubusercontent.com/81196676/199329062-26b1ea53-8b03-464d-932e-15e0b0e83061.png) The first goal would be that the export process, exports in the format displayed when you select the JSONL(relation) option from Export Dataset. ie. ``` { "text": "Google was founded on September 4, 1998, by Larry Page and Sergey Brin.", "entities": [ { "id": 0, "start_offset": 0, "end_offset": 6, "label": "ORG" }, { "id": 1, "start_offset": 22, "end_offset": 39, "label": "DATE" }, { "id": 2, "start_offset": 44, "end_offset": 54, "label": "PERSON" }, { "id": 3, "start_offset": 59, "end_offset": 70, "label": "PERSON" } ], "relations": [ { "id": 0, "from_id": 0, "to_id": 1, "type": "foundedAt" }, { "id": 1, "from_id": 0, "to_id": 2, "type": "foundedBy" }, { "id": 2, "from_id": 0, "to_id": 3, "type": "foundedBy" } ] } ``` The second goal would be the ability to upload span labels and relation labels. Basically, Import Dataset should work with the Export Dataset -> JSONL(relation) results. I'll include a JSONL testing file for imports. 
[relation_import_sample.zip](https://github.com/doccano/doccano/files/9913661/relation_import_sample.zip) </issue> <code> [start of backend/data_import/pipeline/label.py] 1 import abc 2 import uuid 3 from typing import Any, Optional 4 5 from pydantic import UUID4, BaseModel, ConstrainedStr, NonNegativeInt, root_validator 6 7 from .label_types import LabelTypes 8 from examples.models import Example 9 from label_types.models import CategoryType, LabelType, RelationType, SpanType 10 from labels.models import Category as CategoryModel 11 from labels.models import Label as LabelModel 12 from labels.models import Relation as RelationModel 13 from labels.models import Span as SpanModel 14 from labels.models import TextLabel as TextLabelModel 15 from projects.models import Project 16 17 18 class NonEmptyStr(ConstrainedStr): 19 min_length = 1 20 21 22 class Label(BaseModel, abc.ABC): 23 id: int = -1 24 uuid: UUID4 25 example_uuid: UUID4 26 27 def __init__(self, **data): 28 data["uuid"] = uuid.uuid4() 29 super().__init__(**data) 30 31 @abc.abstractmethod 32 def __lt__(self, other): 33 raise NotImplementedError() 34 35 @classmethod 36 def parse(cls, example_uuid: UUID4, obj: Any): 37 raise NotImplementedError() 38 39 @abc.abstractmethod 40 def create_type(self, project: Project) -> Optional[LabelType]: 41 raise NotImplementedError() 42 43 @abc.abstractmethod 44 def create(self, user, example: Example, types: LabelTypes, **kwargs) -> LabelModel: 45 raise NotImplementedError 46 47 def __hash__(self): 48 return hash(tuple(self.dict())) 49 50 51 class CategoryLabel(Label): 52 label: NonEmptyStr 53 54 def __lt__(self, other): 55 return self.label < other.label 56 57 @classmethod 58 def parse(cls, example_uuid: UUID4, obj: Any): 59 return cls(example_uuid=example_uuid, label=obj) 60 61 def create_type(self, project: Project) -> Optional[LabelType]: 62 return CategoryType(text=self.label, project=project) 63 64 def create(self, user, example: Example, types: LabelTypes, **kwargs): 65 return CategoryModel(uuid=self.uuid, user=user, example=example, label=types[self.label]) 66 67 68 class SpanLabel(Label): 69 label: NonEmptyStr 70 start_offset: NonNegativeInt 71 end_offset: NonNegativeInt 72 73 def __lt__(self, other): 74 return self.start_offset < other.start_offset 75 76 @root_validator 77 def check_start_offset_is_less_than_end_offset(cls, values): 78 start_offset, end_offset = values.get("start_offset"), values.get("end_offset") 79 if start_offset >= end_offset: 80 raise ValueError("start_offset must be less than end_offset.") 81 return values 82 83 @classmethod 84 def parse(cls, example_uuid: UUID4, obj: Any): 85 if isinstance(obj, list) or isinstance(obj, tuple): 86 columns = ["start_offset", "end_offset", "label"] 87 obj = zip(columns, obj) 88 return cls(example_uuid=example_uuid, **dict(obj)) 89 elif isinstance(obj, dict): 90 return cls(example_uuid=example_uuid, **obj) 91 raise ValueError("SpanLabel.parse()") 92 93 def create_type(self, project: Project) -> Optional[LabelType]: 94 return SpanType(text=self.label, project=project) 95 96 def create(self, user, example: Example, types: LabelTypes, **kwargs): 97 return SpanModel( 98 uuid=self.uuid, 99 user=user, 100 example=example, 101 start_offset=self.start_offset, 102 end_offset=self.end_offset, 103 label=types[self.label], 104 ) 105 106 107 class TextLabel(Label): 108 text: NonEmptyStr 109 110 def __lt__(self, other): 111 return self.text < other.text 112 113 @classmethod 114 def parse(cls, example_uuid: UUID4, obj: Any): 115 return 
cls(example_uuid=example_uuid, text=obj) 116 117 def create_type(self, project: Project) -> Optional[LabelType]: 118 return None 119 120 def create(self, user, example: Example, types: LabelTypes, **kwargs): 121 return TextLabelModel(uuid=self.uuid, user=user, example=example, text=self.text) 122 123 124 class RelationLabel(Label): 125 from_id: int 126 to_id: int 127 type: NonEmptyStr 128 129 def __lt__(self, other): 130 return self.from_id < other.from_id 131 132 @classmethod 133 def parse(cls, example_uuid: UUID4, obj: Any): 134 return cls(example_uuid=example_uuid, **obj) 135 136 def create_type(self, project: Project) -> Optional[LabelType]: 137 return RelationType(text=self.type, project=project) 138 139 def create(self, user, example: Example, types: LabelTypes, **kwargs): 140 return RelationModel( 141 uuid=self.uuid, 142 user=user, 143 example=example, 144 type=types[self.type], 145 from_id=kwargs["id_to_span"][self.from_id], 146 to_id=kwargs["id_to_span"][self.to_id], 147 ) 148 [end of backend/data_import/pipeline/label.py] [start of backend/data_import/pipeline/labels.py] 1 import abc 2 from itertools import groupby 3 from typing import Dict, List 4 5 from .examples import Examples 6 from .label import Label 7 from .label_types import LabelTypes 8 from labels.models import Category as CategoryModel 9 from labels.models import Label as LabelModel 10 from labels.models import Relation as RelationModel 11 from labels.models import Span as SpanModel 12 from labels.models import TextLabel as TextLabelModel 13 from projects.models import Project 14 15 16 class Labels(abc.ABC): 17 label_model = LabelModel 18 19 def __init__(self, labels: List[Label], types: LabelTypes): 20 self.labels = labels 21 self.types = types 22 23 def __len__(self) -> int: 24 return len(self.labels) 25 26 def clean(self, project: Project): 27 pass 28 29 def save_types(self, project: Project): 30 types = [label.create_type(project) for label in self.labels] 31 filtered_types = list(filter(None, types)) 32 self.types.save(filtered_types) 33 self.types.update(project) 34 35 def save(self, user, examples: Examples, **kwargs): 36 labels = [ 37 label.create(user, examples[label.example_uuid], self.types, **kwargs) 38 for label in self.labels 39 if label.example_uuid in examples 40 ] 41 self.label_model.objects.bulk_create(labels) 42 43 44 class Categories(Labels): 45 label_model = CategoryModel 46 47 def clean(self, project: Project): 48 exclusive = getattr(project, "single_class_classification", False) 49 if exclusive: 50 groups = groupby(self.labels, lambda label: label.example_uuid) 51 self.labels = [next(group) for _, group in groups] 52 53 54 class Spans(Labels): 55 label_model = SpanModel 56 57 def clean(self, project: Project): 58 allow_overlapping = getattr(project, "allow_overlapping", False) 59 if allow_overlapping: 60 return 61 spans = [] 62 groups = groupby(self.labels, lambda label: label.example_uuid) 63 for _, group in groups: 64 labels = sorted(group) 65 last_offset = -1 66 for label in labels: 67 if getattr(label, "start_offset") >= last_offset: 68 last_offset = getattr(label, "end_offset") 69 spans.append(label) 70 self.labels = spans 71 72 @property 73 def id_to_span(self) -> Dict[int, SpanModel]: 74 span_uuids = [str(label.uuid) for label in self.labels] 75 spans = SpanModel.objects.filter(uuid__in=span_uuids) 76 uuid_to_span = {span.uuid: span for span in spans} 77 return {span.id: uuid_to_span[span.uuid] for span in self.labels} 78 79 80 class Texts(Labels): 81 label_model = TextLabelModel 82 83 84 
class Relations(Labels): 85 label_model = RelationModel 86 87 def save(self, user, examples: Examples, **kwargs): 88 id_to_span = kwargs["spans"].id_to_span 89 super().save(user, examples, id_to_span=id_to_span) 90 [end of backend/data_import/pipeline/labels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/data_import/pipeline/label.py b/backend/data_import/pipeline/label.py --- a/backend/data_import/pipeline/label.py +++ b/backend/data_import/pipeline/label.py @@ -142,6 +142,6 @@ user=user, example=example, type=types[self.type], - from_id=kwargs["id_to_span"][self.from_id], - to_id=kwargs["id_to_span"][self.to_id], + from_id=kwargs["id_to_span"][(self.from_id, str(self.example_uuid))], + to_id=kwargs["id_to_span"][(self.to_id, str(self.example_uuid))], ) diff --git a/backend/data_import/pipeline/labels.py b/backend/data_import/pipeline/labels.py --- a/backend/data_import/pipeline/labels.py +++ b/backend/data_import/pipeline/labels.py @@ -1,6 +1,6 @@ import abc from itertools import groupby -from typing import Dict, List +from typing import Dict, List, Tuple from .examples import Examples from .label import Label @@ -70,11 +70,11 @@ self.labels = spans @property - def id_to_span(self) -> Dict[int, SpanModel]: - span_uuids = [str(label.uuid) for label in self.labels] - spans = SpanModel.objects.filter(uuid__in=span_uuids) + def id_to_span(self) -> Dict[Tuple[int, str], SpanModel]: + uuids = [str(span.uuid) for span in self.labels] + spans = SpanModel.objects.filter(uuid__in=uuids) uuid_to_span = {span.uuid: span for span in spans} - return {span.id: uuid_to_span[span.uuid] for span in self.labels} + return {(span.id, str(span.example_uuid)): uuid_to_span[span.uuid] for span in self.labels} class Texts(Labels):
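Read together, the two hunks change the contract of `id_to_span` from `Dict[int, SpanModel]` to `Dict[Tuple[int, str], SpanModel]`, and `RelationLabel.create` now looks spans up under the `(id, str(example_uuid))` pair. A rough property check for the new mapping; `FakeSpan` below is a hypothetical stand-in for the Django model, not doccano's actual class:

```python
# Hypothetical stand-in objects; the real code uses Django model instances.
from dataclasses import dataclass
from uuid import uuid4

@dataclass(frozen=True)
class FakeSpan:
    id: int            # id as written in the uploaded file (unique per example only)
    example_uuid: str  # which example the span belongs to

def id_to_span(spans):
    # Same composite keying as the patched Spans.id_to_span property.
    return {(s.id, s.example_uuid): s for s in spans}

ex_a, ex_b = str(uuid4()), str(uuid4())
spans = [FakeSpan(0, ex_a), FakeSpan(1, ex_a), FakeSpan(0, ex_b)]
mapping = id_to_span(spans)

assert len(mapping) == len(spans)               # nothing is overwritten
assert mapping[(0, ex_b)].example_uuid == ex_b  # relations resolve within their own example
```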
{"golden_diff": "diff --git a/backend/data_import/pipeline/label.py b/backend/data_import/pipeline/label.py\n--- a/backend/data_import/pipeline/label.py\n+++ b/backend/data_import/pipeline/label.py\n@@ -142,6 +142,6 @@\n user=user,\n example=example,\n type=types[self.type],\n- from_id=kwargs[\"id_to_span\"][self.from_id],\n- to_id=kwargs[\"id_to_span\"][self.to_id],\n+ from_id=kwargs[\"id_to_span\"][(self.from_id, str(self.example_uuid))],\n+ to_id=kwargs[\"id_to_span\"][(self.to_id, str(self.example_uuid))],\n )\ndiff --git a/backend/data_import/pipeline/labels.py b/backend/data_import/pipeline/labels.py\n--- a/backend/data_import/pipeline/labels.py\n+++ b/backend/data_import/pipeline/labels.py\n@@ -1,6 +1,6 @@\n import abc\n from itertools import groupby\n-from typing import Dict, List\n+from typing import Dict, List, Tuple\n \n from .examples import Examples\n from .label import Label\n@@ -70,11 +70,11 @@\n self.labels = spans\n \n @property\n- def id_to_span(self) -> Dict[int, SpanModel]:\n- span_uuids = [str(label.uuid) for label in self.labels]\n- spans = SpanModel.objects.filter(uuid__in=span_uuids)\n+ def id_to_span(self) -> Dict[Tuple[int, str], SpanModel]:\n+ uuids = [str(span.uuid) for span in self.labels]\n+ spans = SpanModel.objects.filter(uuid__in=uuids)\n uuid_to_span = {span.uuid: span for span in spans}\n- return {span.id: uuid_to_span[span.uuid] for span in self.labels}\n+ return {(span.id, str(span.example_uuid)): uuid_to_span[span.uuid] for span in self.labels}\n \n \n class Texts(Labels):\n", "issue": "Broken: Importing and Exporting SequenceLabeling projects with relations\nHow to reproduce the behaviour\r\n---------\r\n<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->\r\n\r\n<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: Dockeer\r\n* Python Version Used: 3.8\r\n* When you install doccano: 11/1/22\r\n* How did you install doccano (Heroku button etc): docker-compose\r\n\r\nI observed issues with the UI and interacting with relation labels. I am able create a relation label between two span labels in the UI, however the relation array get exported empty when going through the Export Dataset -> JSONL(relation) path. Furthermore, issues occur when trying to import relations as well. The import dataset flow only takes one \"Column Label\" field. When that is set to label, all of the span label and relation label info are uploaded as metadata. \r\n\r\n![image](https://user-images.githubusercontent.com/81196676/199328801-c98d3693-ff8f-45b3-80a3-7d916075ec56.png)\r\n\r\nIf the \"Column Label\" field is set to \"entities\" the span labels are imported and only the relation label data is uploaded as metadata. 
\r\n\r\n![image](https://user-images.githubusercontent.com/81196676/199329062-26b1ea53-8b03-464d-932e-15e0b0e83061.png)\r\n\r\nThe first goal would be that the export process, exports in the format displayed when you select the JSONL(relation) option from Export Dataset.\r\n\r\nie.\r\n\r\n```\r\n{\r\n \"text\": \"Google was founded on September 4, 1998, by Larry Page and Sergey Brin.\",\r\n \"entities\": [\r\n {\r\n \"id\": 0,\r\n \"start_offset\": 0,\r\n \"end_offset\": 6,\r\n \"label\": \"ORG\"\r\n },\r\n {\r\n \"id\": 1,\r\n \"start_offset\": 22,\r\n \"end_offset\": 39,\r\n \"label\": \"DATE\"\r\n },\r\n {\r\n \"id\": 2,\r\n \"start_offset\": 44,\r\n \"end_offset\": 54,\r\n \"label\": \"PERSON\"\r\n },\r\n {\r\n \"id\": 3,\r\n \"start_offset\": 59,\r\n \"end_offset\": 70,\r\n \"label\": \"PERSON\"\r\n }\r\n ],\r\n \"relations\": [\r\n {\r\n \"id\": 0,\r\n \"from_id\": 0,\r\n \"to_id\": 1,\r\n \"type\": \"foundedAt\"\r\n },\r\n {\r\n \"id\": 1,\r\n \"from_id\": 0,\r\n \"to_id\": 2,\r\n \"type\": \"foundedBy\"\r\n },\r\n {\r\n \"id\": 2,\r\n \"from_id\": 0,\r\n \"to_id\": 3,\r\n \"type\": \"foundedBy\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\nThe second goal would be the ability to upload span labels and relation labels. Basically, Import Dataset should work with the Export Dataset -> JSONL(relation) results. I'll include a JSONL testing file for imports.\r\n\r\n[relation_import_sample.zip](https://github.com/doccano/doccano/files/9913661/relation_import_sample.zip)\n", "before_files": [{"content": "import abc\nimport uuid\nfrom typing import Any, Optional\n\nfrom pydantic import UUID4, BaseModel, ConstrainedStr, NonNegativeInt, root_validator\n\nfrom .label_types import LabelTypes\nfrom examples.models import Example\nfrom label_types.models import CategoryType, LabelType, RelationType, SpanType\nfrom labels.models import Category as CategoryModel\nfrom labels.models import Label as LabelModel\nfrom labels.models import Relation as RelationModel\nfrom labels.models import Span as SpanModel\nfrom labels.models import TextLabel as TextLabelModel\nfrom projects.models import Project\n\n\nclass NonEmptyStr(ConstrainedStr):\n min_length = 1\n\n\nclass Label(BaseModel, abc.ABC):\n id: int = -1\n uuid: UUID4\n example_uuid: UUID4\n\n def __init__(self, **data):\n data[\"uuid\"] = uuid.uuid4()\n super().__init__(**data)\n\n @abc.abstractmethod\n def __lt__(self, other):\n raise NotImplementedError()\n\n @classmethod\n def parse(cls, example_uuid: UUID4, obj: Any):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def create_type(self, project: Project) -> Optional[LabelType]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def create(self, user, example: Example, types: LabelTypes, **kwargs) -> LabelModel:\n raise NotImplementedError\n\n def __hash__(self):\n return hash(tuple(self.dict()))\n\n\nclass CategoryLabel(Label):\n label: NonEmptyStr\n\n def __lt__(self, other):\n return self.label < other.label\n\n @classmethod\n def parse(cls, example_uuid: UUID4, obj: Any):\n return cls(example_uuid=example_uuid, label=obj)\n\n def create_type(self, project: Project) -> Optional[LabelType]:\n return CategoryType(text=self.label, project=project)\n\n def create(self, user, example: Example, types: LabelTypes, **kwargs):\n return CategoryModel(uuid=self.uuid, user=user, example=example, label=types[self.label])\n\n\nclass SpanLabel(Label):\n label: NonEmptyStr\n start_offset: NonNegativeInt\n end_offset: NonNegativeInt\n\n def __lt__(self, other):\n return self.start_offset < 
other.start_offset\n\n @root_validator\n def check_start_offset_is_less_than_end_offset(cls, values):\n start_offset, end_offset = values.get(\"start_offset\"), values.get(\"end_offset\")\n if start_offset >= end_offset:\n raise ValueError(\"start_offset must be less than end_offset.\")\n return values\n\n @classmethod\n def parse(cls, example_uuid: UUID4, obj: Any):\n if isinstance(obj, list) or isinstance(obj, tuple):\n columns = [\"start_offset\", \"end_offset\", \"label\"]\n obj = zip(columns, obj)\n return cls(example_uuid=example_uuid, **dict(obj))\n elif isinstance(obj, dict):\n return cls(example_uuid=example_uuid, **obj)\n raise ValueError(\"SpanLabel.parse()\")\n\n def create_type(self, project: Project) -> Optional[LabelType]:\n return SpanType(text=self.label, project=project)\n\n def create(self, user, example: Example, types: LabelTypes, **kwargs):\n return SpanModel(\n uuid=self.uuid,\n user=user,\n example=example,\n start_offset=self.start_offset,\n end_offset=self.end_offset,\n label=types[self.label],\n )\n\n\nclass TextLabel(Label):\n text: NonEmptyStr\n\n def __lt__(self, other):\n return self.text < other.text\n\n @classmethod\n def parse(cls, example_uuid: UUID4, obj: Any):\n return cls(example_uuid=example_uuid, text=obj)\n\n def create_type(self, project: Project) -> Optional[LabelType]:\n return None\n\n def create(self, user, example: Example, types: LabelTypes, **kwargs):\n return TextLabelModel(uuid=self.uuid, user=user, example=example, text=self.text)\n\n\nclass RelationLabel(Label):\n from_id: int\n to_id: int\n type: NonEmptyStr\n\n def __lt__(self, other):\n return self.from_id < other.from_id\n\n @classmethod\n def parse(cls, example_uuid: UUID4, obj: Any):\n return cls(example_uuid=example_uuid, **obj)\n\n def create_type(self, project: Project) -> Optional[LabelType]:\n return RelationType(text=self.type, project=project)\n\n def create(self, user, example: Example, types: LabelTypes, **kwargs):\n return RelationModel(\n uuid=self.uuid,\n user=user,\n example=example,\n type=types[self.type],\n from_id=kwargs[\"id_to_span\"][self.from_id],\n to_id=kwargs[\"id_to_span\"][self.to_id],\n )\n", "path": "backend/data_import/pipeline/label.py"}, {"content": "import abc\nfrom itertools import groupby\nfrom typing import Dict, List\n\nfrom .examples import Examples\nfrom .label import Label\nfrom .label_types import LabelTypes\nfrom labels.models import Category as CategoryModel\nfrom labels.models import Label as LabelModel\nfrom labels.models import Relation as RelationModel\nfrom labels.models import Span as SpanModel\nfrom labels.models import TextLabel as TextLabelModel\nfrom projects.models import Project\n\n\nclass Labels(abc.ABC):\n label_model = LabelModel\n\n def __init__(self, labels: List[Label], types: LabelTypes):\n self.labels = labels\n self.types = types\n\n def __len__(self) -> int:\n return len(self.labels)\n\n def clean(self, project: Project):\n pass\n\n def save_types(self, project: Project):\n types = [label.create_type(project) for label in self.labels]\n filtered_types = list(filter(None, types))\n self.types.save(filtered_types)\n self.types.update(project)\n\n def save(self, user, examples: Examples, **kwargs):\n labels = [\n label.create(user, examples[label.example_uuid], self.types, **kwargs)\n for label in self.labels\n if label.example_uuid in examples\n ]\n self.label_model.objects.bulk_create(labels)\n\n\nclass Categories(Labels):\n label_model = CategoryModel\n\n def clean(self, project: Project):\n exclusive = 
getattr(project, \"single_class_classification\", False)\n if exclusive:\n groups = groupby(self.labels, lambda label: label.example_uuid)\n self.labels = [next(group) for _, group in groups]\n\n\nclass Spans(Labels):\n label_model = SpanModel\n\n def clean(self, project: Project):\n allow_overlapping = getattr(project, \"allow_overlapping\", False)\n if allow_overlapping:\n return\n spans = []\n groups = groupby(self.labels, lambda label: label.example_uuid)\n for _, group in groups:\n labels = sorted(group)\n last_offset = -1\n for label in labels:\n if getattr(label, \"start_offset\") >= last_offset:\n last_offset = getattr(label, \"end_offset\")\n spans.append(label)\n self.labels = spans\n\n @property\n def id_to_span(self) -> Dict[int, SpanModel]:\n span_uuids = [str(label.uuid) for label in self.labels]\n spans = SpanModel.objects.filter(uuid__in=span_uuids)\n uuid_to_span = {span.uuid: span for span in spans}\n return {span.id: uuid_to_span[span.uuid] for span in self.labels}\n\n\nclass Texts(Labels):\n label_model = TextLabelModel\n\n\nclass Relations(Labels):\n label_model = RelationModel\n\n def save(self, user, examples: Examples, **kwargs):\n id_to_span = kwargs[\"spans\"].id_to_span\n super().save(user, examples, id_to_span=id_to_span)\n", "path": "backend/data_import/pipeline/labels.py"}]}
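The JSONL(relation) shape quoted in the issue above gives every record `text`, `entities`, and `relations`, with relation endpoints referring to entity ids of the same record. A small reader sketch for that shape; the file name and the strictness of the checks are assumptions for illustration, not doccano behaviour:

```python
import json

def read_relation_records(path):
    """Yield validated records in the JSONL(relation) shape quoted above."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            entity_ids = {e["id"] for e in record.get("entities", [])}
            for rel in record.get("relations", []):
                # Every relation endpoint must be an entity of the *same* record.
                if rel["from_id"] not in entity_ids or rel["to_id"] not in entity_ids:
                    raise ValueError(f"dangling relation {rel!r} in record")
            yield record

# Usage sketch (hypothetical file name):
# for rec in read_relation_records("relation_import_sample.jsonl"):
#     print(rec["text"], len(rec["entities"]), len(rec["relations"]))
```

A loader like this makes the dangling-reference failure mode visible before anything reaches the database.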
3578
431
gh_patches_debug_36217
rasdani/github-patches
git_diff
elastic__apm-agent-python-1090
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Botomongo - S3 spans throwing an "validation error: span: context: destination: service: 'name' required" **Flask Python application doesn't record the SPAN with S3 call**: **To Reproduce** 1. Flask Application doing S3 call 2. In console you will see the exception `Failed to submit message: 'HTTP 400: {"accepted":3,"errors":[{"message":"validation error: span: context: destination: service: \'name\' required",...` **Environment (please complete the following information)** - OS: Linux - Python version: 3.6 - Framework and version: Flask 1.1.2 - APM Server version: v7.12.0 - Agent version: 6.1.0 **From the APM version 7.12 name field is required** Problem is located here: elasticapm/instrumentation/packages/botocore.py `context["destination"]["service"] = {"type": span_type}` for destination.service there is no destination.service.name element IMHO: destination.service.name should be set as in the elasticapm/instrumentation/packages/elasticsearch.py </issue> <code> [start of elasticapm/instrumentation/packages/botocore.py] 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 31 from collections import namedtuple 32 33 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule 34 from elasticapm.traces import capture_span 35 from elasticapm.utils.compat import urlparse 36 37 HandlerInfo = namedtuple("HandlerInfo", ("signature", "span_type", "span_subtype", "span_action", "context")) 38 39 # Used for boto3 < 1.7 40 endpoint_to_service_id = {"SNS": "SNS", "S3": "S3", "DYNAMODB": "DynamoDB", "SQS": "SQS"} 41 42 43 class BotocoreInstrumentation(AbstractInstrumentedModule): 44 name = "botocore" 45 46 instrument_list = [("botocore.client", "BaseClient._make_api_call")] 47 48 def call(self, module, method, wrapped, instance, args, kwargs): 49 if "operation_name" in kwargs: 50 operation_name = kwargs["operation_name"] 51 else: 52 operation_name = args[0] 53 54 service_model = instance.meta.service_model 55 if hasattr(service_model, "service_id"): # added in boto3 1.7 56 service = service_model.service_id 57 else: 58 service = service_model.service_name.upper() 59 service = endpoint_to_service_id.get(service, service) 60 61 parsed_url = urlparse.urlparse(instance.meta.endpoint_url) 62 context = { 63 "destination": { 64 "address": parsed_url.hostname, 65 "port": parsed_url.port, 66 "cloud": {"region": instance.meta.region_name}, 67 } 68 } 69 70 handler_info = None 71 handler = handlers.get(service, False) 72 if handler: 73 handler_info = handler(operation_name, service, instance, args, kwargs, context) 74 if not handler_info: 75 handler_info = handle_default(operation_name, service, instance, args, kwargs, context) 76 77 with capture_span( 78 handler_info.signature, 79 span_type=handler_info.span_type, 80 leaf=True, 81 span_subtype=handler_info.span_subtype, 82 span_action=handler_info.span_action, 83 extra=handler_info.context, 84 ): 85 return wrapped(*args, **kwargs) 86 87 88 def handle_s3(operation_name, service, instance, args, kwargs, context): 89 span_type = "storage" 90 span_subtype = "s3" 91 span_action = operation_name 92 if len(args) > 1 and "Bucket" in args[1]: 93 bucket = args[1]["Bucket"] 94 else: 95 # TODO handle Access Points 96 bucket = "" 97 signature = f"S3 {operation_name} {bucket}" 98 99 context["destination"]["name"] = span_subtype 100 context["destination"]["resource"] = bucket 101 context["destination"]["service"] = {"type": span_type} 102 103 return HandlerInfo(signature, span_type, span_subtype, span_action, context) 104 105 106 def handle_dynamodb(operation_name, service, instance, args, kwargs, context): 107 span_type = "db" 108 span_subtype = "dynamodb" 109 span_action = "query" 110 if len(args) > 1 and "TableName" in args[1]: 111 table = args[1]["TableName"] 112 else: 113 table = "" 114 signature = f"DynamoDB {operation_name} {table}".rstrip() 115 116 context["db"] = {"type": "dynamodb", "instance": instance.meta.region_name} 117 if operation_name == "Query" and len(args) > 1 and "KeyConditionExpression" in args[1]: 118 context["db"]["statement"] = args[1]["KeyConditionExpression"] 119 120 context["destination"]["name"] = span_subtype 121 context["destination"]["resource"] = table 122 context["destination"]["service"] = {"type": span_type} 123 return HandlerInfo(signature, span_type, span_subtype, span_action, context) 124 125 126 def handle_sns(operation_name, service, instance, args, kwargs, context): 127 if operation_name != "Publish": 128 # only "publish" is handled specifically, other endpoints get the default treatment 129 return False 130 span_type = "messaging" 131 span_subtype = "sns" 132 span_action = "send" 
133 topic_name = "" 134 if len(args) > 1: 135 if "Name" in args[1]: 136 topic_name = args[1]["Name"] 137 if "TopicArn" in args[1]: 138 topic_name = args[1]["TopicArn"].rsplit(":", maxsplit=1)[-1] 139 signature = f"SNS {operation_name} {topic_name}".rstrip() 140 context["destination"]["name"] = span_subtype 141 context["destination"]["resource"] = f"{span_subtype}/{topic_name}" if topic_name else span_subtype 142 context["destination"]["type"] = span_type 143 return HandlerInfo(signature, span_type, span_subtype, span_action, context) 144 145 146 def handle_sqs(operation_name, service, instance, args, kwargs, destination): 147 pass 148 149 150 def handle_default(operation_name, service, instance, args, kwargs, destination): 151 span_type = "aws" 152 span_subtype = service.lower() 153 span_action = operation_name 154 155 signature = f"{service}:{operation_name}" 156 return HandlerInfo(signature, span_type, span_subtype, span_action, destination) 157 158 159 handlers = { 160 "S3": handle_s3, 161 "DynamoDB": handle_dynamodb, 162 "SNS": handle_sns, 163 "default": handle_default, 164 } 165 [end of elasticapm/instrumentation/packages/botocore.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticapm/instrumentation/packages/botocore.py b/elasticapm/instrumentation/packages/botocore.py --- a/elasticapm/instrumentation/packages/botocore.py +++ b/elasticapm/instrumentation/packages/botocore.py @@ -96,9 +96,7 @@ bucket = "" signature = f"S3 {operation_name} {bucket}" - context["destination"]["name"] = span_subtype - context["destination"]["resource"] = bucket - context["destination"]["service"] = {"type": span_type} + context["destination"]["service"] = {"name": span_subtype, "resource": bucket, "type": span_type} return HandlerInfo(signature, span_type, span_subtype, span_action, context) @@ -117,9 +115,7 @@ if operation_name == "Query" and len(args) > 1 and "KeyConditionExpression" in args[1]: context["db"]["statement"] = args[1]["KeyConditionExpression"] - context["destination"]["name"] = span_subtype - context["destination"]["resource"] = table - context["destination"]["service"] = {"type": span_type} + context["destination"]["service"] = {"name": span_subtype, "resource": table, "type": span_type} return HandlerInfo(signature, span_type, span_subtype, span_action, context) @@ -137,9 +133,11 @@ if "TopicArn" in args[1]: topic_name = args[1]["TopicArn"].rsplit(":", maxsplit=1)[-1] signature = f"SNS {operation_name} {topic_name}".rstrip() - context["destination"]["name"] = span_subtype - context["destination"]["resource"] = f"{span_subtype}/{topic_name}" if topic_name else span_subtype - context["destination"]["type"] = span_type + context["destination"]["service"] = { + "name": span_subtype, + "resource": f"{span_subtype}/{topic_name}" if topic_name else span_subtype, + "type": span_type, + } return HandlerInfo(signature, span_type, span_subtype, span_action, context) @@ -152,6 +150,8 @@ span_subtype = service.lower() span_action = operation_name + destination["service"] = {"name": span_subtype, "resource": span_subtype, "type": span_type} + signature = f"{service}:{operation_name}" return HandlerInfo(signature, span_type, span_subtype, span_action, destination)
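After this patch every handler emits `context["destination"]["service"]` as a single dict carrying `name`, `resource`, and `type`, which is the shape APM Server 7.12+ validates. A quick check that drives the patched `handle_s3` directly, assuming the patched module is importable; the operation, bucket, and endpoint values are fabricated:

```python
# Sketch: exercise the patched handler with made-up Boto-style arguments.
from elasticapm.instrumentation.packages.botocore import handle_s3

context = {"destination": {"address": "s3.amazonaws.com", "port": 443}}
info = handle_s3("PutObject", "S3", None, ("PutObject", {"Bucket": "reports"}), {}, context)

assert info.signature == "S3 PutObject reports"
# "name" is present, so the APM Server validation error from the issue cannot fire.
assert context["destination"]["service"] == {"name": "s3", "resource": "reports", "type": "storage"}
```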
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/botocore.py b/elasticapm/instrumentation/packages/botocore.py\n--- a/elasticapm/instrumentation/packages/botocore.py\n+++ b/elasticapm/instrumentation/packages/botocore.py\n@@ -96,9 +96,7 @@\n bucket = \"\"\n signature = f\"S3 {operation_name} {bucket}\"\n \n- context[\"destination\"][\"name\"] = span_subtype\n- context[\"destination\"][\"resource\"] = bucket\n- context[\"destination\"][\"service\"] = {\"type\": span_type}\n+ context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": bucket, \"type\": span_type}\n \n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n \n@@ -117,9 +115,7 @@\n if operation_name == \"Query\" and len(args) > 1 and \"KeyConditionExpression\" in args[1]:\n context[\"db\"][\"statement\"] = args[1][\"KeyConditionExpression\"]\n \n- context[\"destination\"][\"name\"] = span_subtype\n- context[\"destination\"][\"resource\"] = table\n- context[\"destination\"][\"service\"] = {\"type\": span_type}\n+ context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": table, \"type\": span_type}\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n \n \n@@ -137,9 +133,11 @@\n if \"TopicArn\" in args[1]:\n topic_name = args[1][\"TopicArn\"].rsplit(\":\", maxsplit=1)[-1]\n signature = f\"SNS {operation_name} {topic_name}\".rstrip()\n- context[\"destination\"][\"name\"] = span_subtype\n- context[\"destination\"][\"resource\"] = f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype\n- context[\"destination\"][\"type\"] = span_type\n+ context[\"destination\"][\"service\"] = {\n+ \"name\": span_subtype,\n+ \"resource\": f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype,\n+ \"type\": span_type,\n+ }\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n \n \n@@ -152,6 +150,8 @@\n span_subtype = service.lower()\n span_action = operation_name\n \n+ destination[\"service\"] = {\"name\": span_subtype, \"resource\": span_subtype, \"type\": span_type}\n+\n signature = f\"{service}:{operation_name}\"\n return HandlerInfo(signature, span_type, span_subtype, span_action, destination)\n", "issue": "Botomongo - S3 spans throwing an \"validation error: span: context: destination: service: 'name' required\"\n**Flask Python application doesn't record the SPAN with S3 call**:\r\n\r\n**To Reproduce**\r\n\r\n1. Flask Application doing S3 call\r\n2. 
In console you will see the exception `Failed to submit message: 'HTTP 400: {\"accepted\":3,\"errors\":[{\"message\":\"validation error: span: context: destination: service: \\'name\\' required\",...`\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.6\r\n- Framework and version: Flask 1.1.2\r\n- APM Server version: v7.12.0\r\n- Agent version: 6.1.0\r\n\r\n\r\n**From the APM version 7.12 name field is required**\r\n\r\nProblem is located here:\r\nelasticapm/instrumentation/packages/botocore.py\r\n`context[\"destination\"][\"service\"] = {\"type\": span_type}`\r\nfor destination.service there is no destination.service.name element\r\n\r\nIMHO: destination.service.name should be set as in the elasticapm/instrumentation/packages/elasticsearch.py\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom collections import namedtuple\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.compat import urlparse\n\nHandlerInfo = namedtuple(\"HandlerInfo\", (\"signature\", \"span_type\", \"span_subtype\", \"span_action\", \"context\"))\n\n# Used for boto3 < 1.7\nendpoint_to_service_id = {\"SNS\": \"SNS\", \"S3\": \"S3\", \"DYNAMODB\": \"DynamoDB\", \"SQS\": \"SQS\"}\n\n\nclass BotocoreInstrumentation(AbstractInstrumentedModule):\n name = \"botocore\"\n\n instrument_list = [(\"botocore.client\", \"BaseClient._make_api_call\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"operation_name\" in kwargs:\n operation_name = kwargs[\"operation_name\"]\n else:\n operation_name = args[0]\n\n service_model = instance.meta.service_model\n if hasattr(service_model, \"service_id\"): # added in boto3 1.7\n service = service_model.service_id\n else:\n service = service_model.service_name.upper()\n service = endpoint_to_service_id.get(service, service)\n\n parsed_url = urlparse.urlparse(instance.meta.endpoint_url)\n context = {\n \"destination\": {\n \"address\": parsed_url.hostname,\n \"port\": parsed_url.port,\n \"cloud\": {\"region\": instance.meta.region_name},\n }\n }\n\n handler_info = None\n handler = handlers.get(service, False)\n if handler:\n handler_info = handler(operation_name, service, instance, args, kwargs, context)\n if not handler_info:\n handler_info = handle_default(operation_name, service, instance, args, kwargs, context)\n\n with capture_span(\n handler_info.signature,\n span_type=handler_info.span_type,\n leaf=True,\n span_subtype=handler_info.span_subtype,\n span_action=handler_info.span_action,\n extra=handler_info.context,\n ):\n return wrapped(*args, **kwargs)\n\n\ndef handle_s3(operation_name, service, instance, args, kwargs, context):\n span_type = \"storage\"\n span_subtype = \"s3\"\n span_action = operation_name\n if len(args) > 1 and \"Bucket\" in args[1]:\n bucket = args[1][\"Bucket\"]\n else:\n # TODO handle Access Points\n bucket = \"\"\n signature = f\"S3 {operation_name} {bucket}\"\n\n context[\"destination\"][\"name\"] = span_subtype\n context[\"destination\"][\"resource\"] = bucket\n context[\"destination\"][\"service\"] = {\"type\": span_type}\n\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_dynamodb(operation_name, service, instance, args, kwargs, context):\n span_type = \"db\"\n span_subtype = \"dynamodb\"\n span_action = \"query\"\n if len(args) > 1 and \"TableName\" in args[1]:\n table = args[1][\"TableName\"]\n else:\n table = \"\"\n signature = f\"DynamoDB {operation_name} {table}\".rstrip()\n\n context[\"db\"] = {\"type\": \"dynamodb\", \"instance\": instance.meta.region_name}\n if operation_name == \"Query\" and len(args) > 1 and \"KeyConditionExpression\" in args[1]:\n context[\"db\"][\"statement\"] = args[1][\"KeyConditionExpression\"]\n\n 
context[\"destination\"][\"name\"] = span_subtype\n context[\"destination\"][\"resource\"] = table\n context[\"destination\"][\"service\"] = {\"type\": span_type}\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_sns(operation_name, service, instance, args, kwargs, context):\n if operation_name != \"Publish\":\n # only \"publish\" is handled specifically, other endpoints get the default treatment\n return False\n span_type = \"messaging\"\n span_subtype = \"sns\"\n span_action = \"send\"\n topic_name = \"\"\n if len(args) > 1:\n if \"Name\" in args[1]:\n topic_name = args[1][\"Name\"]\n if \"TopicArn\" in args[1]:\n topic_name = args[1][\"TopicArn\"].rsplit(\":\", maxsplit=1)[-1]\n signature = f\"SNS {operation_name} {topic_name}\".rstrip()\n context[\"destination\"][\"name\"] = span_subtype\n context[\"destination\"][\"resource\"] = f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype\n context[\"destination\"][\"type\"] = span_type\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_sqs(operation_name, service, instance, args, kwargs, destination):\n pass\n\n\ndef handle_default(operation_name, service, instance, args, kwargs, destination):\n span_type = \"aws\"\n span_subtype = service.lower()\n span_action = operation_name\n\n signature = f\"{service}:{operation_name}\"\n return HandlerInfo(signature, span_type, span_subtype, span_action, destination)\n\n\nhandlers = {\n \"S3\": handle_s3,\n \"DynamoDB\": handle_dynamodb,\n \"SNS\": handle_sns,\n \"default\": handle_default,\n}\n", "path": "elasticapm/instrumentation/packages/botocore.py"}]}
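The same reshaping covers the default branch, so AWS services without a dedicated handler also report a complete `destination.service` and pass validation. A sketch against the patched `handle_default`, with an invented Lambda-style call:

```python
# Sketch with fabricated values; handle_default is the function from the patched module.
from elasticapm.instrumentation.packages.botocore import handle_default

destination = {"address": "lambda.us-east-1.amazonaws.com", "port": 443}
info = handle_default("Invoke", "Lambda", None, (), {}, destination)

assert info.signature == "Lambda:Invoke"
# Post-patch, the default path fills in service too:
assert destination["service"] == {"name": "lambda", "resource": "lambda", "type": "aws"}
```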
2674
576
gh_patches_debug_2122
rasdani/github-patches
git_diff
docker__docker-py-3099
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to use docker to run containers started from Jupyter Notebook
Hello,

I'm currently following this tutorial: https://github.com/aws/amazon-sagemaker-examples/blob/main/sagemaker-pipelines/tabular/local-mode/sagemaker-pipelines-local-mode.ipynb

I'm getting the following error from trying to run it (it uses Docker in the background to run containers). It executes everything when the following command is run:

```python
execution = pipeline.start()
```

I get the following error:

```python
Creating q0r36pja78-algo-1-ywafn ... 
Creating q0r36pja78-algo-1-ywafn ... done
Attaching to q0r36pja78-algo-1-ywafn
Traceback (most recent call last):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\Scripts\docker-compose.exe\__main__.py", line 7, in <module>
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\main.py", line 81, in main
    command_func()
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\main.py", line 203, in perform_command
    handler(command, command_options)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\metrics\decorator.py", line 18, in wrapper
    result = fn(*args, **kwargs)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\main.py", line 1216, in up
    cascade_starter = log_printer.run()
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\log_printer.py", line 88, in run
    for line in consume_queue(queue, self.cascade_stop):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\log_printer.py", line 250, in consume_queue
    raise item.exc
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\log_printer.py", line 162, in tail_container_logs
    for item in build_log_generator(container, log_args):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\utils.py", line 50, in split_buffer
    for data in stream_as_text(stream):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\utils.py", line 26, in stream_as_text
    for data in stream:
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\types\daemon.py", line 32, in __next__
    return next(self._stream)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\api\client.py", line 418, in <genexpr>
    gen = (data for (_, data) in gen)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\utils\socket.py", line 95, in <genexpr>
    return ((STDOUT, frame) for frame in frames_iter_tty(socket))
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\utils\socket.py", line 128, in frames_iter_tty
    if len(result) == 0:
TypeError: object of type 'int' has no len()
Pipeline step 'AbaloneProcess' FAILED. 
Failure message is: RuntimeError: Failed to run: ['docker-compose', '-f', 'C:\\Users\\franc\\AppData\\Local\\Temp\\tmpga4umz96\\docker-compose.yaml', 'up', '--build', '--abort-on-container-exit'] Pipeline execution c0a11456-aec5-48ec-adde-4ee45085efa8 FAILED because step 'AbaloneProcess' failed. ``` Version of the modules: Python 3.8.16 docker 6.0.1 docker-compose 1.29.2 docker desktop 4.16.3 Thanks </issue> <code> [start of docker/utils/socket.py] 1 import errno 2 import os 3 import select 4 import socket as pysocket 5 import struct 6 7 try: 8 from ..transport import NpipeSocket 9 except ImportError: 10 NpipeSocket = type(None) 11 12 13 STDOUT = 1 14 STDERR = 2 15 16 17 class SocketError(Exception): 18 pass 19 20 21 # NpipeSockets have their own error types 22 # pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.') 23 NPIPE_ENDED = 109 24 25 26 def read(socket, n=4096): 27 """ 28 Reads at most n bytes from socket 29 """ 30 31 recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) 32 33 if not isinstance(socket, NpipeSocket): 34 select.select([socket], [], []) 35 36 try: 37 if hasattr(socket, 'recv'): 38 return socket.recv(n) 39 if isinstance(socket, getattr(pysocket, 'SocketIO')): 40 return socket.read(n) 41 return os.read(socket.fileno(), n) 42 except OSError as e: 43 if e.errno not in recoverable_errors: 44 raise 45 except Exception as e: 46 is_pipe_ended = (isinstance(socket, NpipeSocket) and 47 len(e.args) > 0 and 48 e.args[0] == NPIPE_ENDED) 49 if is_pipe_ended: 50 # npipes don't support duplex sockets, so we interpret 51 # a PIPE_ENDED error as a close operation (0-length read). 52 return 0 53 raise 54 55 56 def read_exactly(socket, n): 57 """ 58 Reads exactly n bytes from socket 59 Raises SocketError if there isn't enough data 60 """ 61 data = bytes() 62 while len(data) < n: 63 next_data = read(socket, n - len(data)) 64 if not next_data: 65 raise SocketError("Unexpected EOF") 66 data += next_data 67 return data 68 69 70 def next_frame_header(socket): 71 """ 72 Returns the stream and size of the next frame of data waiting to be read 73 from socket, according to the protocol defined here: 74 75 https://docs.docker.com/engine/api/v1.24/#attach-to-a-container 76 """ 77 try: 78 data = read_exactly(socket, 8) 79 except SocketError: 80 return (-1, -1) 81 82 stream, actual = struct.unpack('>BxxxL', data) 83 return (stream, actual) 84 85 86 def frames_iter(socket, tty): 87 """ 88 Return a generator of frames read from socket. A frame is a tuple where 89 the first item is the stream number and the second item is a chunk of data. 90 91 If the tty setting is enabled, the streams are multiplexed into the stdout 92 stream. 93 """ 94 if tty: 95 return ((STDOUT, frame) for frame in frames_iter_tty(socket)) 96 else: 97 return frames_iter_no_tty(socket) 98 99 100 def frames_iter_no_tty(socket): 101 """ 102 Returns a generator of data read from the socket when the tty setting is 103 not enabled. 104 """ 105 while True: 106 (stream, n) = next_frame_header(socket) 107 if n < 0: 108 break 109 while n > 0: 110 result = read(socket, n) 111 if result is None: 112 continue 113 data_length = len(result) 114 if data_length == 0: 115 # We have reached EOF 116 return 117 n -= data_length 118 yield (stream, result) 119 120 121 def frames_iter_tty(socket): 122 """ 123 Return a generator of data read from the socket when the tty setting is 124 enabled. 
125 """ 126 while True: 127 result = read(socket) 128 if len(result) == 0: 129 # We have reached EOF 130 return 131 yield result 132 133 134 def consume_socket_output(frames, demux=False): 135 """ 136 Iterate through frames read from the socket and return the result. 137 138 Args: 139 140 demux (bool): 141 If False, stdout and stderr are multiplexed, and the result is the 142 concatenation of all the frames. If True, the streams are 143 demultiplexed, and the result is a 2-tuple where each item is the 144 concatenation of frames belonging to the same stream. 145 """ 146 if demux is False: 147 # If the streams are multiplexed, the generator returns strings, that 148 # we just need to concatenate. 149 return bytes().join(frames) 150 151 # If the streams are demultiplexed, the generator yields tuples 152 # (stdout, stderr) 153 out = [None, None] 154 for frame in frames: 155 # It is guaranteed that for each frame, one and only one stream 156 # is not None. 157 assert frame != (None, None) 158 if frame[0] is not None: 159 if out[0] is None: 160 out[0] = frame[0] 161 else: 162 out[0] += frame[0] 163 else: 164 if out[1] is None: 165 out[1] = frame[1] 166 else: 167 out[1] += frame[1] 168 return tuple(out) 169 170 171 def demux_adaptor(stream_id, data): 172 """ 173 Utility to demultiplex stdout and stderr when reading frames from the 174 socket. 175 """ 176 if stream_id == STDOUT: 177 return (data, None) 178 elif stream_id == STDERR: 179 return (None, data) 180 else: 181 raise ValueError(f'{stream_id} is not a valid stream') 182 [end of docker/utils/socket.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/utils/socket.py b/docker/utils/socket.py --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -49,7 +49,7 @@ if is_pipe_ended: # npipes don't support duplex sockets, so we interpret # a PIPE_ENDED error as a close operation (0-length read). - return 0 + return '' raise
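The one-character change matters because `frames_iter_tty` applies `len()` to whatever `read()` returns; the npipe EOF path used to hand back the integer `0`, and `len(0)` raises exactly the `TypeError` in the traceback. A stripped-down reproduction; the `read_fn` stub stands in for a socket and is not part of docker-py:

```python
def frames_iter_tty(read_fn):
    # Same loop shape as docker.utils.socket.frames_iter_tty.
    while True:
        result = read_fn()
        if len(result) == 0:  # TypeError when result is the int 0
            return
        yield result

# Pre-fix behaviour: npipe EOF surfaced as int 0, so len(0) raises TypeError.
reads = iter([b"log line\n", 0])
try:
    list(frames_iter_tty(lambda: next(reads)))
except TypeError as exc:
    print("pre-fix:", exc)  # object of type 'int' has no len()

# Post-fix behaviour: EOF surfaces as '' and the generator ends cleanly,
# because len('') == 0 just like an empty bytes read.
reads = iter([b"log line\n", ""])
assert list(frames_iter_tty(lambda: next(reads))) == [b"log line\n"]
```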
{"golden_diff": "diff --git a/docker/utils/socket.py b/docker/utils/socket.py\n--- a/docker/utils/socket.py\n+++ b/docker/utils/socket.py\n@@ -49,7 +49,7 @@\n if is_pipe_ended:\n # npipes don't support duplex sockets, so we interpret\n # a PIPE_ENDED error as a close operation (0-length read).\n- return 0\n+ return ''\n raise\n", "issue": "Unable to use docker to run containers started from Jupyter Notebook\nHello,\r\n\r\nI'm currently following this tutorial: https://github.com/aws/amazon-sagemaker-examples/blob/main/sagemaker-pipelines/tabular/local-mode/sagemaker-pipelines-local-mode.ipynb\r\n\r\nI'm getting the following error from trying to run it (it uses docker in the background to run contaiiners). It executes everything when the following command is run: \r\n\r\n``` python\r\nexecution = pipeline.start()\r\n\r\n```\r\n\r\nI get the following error:\r\n\r\n```python\r\nCreating q0r36pja78-algo-1-ywafn ... \r\nCreating q0r36pja78-algo-1-ywafn ... done\r\nAttaching to q0r36pja78-algo-1-ywafn\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\Scripts\\docker-compose.exe\\__main__.py\", line 7, in <module>\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\cli\\main.py\", line 81, in main\r\n command_func()\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\cli\\main.py\", line 203, in perform_command\r\n handler(command, command_options)\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\metrics\\decorator.py\", line 18, in wrapper\r\n result = fn(*args, **kwargs)\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\cli\\main.py\", line 1216, in up\r\n cascade_starter = log_printer.run()\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\cli\\log_printer.py\", line 88, in run\r\n for line in consume_queue(queue, self.cascade_stop):\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\cli\\log_printer.py\", line 250, in consume_queue\r\n raise item.exc\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\cli\\log_printer.py\", line 162, in tail_container_logs\r\n for item in build_log_generator(container, log_args):\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\utils.py\", line 50, in split_buffer\r\n for data in stream_as_text(stream):\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\compose\\utils.py\", line 26, in stream_as_text\r\n for data in stream:\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\docker\\types\\daemon.py\", line 32, in __next__\r\n return next(self._stream)\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\docker\\api\\client.py\", line 418, in <genexpr>\r\n gen = (data for (_, data) in gen)\r\n File 
\"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\docker\\utils\\socket.py\", line 95, in <genexpr>\r\n return ((STDOUT, frame) for frame in frames_iter_tty(socket))\r\n File \"C:\\Users\\franc\\anaconda3\\envs\\sm-pipelines-modelbuild\\lib\\site-packages\\docker\\utils\\socket.py\", line 128, in frames_iter_tty\r\n if len(result) == 0:\r\nTypeError: object of type 'int' has no len()\r\nPipeline step 'AbaloneProcess' FAILED. Failure message is: RuntimeError: Failed to run: ['docker-compose', '-f', 'C:\\\\Users\\\\franc\\\\AppData\\\\Local\\\\Temp\\\\tmpga4umz96\\\\docker-compose.yaml', 'up', '--build', '--abort-on-container-exit']\r\nPipeline execution c0a11456-aec5-48ec-adde-4ee45085efa8 FAILED because step 'AbaloneProcess' failed.\r\n\r\n```\r\n\r\nVersion of the modules:\r\nPython 3.8.16\r\ndocker 6.0.1\r\ndocker-compose 1.29.2\r\ndocker desktop 4.16.3\r\n\r\nThanks\r\n\n", "before_files": [{"content": "import errno\nimport os\nimport select\nimport socket as pysocket\nimport struct\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nSTDOUT = 1\nSTDERR = 2\n\n\nclass SocketError(Exception):\n pass\n\n\n# NpipeSockets have their own error types\n# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')\nNPIPE_ENDED = 109\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n if isinstance(socket, getattr(pysocket, 'SocketIO')):\n return socket.read(n)\n return os.read(socket.fileno(), n)\n except OSError as e:\n if e.errno not in recoverable_errors:\n raise\n except Exception as e:\n is_pipe_ended = (isinstance(socket, NpipeSocket) and\n len(e.args) > 0 and\n e.args[0] == NPIPE_ENDED)\n if is_pipe_ended:\n # npipes don't support duplex sockets, so we interpret\n # a PIPE_ENDED error as a close operation (0-length read).\n return 0\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = bytes()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_header(socket):\n \"\"\"\n Returns the stream and size of the next frame of data waiting to be read\n from socket, according to the protocol defined here:\n\n https://docs.docker.com/engine/api/v1.24/#attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return (-1, -1)\n\n stream, actual = struct.unpack('>BxxxL', data)\n return (stream, actual)\n\n\ndef frames_iter(socket, tty):\n \"\"\"\n Return a generator of frames read from socket. 
A frame is a tuple where\n the first item is the stream number and the second item is a chunk of data.\n\n If the tty setting is enabled, the streams are multiplexed into the stdout\n stream.\n \"\"\"\n if tty:\n return ((STDOUT, frame) for frame in frames_iter_tty(socket))\n else:\n return frames_iter_no_tty(socket)\n\n\ndef frames_iter_no_tty(socket):\n \"\"\"\n Returns a generator of data read from the socket when the tty setting is\n not enabled.\n \"\"\"\n while True:\n (stream, n) = next_frame_header(socket)\n if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield (stream, result)\n\n\ndef frames_iter_tty(socket):\n \"\"\"\n Return a generator of data read from the socket when the tty setting is\n enabled.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n\n\ndef consume_socket_output(frames, demux=False):\n \"\"\"\n Iterate through frames read from the socket and return the result.\n\n Args:\n\n demux (bool):\n If False, stdout and stderr are multiplexed, and the result is the\n concatenation of all the frames. If True, the streams are\n demultiplexed, and the result is a 2-tuple where each item is the\n concatenation of frames belonging to the same stream.\n \"\"\"\n if demux is False:\n # If the streams are multiplexed, the generator returns strings, that\n # we just need to concatenate.\n return bytes().join(frames)\n\n # If the streams are demultiplexed, the generator yields tuples\n # (stdout, stderr)\n out = [None, None]\n for frame in frames:\n # It is guaranteed that for each frame, one and only one stream\n # is not None.\n assert frame != (None, None)\n if frame[0] is not None:\n if out[0] is None:\n out[0] = frame[0]\n else:\n out[0] += frame[0]\n else:\n if out[1] is None:\n out[1] = frame[1]\n else:\n out[1] += frame[1]\n return tuple(out)\n\n\ndef demux_adaptor(stream_id, data):\n \"\"\"\n Utility to demultiplex stdout and stderr when reading frames from the\n socket.\n \"\"\"\n if stream_id == STDOUT:\n return (data, None)\n elif stream_id == STDERR:\n return (None, data)\n else:\n raise ValueError(f'{stream_id} is not a valid stream')\n", "path": "docker/utils/socket.py"}]}
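For the non-tty path, `next_frame_header` in the listing decodes Docker's 8-byte multiplexing header with `struct.unpack('>BxxxL', data)`: one stream byte, three pad bytes, then a big-endian payload length. A worked round trip with an invented payload; the local `demux_adaptor` copy mirrors the listing rather than importing it:

```python
import struct

STDOUT, STDERR = 1, 2

payload = b"hello from stderr\n"
# Docker attach protocol: 1 byte stream id, 3 pad bytes, 4-byte big-endian length.
header = struct.pack(">BxxxL", STDERR, len(payload))

stream_id, size = struct.unpack(">BxxxL", header)
assert (stream_id, size) == (STDERR, len(payload))

def demux_adaptor(stream_id, data):
    # Routes by stream id, as in the listing above.
    return (data, None) if stream_id == STDOUT else (None, data)

assert demux_adaptor(stream_id, payload) == (None, payload)
```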
3346
90
gh_patches_debug_10458
rasdani/github-patches
git_diff
interlegis__sapl-2070
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Erro ao imprimir etiquetas (campo descrição estourando) Nome da Cidade: | Birigui Estado: | São Paulo Casa Legislativa: | Câmara Municipal Bom dia, Começamos hoje a utilizar a versão 3.1 do SAPL que foi migrado ontem. Mas estamos com um problema na impressão das etiquetas do protocolo administrativo. O problema está na última linha da etiqueta, ela imprimi o Assunto ao invés do Tipo/Número do documento vinculado, gerando uma etiqueta enorme. No aguardo, Evandro. </issue> <code> [start of sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py] 1 # parameters=sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro 2 3 """relatorio_protocolo.py 4 External method para gerar o arquivo rml da etiqueta de protocolo 5 Autor: Luciano De Fazio 6 Empresa: OpenLegis Consultoria 7 versão: 1.0 8 """ 9 import time 10 11 from trml2pdf import parseString 12 13 14 def cabecalho(dic_cabecalho, imagem): 15 """Gera o codigo rml do cabecalho""" 16 tmp_data = '' 17 tmp_data += '\t\t\t\t<image x="2.1cm" y="25.7cm" width="59" height="62" file="' + \ 18 imagem + '"/>\n' 19 tmp_data += '\t\t\t\t<lines>2cm 25.4cm 19cm 25.4cm</lines>\n' 20 tmp_data += '\t\t\t\t<setFont name="Helvetica-Bold" size="15"/>\n' 21 tmp_data += '\t\t\t\t<drawString x="5cm" y="27.2cm">' + \ 22 dic_cabecalho['nom_casa'] + '</drawString>\n' 23 tmp_data += '\t\t\t\t<setFont name="Helvetica" size="12"/>\n' 24 tmp_data += '\t\t\t\t<drawString x="5cm" y="26.6cm">Sistema de Apoio ao Processo Legislativo</drawString>\n' 25 tmp_data += '\t\t\t\t<setFont name="Helvetica-Bold" size="13"/>\n' 26 tmp_data += '\t\t\t\t<drawString x="2.2cm" y="24.6cm">Relatório de Controle do Protocolo</drawString>\n' 27 28 return tmp_data 29 30 31 def rodape(lst_rodape): 32 """Gera o codigo rml do rodape""" 33 34 tmp_data = '' 35 tmp_data += '\t\t\t\t<lines>2cm 3.2cm 19cm 3.2cm</lines>\n' 36 tmp_data += '\t\t\t\t<setFont name="Helvetica" size="8"/>\n' 37 tmp_data += '\t\t\t\t<drawString x="2cm" y="3.3cm">' + \ 38 lst_rodape[2] + '</drawString>\n' 39 tmp_data += '\t\t\t\t<drawString x="17.9cm" y="3.3cm">Página <pageNumber/></drawString>\n' 40 tmp_data += '\t\t\t\t<drawCentredString x="10.5cm" y="2.7cm">' + \ 41 lst_rodape[0] + '</drawCentredString>\n' 42 tmp_data += '\t\t\t\t<drawCentredString x="10.5cm" y="2.3cm">' + \ 43 lst_rodape[1] + '</drawCentredString>\n' 44 45 return tmp_data 46 47 48 def paraStyle(): 49 """Gera o codigo rml que define o estilo dos paragrafos""" 50 51 tmp_data = '' 52 tmp_data += '\t<stylesheet>\n' 53 tmp_data += '\t\t<blockTableStyle id="Standard_Outline">\n' 54 tmp_data += '\t\t\t<blockAlignment value="CENTER"/>\n' 55 tmp_data += '\t\t\t<blockValign value="TOP"/>\n' 56 tmp_data += '\t\t</blockTableStyle>\n' 57 tmp_data += '\t\t<initialize>\n' 58 tmp_data += '\t\t\t<paraStyle name="all" alignment="justify"/>\n' 59 tmp_data += '\t\t</initialize>\n' 60 tmp_data += '\t\t<paraStyle name="P1" fontName="Helvetica-Bold" fontSize="5.0" leading="6" alignment="CENTER"/>\n' 61 tmp_data += '\t\t<paraStyle name="P2" fontName="Helvetica" fontSize="8.0" leading="9" alignment="CENTER"/>\n' 62 tmp_data += '\t</stylesheet>\n' 63 64 return tmp_data 65 66 67 def protocolos(lst_protocolos, dic_cabecalho): 68 """Gera o codigo rml do conteudo da pesquisa de protocolos""" 69 70 tmp_data = '' 71 72 # inicio do bloco que contem os flowables 73 tmp_data += '\t<story>\n' 74 75 for dic in lst_protocolos: 76 # condicao para a quebra de pagina 77 
tmp_data += '\t\t<condPageBreak height="8mm"/>\n' 78 79 # protocolos 80 if dic['titulo'] != None: 81 tmp_data += '\t\t<para style="P1">\n' 82 tmp_data += '\t\t\t<font color="white"> </font>\n' 83 tmp_data += '\t\t</para>\n' 84 tmp_data += '\t\t<para style="P2"><b>' + \ 85 dic_cabecalho['nom_casa'] + '</b></para>\n' 86 tmp_data += '\t\t<para style="P2">\n' 87 tmp_data += '\t\t\t<font color="white"> </font>\n' 88 tmp_data += '\t\t</para>\n' 89 tmp_data += '<blockTable style="Standard_Outline"><tr><td>' 90 tmp_data += '<barCode code="Code128" x="0.15cm" barHeight="0.34in" barWidth="0.018in">' + \ 91 dic['titulo'] + '</barCode>\n' 92 tmp_data += '</td></tr></blockTable>' 93 tmp_data += '\t\t<para style="P2"><b>PROTOCOLO GERAL ' + \ 94 dic['titulo'] + '</b></para>\n' 95 if dic['data'] != None: 96 tmp_data += '\t\t<para style="P2"><b>' + \ 97 dic['data'] + '</b></para>\n' 98 tmp_data += '\t\t<para style="P2"><b>' + \ 99 dic['natureza'] 100 if dic['ident_processo']: 101 tmp_data += ' - ' + dic['ident_processo'] + '</b></para>\n' 102 else: 103 tmp_data += '</b></para>\n' 104 105 tmp_data += '\t</story>\n' 106 return tmp_data 107 108 109 def principal(imagem, lst_protocolos, dic_cabecalho, lst_rodape): 110 """Funcao pricipal que gera a estrutura global do arquivo rml""" 111 112 arquivoPdf = str(int(time.time() * 100)) + ".pdf" 113 114 tmp_data = '' 115 tmp_data += '<?xml version="1.0" encoding="utf-8" standalone="no" ?>\n' 116 tmp_data += '<!DOCTYPE document SYSTEM "rml_1_0.dtd">\n' 117 tmp_data += '<document filename="etiquetas.pdf">\n' 118 tmp_data += '\t<template pageSize="(62mm, 29mm)" title="Etiquetas de Protocolo" author="Luciano De Fazio" allowSplitting="20">\n' 119 tmp_data += '\t\t<pageTemplate id="first">\n' 120 tmp_data += '\t\t\t<pageGraphics>\n' 121 tmp_data += '\t\t\t<frame id="first" x1="0.03cm" y1="0.1cm" width="61mm" height="29mm"/>\n' 122 tmp_data += '\t\t\t</pageGraphics>\n' 123 tmp_data += '\t\t</pageTemplate>\n' 124 tmp_data += '\t</template>\n' 125 tmp_data += paraStyle() 126 tmp_data += protocolos(lst_protocolos, dic_cabecalho) 127 tmp_data += '</document>\n' 128 tmp_pdf = parseString(tmp_data) 129 130 return tmp_pdf 131 # if hasattr(context.temp_folder,arquivoPdf): 132 # context.temp_folder.manage_delObjects(ids=arquivoPdf) 133 # context.temp_folder.manage_addFile(arquivoPdf) 134 # arq=context.temp_folder[arquivoPdf] 135 # arq.manage_edit(title='Arquivo PDF temporário.',filedata=tmp_pdf,content_type='application/pdf') 136 137 # return "/temp_folder/"+arquivoPdf 138 139 # return 140 # principal(sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro) 141 [end of sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py b/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py --- a/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py +++ b/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py @@ -98,7 +98,11 @@ tmp_data += '\t\t<para style="P2"><b>' + \ dic['natureza'] if dic['ident_processo']: - tmp_data += ' - ' + dic['ident_processo'] + '</b></para>\n' + # Limita o tamanho do texto para não "explodir" as etiquetas + descricao = dic['ident_processo'][:60] + if len(dic['ident_processo']) > 60: + descricao += '...' + tmp_data += ' - ' + descricao + '</b></para>\n' else: tmp_data += '</b></para>\n'
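The fix above caps the last line of the label: `dic['ident_processo']`, the field that ends up carrying the long Assunto text, is sliced to 60 characters and given a `...` suffix, so the final RML paragraph can no longer grow past the 62mm x 29mm label page. Below is a minimal standalone sketch of the same truncation rule; the helper name and the sample string are ours for illustration, and only the 60-character limit and the ellipsis come from the patch.

def truncate_label_description(text, limit=60):
    """Cut the linked-document description so the label stays compact."""
    if len(text) > limit:
        return text[:limit] + '...'
    return text

# A long subject no longer blows up the label height:
assunto = ('Requerimento que solicita informacoes sobre o andamento '
           'das obras da escola municipal do bairro centro')
print(truncate_label_description(assunto))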
{"golden_diff": "diff --git a/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py b/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py\n--- a/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py\n+++ b/sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py\n@@ -98,7 +98,11 @@\n tmp_data += '\\t\\t<para style=\"P2\"><b>' + \\\n dic['natureza']\n if dic['ident_processo']:\n- tmp_data += ' - ' + dic['ident_processo'] + '</b></para>\\n'\n+ # Limita o tamanho do texto para n\u00e3o \"explodir\" as etiquetas\n+ descricao = dic['ident_processo'][:60]\n+ if len(dic['ident_processo']) > 60:\n+ descricao += '...'\n+ tmp_data += ' - ' + descricao + '</b></para>\\n'\n else:\n tmp_data += '</b></para>\\n'\n", "issue": "Erro ao imprimir etiquetas (campo descri\u00e7\u00e3o estourando)\nNome da Cidade: | Birigui\r\nEstado: | S\u00e3o Paulo\r\nCasa Legislativa: | C\u00e2mara Municipal\r\n\r\nBom dia,\r\nCome\u00e7amos hoje a utilizar a vers\u00e3o 3.1 do SAPL que foi migrado ontem. Mas estamos com um problema na impress\u00e3o das etiquetas do protocolo administrativo. O problema est\u00e1 na \u00faltima linha da etiqueta, ela imprimi o Assunto ao inv\u00e9s do Tipo/N\u00famero do documento vinculado, gerando uma etiqueta enorme. \r\nNo aguardo,\r\nEvandro.\n", "before_files": [{"content": "# parameters=sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro\n\n\"\"\"relatorio_protocolo.py\n External method para gerar o arquivo rml da etiqueta de protocolo\n Autor: Luciano De Fazio\n Empresa: OpenLegis Consultoria\n vers\u00c3\u00a3o: 1.0\n\"\"\"\nimport time\n\nfrom trml2pdf import parseString\n\n\ndef cabecalho(dic_cabecalho, imagem):\n \"\"\"Gera o codigo rml do cabecalho\"\"\"\n tmp_data = ''\n tmp_data += '\\t\\t\\t\\t<image x=\"2.1cm\" y=\"25.7cm\" width=\"59\" height=\"62\" file=\"' + \\\n imagem + '\"/>\\n'\n tmp_data += '\\t\\t\\t\\t<lines>2cm 25.4cm 19cm 25.4cm</lines>\\n'\n tmp_data += '\\t\\t\\t\\t<setFont name=\"Helvetica-Bold\" size=\"15\"/>\\n'\n tmp_data += '\\t\\t\\t\\t<drawString x=\"5cm\" y=\"27.2cm\">' + \\\n dic_cabecalho['nom_casa'] + '</drawString>\\n'\n tmp_data += '\\t\\t\\t\\t<setFont name=\"Helvetica\" size=\"12\"/>\\n'\n tmp_data += '\\t\\t\\t\\t<drawString x=\"5cm\" y=\"26.6cm\">Sistema de Apoio ao Processo Legislativo</drawString>\\n'\n tmp_data += '\\t\\t\\t\\t<setFont name=\"Helvetica-Bold\" size=\"13\"/>\\n'\n tmp_data += '\\t\\t\\t\\t<drawString x=\"2.2cm\" y=\"24.6cm\">Relat\u00c3\u00b3rio de Controle do Protocolo</drawString>\\n'\n\n return tmp_data\n\n\ndef rodape(lst_rodape):\n \"\"\"Gera o codigo rml do rodape\"\"\"\n\n tmp_data = ''\n tmp_data += '\\t\\t\\t\\t<lines>2cm 3.2cm 19cm 3.2cm</lines>\\n'\n tmp_data += '\\t\\t\\t\\t<setFont name=\"Helvetica\" size=\"8\"/>\\n'\n tmp_data += '\\t\\t\\t\\t<drawString x=\"2cm\" y=\"3.3cm\">' + \\\n lst_rodape[2] + '</drawString>\\n'\n tmp_data += '\\t\\t\\t\\t<drawString x=\"17.9cm\" y=\"3.3cm\">P\u00c3\u00a1gina <pageNumber/></drawString>\\n'\n tmp_data += '\\t\\t\\t\\t<drawCentredString x=\"10.5cm\" y=\"2.7cm\">' + \\\n lst_rodape[0] + '</drawCentredString>\\n'\n tmp_data += '\\t\\t\\t\\t<drawCentredString x=\"10.5cm\" y=\"2.3cm\">' + \\\n lst_rodape[1] + '</drawCentredString>\\n'\n\n return tmp_data\n\n\ndef paraStyle():\n \"\"\"Gera o codigo rml que define o estilo dos paragrafos\"\"\"\n\n tmp_data = ''\n tmp_data += '\\t<stylesheet>\\n'\n tmp_data += '\\t\\t<blockTableStyle id=\"Standard_Outline\">\\n'\n tmp_data += '\\t\\t\\t<blockAlignment value=\"CENTER\"/>\\n'\n tmp_data += 
'\\t\\t\\t<blockValign value=\"TOP\"/>\\n'\n tmp_data += '\\t\\t</blockTableStyle>\\n'\n tmp_data += '\\t\\t<initialize>\\n'\n tmp_data += '\\t\\t\\t<paraStyle name=\"all\" alignment=\"justify\"/>\\n'\n tmp_data += '\\t\\t</initialize>\\n'\n tmp_data += '\\t\\t<paraStyle name=\"P1\" fontName=\"Helvetica-Bold\" fontSize=\"5.0\" leading=\"6\" alignment=\"CENTER\"/>\\n'\n tmp_data += '\\t\\t<paraStyle name=\"P2\" fontName=\"Helvetica\" fontSize=\"8.0\" leading=\"9\" alignment=\"CENTER\"/>\\n'\n tmp_data += '\\t</stylesheet>\\n'\n\n return tmp_data\n\n\ndef protocolos(lst_protocolos, dic_cabecalho):\n \"\"\"Gera o codigo rml do conteudo da pesquisa de protocolos\"\"\"\n\n tmp_data = ''\n\n # inicio do bloco que contem os flowables\n tmp_data += '\\t<story>\\n'\n\n for dic in lst_protocolos:\n # condicao para a quebra de pagina\n tmp_data += '\\t\\t<condPageBreak height=\"8mm\"/>\\n'\n\n # protocolos\n if dic['titulo'] != None:\n tmp_data += '\\t\\t<para style=\"P1\">\\n'\n tmp_data += '\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp_data += '\\t\\t</para>\\n'\n tmp_data += '\\t\\t<para style=\"P2\"><b>' + \\\n dic_cabecalho['nom_casa'] + '</b></para>\\n'\n tmp_data += '\\t\\t<para style=\"P2\">\\n'\n tmp_data += '\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp_data += '\\t\\t</para>\\n'\n tmp_data += '<blockTable style=\"Standard_Outline\"><tr><td>'\n tmp_data += '<barCode code=\"Code128\" x=\"0.15cm\" barHeight=\"0.34in\" barWidth=\"0.018in\">' + \\\n dic['titulo'] + '</barCode>\\n'\n tmp_data += '</td></tr></blockTable>'\n tmp_data += '\\t\\t<para style=\"P2\"><b>PROTOCOLO GERAL ' + \\\n dic['titulo'] + '</b></para>\\n'\n if dic['data'] != None:\n tmp_data += '\\t\\t<para style=\"P2\"><b>' + \\\n dic['data'] + '</b></para>\\n'\n tmp_data += '\\t\\t<para style=\"P2\"><b>' + \\\n dic['natureza']\n if dic['ident_processo']:\n tmp_data += ' - ' + dic['ident_processo'] + '</b></para>\\n'\n else:\n tmp_data += '</b></para>\\n'\n\n tmp_data += '\\t</story>\\n'\n return tmp_data\n\n\ndef principal(imagem, lst_protocolos, dic_cabecalho, lst_rodape):\n \"\"\"Funcao pricipal que gera a estrutura global do arquivo rml\"\"\"\n\n arquivoPdf = str(int(time.time() * 100)) + \".pdf\"\n\n tmp_data = ''\n tmp_data += '<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\" ?>\\n'\n tmp_data += '<!DOCTYPE document SYSTEM \"rml_1_0.dtd\">\\n'\n tmp_data += '<document filename=\"etiquetas.pdf\">\\n'\n tmp_data += '\\t<template pageSize=\"(62mm, 29mm)\" title=\"Etiquetas de Protocolo\" author=\"Luciano De Fazio\" allowSplitting=\"20\">\\n'\n tmp_data += '\\t\\t<pageTemplate id=\"first\">\\n'\n tmp_data += '\\t\\t\\t<pageGraphics>\\n'\n tmp_data += '\\t\\t\\t<frame id=\"first\" x1=\"0.03cm\" y1=\"0.1cm\" width=\"61mm\" height=\"29mm\"/>\\n'\n tmp_data += '\\t\\t\\t</pageGraphics>\\n'\n tmp_data += '\\t\\t</pageTemplate>\\n'\n tmp_data += '\\t</template>\\n'\n tmp_data += paraStyle()\n tmp_data += protocolos(lst_protocolos, dic_cabecalho)\n tmp_data += '</document>\\n'\n tmp_pdf = parseString(tmp_data)\n\n return tmp_pdf\n# if hasattr(context.temp_folder,arquivoPdf):\n# context.temp_folder.manage_delObjects(ids=arquivoPdf)\n# context.temp_folder.manage_addFile(arquivoPdf)\n# arq=context.temp_folder[arquivoPdf]\n# arq.manage_edit(title='Arquivo PDF tempor\u00c3\u00a1rio.',filedata=tmp_pdf,content_type='application/pdf')\n\n# return \"/temp_folder/\"+arquivoPdf\n\n# return\n# principal(sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro)\n", "path": 
"sapl/relatorios/templates/pdf_etiqueta_protocolo_gerar.py"}]}
num_tokens_prompt: 2,883
num_tokens_diff: 232

problem_id: gh_patches_debug_61039
source: rasdani/github-patches
task_type: git_diff
in_source_id: google-research__text-to-text-transfer-transformer-327
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue Running T5 in Colab TPU
Hi Team,

I was trying to do a pre-training of T5 from scratch on Colab. If I install t5 using (pip install t5[gcp]) and then try to connect and execute ` tf.tpu.experimental.initialize_tpu_system(tpu)`, I get the error below.

`InvalidArgumentError: NodeDef expected inputs 'string' do not match 0 inputs specified; Op<name=_Send; signature=tensor:T -> ; attr=T:type; attr=tensor_name:string; attr=send_device:string; attr=send_device_incarnation:int; attr=recv_device:string; attr=client_terminated:bool,default=false; is_stateful=true>; NodeDef: {{node _Send}}`

If I install/upgrade tensorflow, that error is resolved; however, importing t5 then fails as below.

` import t5`

`NotFoundError: /usr/local/lib/python3.6/dist-packages/tensorflow_text/python/metrics/_text_similarity_metric_ops.so: undefined symbol: _ZN10tensorflow14kernel_factory17OpKernelRegistrar12InitInternalEPKNS_9KernelDefEN4absl11string_viewESt10unique_ptrINS0_15OpKernelFactoryESt14default_deleteIS8_EE`

Please let me know if there is a way to resolve this.
Thanks.
</issue>
<code>
[start of setup.py]
1 # Copyright 2020 The T5 Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #     http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 
15 """Install T5."""
16 
17 import os
18 import sys
19 import setuptools
20 
21 # To enable importing version.py directly, we add its path to sys.path.
22 version_path = os.path.join(os.path.dirname(__file__), 't5')
23 sys.path.append(version_path)
24 from version import __version__  # pylint: disable=g-import-not-at-top
25 
26 # Get the long description from the README file.
27 with open('README.md') as fp:
28     _LONG_DESCRIPTION = fp.read()
29 
30 setuptools.setup(
31     name='t5',
32     version=__version__,
33     description='Text-to-text transfer transformer',
34     long_description=_LONG_DESCRIPTION,
35     long_description_content_type='text/markdown',
36     author='Google Inc.',
37     author_email='[email protected]',
38     url='http://github.com/google-research/text-to-text-transfer-transformer',
39     license='Apache 2.0',
40     packages=setuptools.find_packages(),
41     package_data={
42         '': ['*.gin'],
43     },
44     scripts=[],
45     install_requires=[
46         'absl-py',
47         'babel',
48         'gin-config',
49         'mesh-tensorflow[transformer]>=0.1.13',
50         'nltk',
51         'numpy',
52         'pandas',
53         'rouge-score',
54         'sacrebleu',
55         'scikit-learn',
56         'scipy',
57         'sentencepiece',
58         'six>=1.14',  # TODO(adarob): Remove once rouge-score is updated.
59         'tensorflow-text<2.3',  # TODO(adarob): Unpin once #320 is resolved.
60 'tfds-nightly', 61 'torch', 62 'transformers>=2.7.0', 63 ], 64 extras_require={ 65 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine', 66 'google-cloud-storage', 'oauth2client'], 67 'cache-tasks': ['apache-beam'], 68 'test': ['pytest'], 69 }, 70 entry_points={ 71 'console_scripts': [ 72 't5_mesh_transformer = t5.models.mesh_transformer_main:console_entry_point', 73 't5_cache_tasks = t5.data.cache_tasks_main:console_entry_point' 74 ], 75 }, 76 classifiers=[ 77 'Development Status :: 4 - Beta', 78 'Intended Audience :: Developers', 79 'Intended Audience :: Science/Research', 80 'License :: OSI Approved :: Apache Software License', 81 'Topic :: Scientific/Engineering :: Artificial Intelligence', 82 ], 83 keywords='text nlp machinelearning', 84 ) 85 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ 'scipy', 'sentencepiece', 'six>=1.14', # TODO(adarob): Remove once rouge-score is updated. - 'tensorflow-text<2.3', # TODO(adarob): Unpin once #320 is resolved. + 'tensorflow-text', 'tfds-nightly', 'torch', 'transformers>=2.7.0',
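The pin `tensorflow-text<2.3` forced pip to install a wheel compiled against an older TensorFlow ABI, which is what surfaces as the `undefined symbol` error on import; dropping the pin lets pip resolve a build that matches the installed TensorFlow. A quick sanity check one could run before `import t5`; this is a sketch that assumes the usual tensorflow-text guidance that its major.minor version must match TensorFlow's:

# Verify tensorflow and tensorflow-text agree before importing t5.
import tensorflow as tf
import tensorflow_text as tf_text  # the import itself exercises the C++ ABI

tf_minor = tuple(tf.__version__.split('.')[:2])
text_minor = tuple(tf_text.__version__.split('.')[:2])
print('tensorflow', tf.__version__, '/ tensorflow-text', tf_text.__version__)
assert tf_minor == text_minor, 'tensorflow-text must match the TF minor version'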
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n 'scipy',\n 'sentencepiece',\n 'six>=1.14', # TODO(adarob): Remove once rouge-score is updated.\n- 'tensorflow-text<2.3', # TODO(adarob): Unpin once #320 is resolved.\n+ 'tensorflow-text',\n 'tfds-nightly',\n 'torch',\n 'transformers>=2.7.0',\n", "issue": "Issue Running T5 in colab TPU\nHi Team,\r\n\r\nI was trying to do a pre training of T5 from scratch on colab. I could see if i install t5 using (pip install t5[gcp]), and tried to connect to execute ` tf.tpu.experimental.initialize_tpu_system(tpu)`, getting below error.\r\n\r\n`InvalidArgumentError: NodeDef expected inputs 'string' do not match 0 inputs specified; Op<name=_Send; signature=tensor:T -> ; attr=T:type; attr=tensor_name:string; attr=send_device:string; attr=send_device_incarnation:int; attr=recv_device:string; attr=client_terminated:bool,default=false; is_stateful=true>; NodeDef: {{node _Send}}`\r\n\r\nIf install/ upgrade tensorflow, it gets resolved, however import of t5 does not work as below.\r\n`\r\nimport t5`\r\n\r\n`NotFoundError: /usr/local/lib/python3.6/dist-packages/tensorflow_text/python/metrics/_text_similarity_metric_ops.so: undefined symbol: _ZN10tensorflow14kernel_factory17OpKernelRegistrar12InitInternalEPKNS_9KernelDefEN4absl11string_viewESt10unique_ptrINS0_15OpKernelFactoryESt14default_deleteIS8_EE`\r\n\r\nPlease let me know how if there is a way to resolve this.\r\nThanks.\r\n\n", "before_files": [{"content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\nsetuptools.setup(\n name='t5',\n version=__version__,\n description='Text-to-text transfer transformer',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/text-to-text-transfer-transformer',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['*.gin'],\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'babel',\n 'gin-config',\n 'mesh-tensorflow[transformer]>=0.1.13',\n 'nltk',\n 'numpy',\n 'pandas',\n 'rouge-score',\n 'sacrebleu',\n 'scikit-learn',\n 'scipy',\n 'sentencepiece',\n 'six>=1.14', # TODO(adarob): Remove once rouge-score is updated.\n 'tensorflow-text<2.3', # TODO(adarob): Unpin once #320 is resolved.\n 'tfds-nightly',\n 'torch',\n 'transformers>=2.7.0',\n ],\n extras_require={\n 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'],\n 'cache-tasks': 
['apache-beam'],\n 'test': ['pytest'],\n },\n entry_points={\n 'console_scripts': [\n 't5_mesh_transformer = t5.models.mesh_transformer_main:console_entry_point',\n 't5_cache_tasks = t5.data.cache_tasks_main:console_entry_point'\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,649
num_tokens_diff: 120

problem_id: gh_patches_debug_12711
source: rasdani/github-patches
task_type: git_diff
in_source_id: conda__conda-6221
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add flag to build environment.yml without build strings https://gitter.im/conda/conda?at=59ef54ebe44c43700a70e9a4 https://twitter.com/drvinceknight/status/922837449092542464?ref_src=twsrc%5Etfw > Due to hashes of packages being introduced in `envinronment.yml` I'm getting all sorts of issues with building envs from file. (Very new problem) </issue> <code> [start of conda_env/env.py] 1 from __future__ import absolute_import, print_function 2 3 import os 4 from collections import OrderedDict 5 from conda.base.context import context 6 from conda.cli import common # TODO: this should never have to import form conda.cli 7 from conda.core.linked_data import linked 8 from copy import copy 9 from itertools import chain 10 11 from . import compat, exceptions, yaml 12 from .pip_util import add_pip_installed 13 14 def load_from_directory(directory): 15 """Load and return an ``Environment`` from a given ``directory``""" 16 files = ['environment.yml', 'environment.yaml'] 17 while True: 18 for f in files: 19 try: 20 return from_file(os.path.join(directory, f)) 21 except exceptions.EnvironmentFileNotFound: 22 pass 23 old_directory = directory 24 directory = os.path.dirname(directory) 25 if directory == old_directory: 26 break 27 raise exceptions.EnvironmentFileNotFound(files[0]) 28 29 30 # TODO This should lean more on conda instead of divining it from the outside 31 # TODO tests!!! 32 def from_environment(name, prefix, no_builds=False, ignore_channels=False): 33 """ 34 Get environment object from prefix 35 Args: 36 name: The name of environment 37 prefix: The path of prefix 38 no_builds: Whether has build requirement 39 ignore_channels: whether ignore_channels 40 41 Returns: Environment object 42 """ 43 installed = linked(prefix, ignore_channels=ignore_channels) 44 conda_pkgs = copy(installed) 45 # json=True hides the output, data is added to installed 46 add_pip_installed(prefix, installed, json=True) 47 48 pip_pkgs = sorted(installed - conda_pkgs) 49 50 if no_builds: 51 dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)] 52 else: 53 dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)] 54 if len(pip_pkgs) > 0: 55 dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]}) 56 # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq 57 # this doesn't dump correctly using pyyaml 58 channels = list(context.channels) 59 if not ignore_channels: 60 for dist in conda_pkgs: 61 if dist.channel not in channels: 62 channels.insert(0, dist.channel) 63 return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix) 64 65 66 def from_yaml(yamlstr, **kwargs): 67 """Load and return a ``Environment`` from a given ``yaml string``""" 68 data = yaml.load(yamlstr) 69 if kwargs is not None: 70 for key, value in kwargs.items(): 71 data[key] = value 72 return Environment(**data) 73 74 75 def from_file(filename): 76 if not os.path.exists(filename): 77 raise exceptions.EnvironmentFileNotFound(filename) 78 with open(filename, 'r') as fp: 79 yamlstr = fp.read() 80 return from_yaml(yamlstr, filename=filename) 81 82 83 # TODO test explicitly 84 class Dependencies(OrderedDict): 85 def __init__(self, raw, *args, **kwargs): 86 super(Dependencies, self).__init__(*args, **kwargs) 87 self.raw = raw 88 self.parse() 89 90 def parse(self): 91 if not self.raw: 92 return 93 94 self.update({'conda': []}) 95 96 for line in self.raw: 97 if 
isinstance(line, dict): 98 self.update(line) 99 else: 100 self['conda'].append(common.arg2spec(line)) 101 102 # TODO only append when it's not already present 103 def add(self, package_name): 104 self.raw.append(package_name) 105 self.parse() 106 107 108 def unique(seq, key=None): 109 """ Return only unique elements of a sequence 110 >>> tuple(unique((1, 2, 3))) 111 (1, 2, 3) 112 >>> tuple(unique((1, 2, 1, 3))) 113 (1, 2, 3) 114 Uniqueness can be defined by key keyword 115 >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len)) 116 ('cat', 'mouse') 117 """ 118 seen = set() 119 seen_add = seen.add 120 if key is None: 121 for item in seq: 122 if item not in seen: 123 seen_add(item) 124 yield item 125 else: # calculate key 126 for item in seq: 127 val = key(item) 128 if val not in seen: 129 seen_add(val) 130 yield item 131 132 133 class Environment(object): 134 def __init__(self, name=None, filename=None, channels=None, 135 dependencies=None, prefix=None): 136 self.name = name 137 self.filename = filename 138 self.prefix = prefix 139 self.dependencies = Dependencies(dependencies) 140 141 if channels is None: 142 channels = [] 143 self.channels = channels 144 145 def add_channels(self, channels): 146 self.channels = list(unique(chain.from_iterable((channels, self.channels)))) 147 148 def remove_channels(self): 149 self.channels = [] 150 151 def to_dict(self): 152 d = yaml.dict([('name', self.name)]) 153 if self.channels: 154 d['channels'] = self.channels 155 if self.dependencies: 156 d['dependencies'] = self.dependencies.raw 157 if self.prefix: 158 d['prefix'] = self.prefix 159 return d 160 161 def to_yaml(self, stream=None): 162 d = self.to_dict() 163 out = compat.u(yaml.dump(d, default_flow_style=False)) 164 if stream is None: 165 return out 166 stream.write(compat.b(out, encoding="utf-8")) 167 168 def save(self): 169 with open(self.filename, "wb") as fp: 170 self.to_yaml(stream=fp) 171 [end of conda_env/env.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda_env/env.py b/conda_env/env.py --- a/conda_env/env.py +++ b/conda_env/env.py @@ -48,9 +48,9 @@ pip_pkgs = sorted(installed - conda_pkgs) if no_builds: - dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)] + dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)] else: - dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)] + dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)] if len(pip_pkgs) > 0: dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]}) # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq
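Before this patch both branches built the identical `name=version=build` spec from `a.quad[0:3]`, so passing `no_builds` changed nothing; the fix joins only `(name, version)` in the `no_builds` branch. A sketch of the two spec shapes the branches now produce, using a hypothetical installed-package record in place of conda's `Dist` objects:

from collections import namedtuple

# Stand-in for conda's Dist record (name/version/build attributes).
Pkg = namedtuple('Pkg', 'name version build')
pkg = Pkg('numpy', '1.13.3', 'py36_0')

with_build = '='.join((pkg.name, pkg.version, pkg.build))
without_build = '='.join((pkg.name, pkg.version))
print(with_build)     # numpy=1.13.3=py36_0
print(without_build)  # numpy=1.13.3  <- what --no-builds should emit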
{"golden_diff": "diff --git a/conda_env/env.py b/conda_env/env.py\n--- a/conda_env/env.py\n+++ b/conda_env/env.py\n@@ -48,9 +48,9 @@\n pip_pkgs = sorted(installed - conda_pkgs)\n \n if no_builds:\n- dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n+ dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)]\n else:\n- dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n+ dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)]\n if len(pip_pkgs) > 0:\n dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})\n # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq\n", "issue": "Add flag to build environment.yml without build strings\nhttps://gitter.im/conda/conda?at=59ef54ebe44c43700a70e9a4\r\nhttps://twitter.com/drvinceknight/status/922837449092542464?ref_src=twsrc%5Etfw\r\n\r\n> Due to hashes of packages being introduced in `envinronment.yml` I'm getting all sorts of issues with building envs from file. (Very new problem)\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport os\nfrom collections import OrderedDict\nfrom conda.base.context import context\nfrom conda.cli import common # TODO: this should never have to import form conda.cli\nfrom conda.core.linked_data import linked\nfrom copy import copy\nfrom itertools import chain\n\nfrom . import compat, exceptions, yaml\nfrom .pip_util import add_pip_installed\n\ndef load_from_directory(directory):\n \"\"\"Load and return an ``Environment`` from a given ``directory``\"\"\"\n files = ['environment.yml', 'environment.yaml']\n while True:\n for f in files:\n try:\n return from_file(os.path.join(directory, f))\n except exceptions.EnvironmentFileNotFound:\n pass\n old_directory = directory\n directory = os.path.dirname(directory)\n if directory == old_directory:\n break\n raise exceptions.EnvironmentFileNotFound(files[0])\n\n\n# TODO This should lean more on conda instead of divining it from the outside\n# TODO tests!!!\ndef from_environment(name, prefix, no_builds=False, ignore_channels=False):\n \"\"\"\n Get environment object from prefix\n Args:\n name: The name of environment\n prefix: The path of prefix\n no_builds: Whether has build requirement\n ignore_channels: whether ignore_channels\n\n Returns: Environment object\n \"\"\"\n installed = linked(prefix, ignore_channels=ignore_channels)\n conda_pkgs = copy(installed)\n # json=True hides the output, data is added to installed\n add_pip_installed(prefix, installed, json=True)\n\n pip_pkgs = sorted(installed - conda_pkgs)\n\n if no_builds:\n dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n else:\n dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n if len(pip_pkgs) > 0:\n dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})\n # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq\n # this doesn't dump correctly using pyyaml\n channels = list(context.channels)\n if not ignore_channels:\n for dist in conda_pkgs:\n if dist.channel not in channels:\n channels.insert(0, dist.channel)\n return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix)\n\n\ndef from_yaml(yamlstr, **kwargs):\n \"\"\"Load and return a ``Environment`` from a given ``yaml string``\"\"\"\n data = yaml.load(yamlstr)\n if kwargs is not None:\n for key, value in kwargs.items():\n data[key] = value\n return Environment(**data)\n\n\ndef 
from_file(filename):\n if not os.path.exists(filename):\n raise exceptions.EnvironmentFileNotFound(filename)\n with open(filename, 'r') as fp:\n yamlstr = fp.read()\n return from_yaml(yamlstr, filename=filename)\n\n\n# TODO test explicitly\nclass Dependencies(OrderedDict):\n def __init__(self, raw, *args, **kwargs):\n super(Dependencies, self).__init__(*args, **kwargs)\n self.raw = raw\n self.parse()\n\n def parse(self):\n if not self.raw:\n return\n\n self.update({'conda': []})\n\n for line in self.raw:\n if isinstance(line, dict):\n self.update(line)\n else:\n self['conda'].append(common.arg2spec(line))\n\n # TODO only append when it's not already present\n def add(self, package_name):\n self.raw.append(package_name)\n self.parse()\n\n\ndef unique(seq, key=None):\n \"\"\" Return only unique elements of a sequence\n >>> tuple(unique((1, 2, 3)))\n (1, 2, 3)\n >>> tuple(unique((1, 2, 1, 3)))\n (1, 2, 3)\n Uniqueness can be defined by key keyword\n >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))\n ('cat', 'mouse')\n \"\"\"\n seen = set()\n seen_add = seen.add\n if key is None:\n for item in seq:\n if item not in seen:\n seen_add(item)\n yield item\n else: # calculate key\n for item in seq:\n val = key(item)\n if val not in seen:\n seen_add(val)\n yield item\n\n\nclass Environment(object):\n def __init__(self, name=None, filename=None, channels=None,\n dependencies=None, prefix=None):\n self.name = name\n self.filename = filename\n self.prefix = prefix\n self.dependencies = Dependencies(dependencies)\n\n if channels is None:\n channels = []\n self.channels = channels\n\n def add_channels(self, channels):\n self.channels = list(unique(chain.from_iterable((channels, self.channels))))\n\n def remove_channels(self):\n self.channels = []\n\n def to_dict(self):\n d = yaml.dict([('name', self.name)])\n if self.channels:\n d['channels'] = self.channels\n if self.dependencies:\n d['dependencies'] = self.dependencies.raw\n if self.prefix:\n d['prefix'] = self.prefix\n return d\n\n def to_yaml(self, stream=None):\n d = self.to_dict()\n out = compat.u(yaml.dump(d, default_flow_style=False))\n if stream is None:\n return out\n stream.write(compat.b(out, encoding=\"utf-8\"))\n\n def save(self):\n with open(self.filename, \"wb\") as fp:\n self.to_yaml(stream=fp)\n", "path": "conda_env/env.py"}]}
num_tokens_prompt: 2,295
num_tokens_diff: 222

problem_id: gh_patches_debug_4221
source: rasdani/github-patches
task_type: git_diff
in_source_id: pystiche__pystiche-534
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setup CI workflow to build and upload galleries
Although we have logic to download pre-built galleries in the documentation

https://github.com/pystiche/pystiche/blob/65f4d787e44b1ffbf7e5b6e48298ed8c7460e5a9/docs/source/conf.py#L160-L166

the builder hasn't been running for quite some time, because I no longer have access to the infrastructure.

Preferably, we should have a CI workflow that does this. The problem is that without a GPU our gallery takes forever to build. Since CI machines that have a GPU are not free (at least I couldn't find any), we probably need to spend some money to achieve this.

So far I have come up with two possible solutions:

1. Build our own custom build server with a GPU and run a self-hosted GitHub Actions workflow on it. This is problematic for a couple of reasons:
    1. GitHub actually warns against doing that in public repositories due to security concerns.
    2. Buying the machine and especially the GPU is quite expensive, and as long as no one sponsors this, I'm currently not willing to pay for it out of my own pocket.
    3. We would need to maintain the server, which would take time away from working on `pystiche` itself.
2. Spin up a cloud instance with a GPU to do the building for us. This solves all the issues of option 1, with the disadvantage of being harder to set up. But that disadvantage also vanishes if we use something like [`cirun.io`](https://cirun.io) (cc @aktech). It looks like, other than specifying the type of cloud instance we want to use, we can simply use the default GitHub Actions workflow syntax, which is amazing.

The only roadblock I'm currently seeing is that we don't need a run every day. If you look at the past commit history, there are quite a few days where no commit is merged, so we would be wasting money by running the build every day. My current idea is to run a default CPU workflow that checks whether the current commit was already built, and only if that is not the case spins up the cloud instance. From some quick research into this, we can probably use https://github.com/peter-evans/repository-dispatch as the dispatcher.
</issue> <code> [start of docs/source/conf.py] 1 import contextlib 2 import os 3 import re 4 import shutil 5 import warnings 6 from datetime import datetime 7 from distutils.util import strtobool 8 from importlib_metadata import metadata as extract_metadata 9 from os import path 10 from unittest import mock 11 from urllib.parse import urljoin 12 13 from sphinx_gallery.sorting import ExampleTitleSortKey, ExplicitOrder 14 from tqdm import tqdm 15 16 import torch 17 18 from pystiche.misc import download_file 19 20 HERE = path.dirname(__file__) 21 PROJECT_ROOT = path.abspath(path.join(HERE, "..", "..")) 22 23 24 def get_bool_env_var(name, default=False): 25 try: 26 return bool(strtobool(os.environ[name])) 27 except KeyError: 28 return default 29 30 31 GITHUB_ACTIONS = get_bool_env_var("GITHUB_ACTIONS") 32 RTD = get_bool_env_var("READTHEDOCS") 33 CI = GITHUB_ACTIONS or RTD or get_bool_env_var("CI") 34 35 36 def project(): 37 extension = None 38 39 metadata = extract_metadata("pystiche") 40 project = metadata["name"] 41 author = metadata["author"] 42 copyright = f"{datetime.now().year}, {author}" 43 release = metadata["version"] 44 version = release.split(".dev")[0] 45 config = dict( 46 project=project, 47 author=author, 48 copyright=copyright, 49 release=release, 50 version=version, 51 ) 52 53 return extension, config 54 55 56 def autodoc(): 57 extensions = [ 58 "sphinx.ext.autodoc", 59 "sphinx.ext.napoleon", 60 "sphinx_autodoc_typehints", 61 ] 62 63 config = None 64 65 return extensions, config 66 67 68 def intersphinx(): 69 extension = "sphinx.ext.intersphinx" 70 config = dict( 71 intersphinx_mapping={ 72 "python": ("https://docs.python.org/3.6", None), 73 "torch": ("https://pytorch.org/docs/stable/", None), 74 "torchvision": ("https://pytorch.org/docs/stable/", None), 75 "PIL": ("https://pillow.readthedocs.io/en/stable/", None), 76 "numpy": ("https://numpy.org/doc/1.18/", None), 77 "requests": ("https://requests.readthedocs.io/en/stable/", None), 78 "matplotlib": ("https://matplotlib.org", None), 79 } 80 ) 81 return extension, config 82 83 84 def html(): 85 extension = None 86 87 config = dict(html_theme="sphinx_rtd_theme") 88 89 return extension, config 90 91 92 def latex(): 93 extension = None 94 95 with open(path.join(HERE, "custom_cmds.tex"), "r") as fh: 96 custom_cmds = fh.read() 97 config = dict( 98 latex_elements={"preamble": custom_cmds}, 99 mathjax_inline=[r"\(" + custom_cmds, r"\)"], 100 mathjax_display=[r"\[" + custom_cmds, r"\]"], 101 ) 102 103 return extension, config 104 105 106 def bibtex(): 107 extension = "sphinxcontrib.bibtex" 108 109 config = dict(bibtex_bibfiles=["references.bib"]) 110 111 return extension, config 112 113 114 def doctest(): 115 extension = "sphinx.ext.doctest" 116 117 doctest_global_setup = """ 118 import torch 119 from torch import nn 120 121 import pystiche 122 123 import warnings 124 warnings.filterwarnings("ignore", category=FutureWarning) 125 126 from unittest import mock 127 128 patcher = mock.patch( 129 "pystiche.enc.models.utils.ModelMultiLayerEncoder.load_state_dict_from_url" 130 ) 131 patcher.start() 132 """ 133 134 doctest_global_cleanup = """ 135 mock.patch.stopall() 136 """ 137 config = dict( 138 doctest_global_setup=doctest_global_setup, 139 doctest_global_cleanup=doctest_global_cleanup, 140 ) 141 142 return extension, config 143 144 145 def sphinx_gallery(): 146 extension = "sphinx_gallery.gen_gallery" 147 148 plot_gallery = get_bool_env_var("PYSTICHE_PLOT_GALLERY", default=not CI) 149 download_gallery = 
get_bool_env_var("PYSTICHE_DOWNLOAD_GALLERY", default=CI) 150 151 def download(): 152 nonlocal extension 153 nonlocal plot_gallery 154 155 # version and release are available as soon as the project config is loaded 156 version = globals()["version"] 157 release = globals()["release"] 158 159 base = "https://download.pystiche.org/galleries/" 160 is_dev = version != release 161 file = "master.zip" if is_dev else f"v{version}.zip" 162 163 url = urljoin(base, file) 164 print(f"Downloading pre-built galleries from {url}") 165 download_file(url, file) 166 167 with contextlib.suppress(FileNotFoundError): 168 shutil.rmtree(path.join(HERE, "galleries")) 169 shutil.unpack_archive(file, extract_dir=".") 170 os.remove(file) 171 172 extension = "sphinx_gallery.load_style" 173 plot_gallery = False 174 175 def show_cuda_memory(func): 176 torch.cuda.reset_peak_memory_stats() 177 out = func() 178 179 stats = torch.cuda.memory_stats() 180 peak_bytes_usage = stats["allocated_bytes.all.peak"] 181 memory = peak_bytes_usage / 1024 ** 2 182 183 return memory, out 184 185 def patch_tqdm(): 186 patchers = [mock.patch("tqdm.std._supports_unicode", return_value=True)] 187 188 display = tqdm.display 189 close = tqdm.close 190 displayed = set() 191 192 def display_only_last(self, msg=None, pos=None): 193 if self.n != self.total or self in displayed: 194 return 195 196 display(self, msg=msg, pos=pos) 197 displayed.add(self) 198 199 patchers.append(mock.patch("tqdm.std.tqdm.display", new=display_only_last)) 200 201 def close_(self): 202 close(self) 203 with contextlib.suppress(KeyError): 204 displayed.remove(self) 205 206 patchers.append(mock.patch("tqdm.std.tqdm.close", new=close_)) 207 208 for patcher in patchers: 209 patcher.start() 210 211 class PysticheExampleTitleSortKey(ExampleTitleSortKey): 212 def __call__(self, filename): 213 # The beginner example *without* pystiche is placed before the example 214 # *with* to clarify the narrative. 215 if filename == "example_nst_without_pystiche.py": 216 return "1" 217 elif filename == "example_nst_with_pystiche.py": 218 return "2" 219 else: 220 return super().__call__(filename) 221 222 def filter_warnings(): 223 # See #https://github.com/pytorch/pytorch/issues/60053 224 warnings.filterwarnings( 225 "ignore", 226 category=UserWarning, 227 message=( 228 re.escape( 229 "Named tensors and all their associated APIs are an experimental " 230 "feature and subject to change. Please do not use them for " 231 "anything important until they are released as stable. (Triggered " 232 "internally at /pytorch/c10/core/TensorImpl.h:1156.)" 233 ) 234 ), 235 ) 236 237 if download_gallery: 238 download() 239 240 if plot_gallery and not torch.cuda.is_available(): 241 msg = ( 242 "The galleries will be built, but CUDA is not available. " 243 "This will take a long time." 
244 ) 245 print(msg) 246 247 sphinx_gallery_conf = { 248 "examples_dirs": path.join(PROJECT_ROOT, "examples"), 249 "gallery_dirs": path.join("galleries", "examples"), 250 "filename_pattern": re.escape(os.sep) + r"example_\w+[.]py$", 251 "ignore_pattern": re.escape(os.sep) + r"_\w+[.]py$", 252 "line_numbers": True, 253 "remove_config_comments": True, 254 "plot_gallery": plot_gallery, 255 "subsection_order": ExplicitOrder( 256 [ 257 path.join("..", "..", "examples", sub_gallery) 258 for sub_gallery in ("beginner", "advanced") 259 ] 260 ), 261 "within_subsection_order": PysticheExampleTitleSortKey, 262 "show_memory": show_cuda_memory if torch.cuda.is_available() else True, 263 } 264 265 config = dict(sphinx_gallery_conf=sphinx_gallery_conf) 266 filter_warnings() 267 268 patch_tqdm() 269 filter_warnings() 270 271 return extension, config 272 273 274 def logo(): 275 extension = None 276 277 config = dict(html_logo="../../logo.svg") 278 279 return extension, config 280 281 282 extensions = [] 283 for loader in ( 284 project, 285 autodoc, 286 intersphinx, 287 html, 288 latex, 289 bibtex, 290 doctest, 291 sphinx_gallery, 292 logo, 293 ): 294 extension, config = loader() 295 296 if extension: 297 if isinstance(extension, str): 298 extension = (extension,) 299 extensions.extend(extension) 300 301 if config: 302 globals().update(config) 303 [end of docs/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -158,7 +158,7 @@ base = "https://download.pystiche.org/galleries/" is_dev = version != release - file = "master.zip" if is_dev else f"v{version}.zip" + file = "main.zip" if is_dev else f"v{version}.zip" url = urljoin(base, file) print(f"Downloading pre-built galleries from {url}")
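The dev branch of `download()` composed the archive name from the repository's old default branch name, so once the branch was renamed (presumably the master-to-main rename) the prebuilt-gallery URL stopped resolving; released versions keep `v{version}.zip`. The post-patch selection logic, extracted into a standalone sketch with the base URL hard-coded in `conf.py`:

from urllib.parse import urljoin

def gallery_archive_url(version, release):
    base = 'https://download.pystiche.org/galleries/'
    is_dev = version != release          # release carries a ".dev" suffix
    file = 'main.zip' if is_dev else f'v{version}.zip'
    return urljoin(base, file)

print(gallery_archive_url('1.1.0', '1.1.0.dev20'))  # .../main.zip
print(gallery_archive_url('1.0.1', '1.0.1'))        # .../v1.0.1.zip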
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -158,7 +158,7 @@\n \n base = \"https://download.pystiche.org/galleries/\"\n is_dev = version != release\n- file = \"master.zip\" if is_dev else f\"v{version}.zip\"\n+ file = \"main.zip\" if is_dev else f\"v{version}.zip\"\n \n url = urljoin(base, file)\n print(f\"Downloading pre-built galleries from {url}\")\n", "issue": "Setup CI workflow to build and upload galleries\nAlthough we have logic to download pre-built galleries in the documentation\r\n\r\nhttps://github.com/pystiche/pystiche/blob/65f4d787e44b1ffbf7e5b6e48298ed8c7460e5a9/docs/source/conf.py#L160-L166\r\n\r\nthe builder isn't running for quite some time, because I have no longer have access to the infrastructure.\r\n\r\nPreferably, we should have a CI workflow that does this. The problem is, that without a GPU our gallery takes forever to build. Since CI machines that have a GPU are not free (at least I couldn't find any), we probably need to spend some money to achieve this. \r\n\r\nSo far I came up with two possible solutions:\r\n\r\n1. Build our own custom build server with a GPU and run a self-hosted GitHub Actions workflow on it. This is problematic for a couple of reasons:\r\n 1. GitHub actually warns not to do that in public repositories due to security concerns.\r\n 2. Buying the machine and especially the GPU is quite expensive and as long no one sponsors this, I'm currently not willing to do this out of my own pocket.\r\n 3. We need to maintain the server which will take some time from doing something else on `pystiche`.\r\n2. Spin up a cloud instance with GPU to do the building for us. This solves all the issues of 1. with the disadvantage of being harder to setup. But this also vanishes if we use something like [`cirun.io`](https://cirun.io) (cc @aktech). It looks like other than specifying the type of cloud instance we want to use we can simply use the default GitHub Actions workflow syntax, which is amazing.\r\n\r\nThe only roadblock I'm currently seeing is that we probably don't need a run every day. If you look at the past commit history, there are quite a few days were no commit is merged so we would be wasting money by running the build every day. My current idea is to run a default CPU workflow that checks if the current commit was already built and only if that is not the case spins up the could instance. 
From a quick research into this, we probably can use https://github.com/peter-evans/repository-dispatch is a dispatcher.\r\n\n", "before_files": [{"content": "import contextlib\nimport os\nimport re\nimport shutil\nimport warnings\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom importlib_metadata import metadata as extract_metadata\nfrom os import path\nfrom unittest import mock\nfrom urllib.parse import urljoin\n\nfrom sphinx_gallery.sorting import ExampleTitleSortKey, ExplicitOrder\nfrom tqdm import tqdm\n\nimport torch\n\nfrom pystiche.misc import download_file\n\nHERE = path.dirname(__file__)\nPROJECT_ROOT = path.abspath(path.join(HERE, \"..\", \"..\"))\n\n\ndef get_bool_env_var(name, default=False):\n try:\n return bool(strtobool(os.environ[name]))\n except KeyError:\n return default\n\n\nGITHUB_ACTIONS = get_bool_env_var(\"GITHUB_ACTIONS\")\nRTD = get_bool_env_var(\"READTHEDOCS\")\nCI = GITHUB_ACTIONS or RTD or get_bool_env_var(\"CI\")\n\n\ndef project():\n extension = None\n\n metadata = extract_metadata(\"pystiche\")\n project = metadata[\"name\"]\n author = metadata[\"author\"]\n copyright = f\"{datetime.now().year}, {author}\"\n release = metadata[\"version\"]\n version = release.split(\".dev\")[0]\n config = dict(\n project=project,\n author=author,\n copyright=copyright,\n release=release,\n version=version,\n )\n\n return extension, config\n\n\ndef autodoc():\n extensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx_autodoc_typehints\",\n ]\n\n config = None\n\n return extensions, config\n\n\ndef intersphinx():\n extension = \"sphinx.ext.intersphinx\"\n config = dict(\n intersphinx_mapping={\n \"python\": (\"https://docs.python.org/3.6\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/docs/stable/\", None),\n \"PIL\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/1.18/\", None),\n \"requests\": (\"https://requests.readthedocs.io/en/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org\", None),\n }\n )\n return extension, config\n\n\ndef html():\n extension = None\n\n config = dict(html_theme=\"sphinx_rtd_theme\")\n\n return extension, config\n\n\ndef latex():\n extension = None\n\n with open(path.join(HERE, \"custom_cmds.tex\"), \"r\") as fh:\n custom_cmds = fh.read()\n config = dict(\n latex_elements={\"preamble\": custom_cmds},\n mathjax_inline=[r\"\\(\" + custom_cmds, r\"\\)\"],\n mathjax_display=[r\"\\[\" + custom_cmds, r\"\\]\"],\n )\n\n return extension, config\n\n\ndef bibtex():\n extension = \"sphinxcontrib.bibtex\"\n\n config = dict(bibtex_bibfiles=[\"references.bib\"])\n\n return extension, config\n\n\ndef doctest():\n extension = \"sphinx.ext.doctest\"\n\n doctest_global_setup = \"\"\"\nimport torch\nfrom torch import nn\n\nimport pystiche\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\nfrom unittest import mock\n\npatcher = mock.patch(\n \"pystiche.enc.models.utils.ModelMultiLayerEncoder.load_state_dict_from_url\"\n)\npatcher.start()\n\"\"\"\n\n doctest_global_cleanup = \"\"\"\nmock.patch.stopall()\n\"\"\"\n config = dict(\n doctest_global_setup=doctest_global_setup,\n doctest_global_cleanup=doctest_global_cleanup,\n )\n\n return extension, config\n\n\ndef sphinx_gallery():\n extension = \"sphinx_gallery.gen_gallery\"\n\n plot_gallery = get_bool_env_var(\"PYSTICHE_PLOT_GALLERY\", default=not CI)\n download_gallery = get_bool_env_var(\"PYSTICHE_DOWNLOAD_GALLERY\", 
default=CI)\n\n def download():\n nonlocal extension\n nonlocal plot_gallery\n\n # version and release are available as soon as the project config is loaded\n version = globals()[\"version\"]\n release = globals()[\"release\"]\n\n base = \"https://download.pystiche.org/galleries/\"\n is_dev = version != release\n file = \"master.zip\" if is_dev else f\"v{version}.zip\"\n\n url = urljoin(base, file)\n print(f\"Downloading pre-built galleries from {url}\")\n download_file(url, file)\n\n with contextlib.suppress(FileNotFoundError):\n shutil.rmtree(path.join(HERE, \"galleries\"))\n shutil.unpack_archive(file, extract_dir=\".\")\n os.remove(file)\n\n extension = \"sphinx_gallery.load_style\"\n plot_gallery = False\n\n def show_cuda_memory(func):\n torch.cuda.reset_peak_memory_stats()\n out = func()\n\n stats = torch.cuda.memory_stats()\n peak_bytes_usage = stats[\"allocated_bytes.all.peak\"]\n memory = peak_bytes_usage / 1024 ** 2\n\n return memory, out\n\n def patch_tqdm():\n patchers = [mock.patch(\"tqdm.std._supports_unicode\", return_value=True)]\n\n display = tqdm.display\n close = tqdm.close\n displayed = set()\n\n def display_only_last(self, msg=None, pos=None):\n if self.n != self.total or self in displayed:\n return\n\n display(self, msg=msg, pos=pos)\n displayed.add(self)\n\n patchers.append(mock.patch(\"tqdm.std.tqdm.display\", new=display_only_last))\n\n def close_(self):\n close(self)\n with contextlib.suppress(KeyError):\n displayed.remove(self)\n\n patchers.append(mock.patch(\"tqdm.std.tqdm.close\", new=close_))\n\n for patcher in patchers:\n patcher.start()\n\n class PysticheExampleTitleSortKey(ExampleTitleSortKey):\n def __call__(self, filename):\n # The beginner example *without* pystiche is placed before the example\n # *with* to clarify the narrative.\n if filename == \"example_nst_without_pystiche.py\":\n return \"1\"\n elif filename == \"example_nst_with_pystiche.py\":\n return \"2\"\n else:\n return super().__call__(filename)\n\n def filter_warnings():\n # See #https://github.com/pytorch/pytorch/issues/60053\n warnings.filterwarnings(\n \"ignore\",\n category=UserWarning,\n message=(\n re.escape(\n \"Named tensors and all their associated APIs are an experimental \"\n \"feature and subject to change. Please do not use them for \"\n \"anything important until they are released as stable. (Triggered \"\n \"internally at /pytorch/c10/core/TensorImpl.h:1156.)\"\n )\n ),\n )\n\n if download_gallery:\n download()\n\n if plot_gallery and not torch.cuda.is_available():\n msg = (\n \"The galleries will be built, but CUDA is not available. 
\"\n \"This will take a long time.\"\n )\n print(msg)\n\n sphinx_gallery_conf = {\n \"examples_dirs\": path.join(PROJECT_ROOT, \"examples\"),\n \"gallery_dirs\": path.join(\"galleries\", \"examples\"),\n \"filename_pattern\": re.escape(os.sep) + r\"example_\\w+[.]py$\",\n \"ignore_pattern\": re.escape(os.sep) + r\"_\\w+[.]py$\",\n \"line_numbers\": True,\n \"remove_config_comments\": True,\n \"plot_gallery\": plot_gallery,\n \"subsection_order\": ExplicitOrder(\n [\n path.join(\"..\", \"..\", \"examples\", sub_gallery)\n for sub_gallery in (\"beginner\", \"advanced\")\n ]\n ),\n \"within_subsection_order\": PysticheExampleTitleSortKey,\n \"show_memory\": show_cuda_memory if torch.cuda.is_available() else True,\n }\n\n config = dict(sphinx_gallery_conf=sphinx_gallery_conf)\n filter_warnings()\n\n patch_tqdm()\n filter_warnings()\n\n return extension, config\n\n\ndef logo():\n extension = None\n\n config = dict(html_logo=\"../../logo.svg\")\n\n return extension, config\n\n\nextensions = []\nfor loader in (\n project,\n autodoc,\n intersphinx,\n html,\n latex,\n bibtex,\n doctest,\n sphinx_gallery,\n logo,\n):\n extension, config = loader()\n\n if extension:\n if isinstance(extension, str):\n extension = (extension,)\n extensions.extend(extension)\n\n if config:\n globals().update(config)\n", "path": "docs/source/conf.py"}]}
num_tokens_prompt: 3,713
num_tokens_diff: 127

problem_id: gh_patches_debug_8296
source: rasdani/github-patches
task_type: git_diff
in_source_id: plone__Products.CMFPlone-1383
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Author feedback form broken Author feedback seems to have been missed in the refactoring of settings into the registry. It stills expects to find portal.email_from_name </issue> <code> [start of Products/CMFPlone/browser/author.py] 1 from AccessControl import Unauthorized 2 3 from Products.CMFCore.interfaces import IPropertiesTool 4 from Products.CMFPlone import PloneMessageFactory as _ 5 from Products.CMFPlone.interfaces import ISecuritySchema 6 from Products.CMFPlone.interfaces.controlpanel import IMailSchema 7 from Products.CMFPlone.utils import getToolByName 8 from Products.CMFPlone.utils import pretty_title_or_id 9 from Products.Five.browser import BrowserView 10 from Products.MailHost.interfaces import IMailHost 11 from Products.statusmessages.interfaces import IStatusMessage 12 13 from ZODB.POSException import ConflictError 14 15 from interfaces import IAuthorFeedbackForm 16 17 from plone.registry.interfaces import IRegistry 18 19 from urllib import quote_plus 20 21 from z3c.form import button 22 from z3c.form import field 23 from z3c.form import form 24 from z3c.form.interfaces import HIDDEN_MODE 25 26 from zope.component import getMultiAdapter 27 from zope.component import getUtility 28 from zope.interface import implementer 29 from zope.publisher.interfaces import IPublishTraverse 30 31 import logging 32 33 logger = logging.getLogger("Plone") 34 35 36 class AuthorFeedbackForm(form.Form): 37 38 fields = field.Fields(IAuthorFeedbackForm) 39 ignoreContext = True 40 41 @button.buttonAndHandler(_(u'label_send', default='Send'), 42 name='send') 43 def handle_send(self, action): 44 self.portal_state = getMultiAdapter( 45 (self.context, self.request), 46 name=u'plone_portal_state' 47 ) 48 49 self.portal = self.portal_state.portal() 50 self.membership_tool = getToolByName( 51 self.context, 'portal_membership' 52 ) 53 54 self.feedback_template = self.context.restrictedTraverse( 55 '@@author-feedback-template' 56 ) 57 58 data, errors = self.extractData() 59 if errors: 60 IStatusMessage(self.request).addStatusMessage( 61 self.formErrorsMessage, 62 type=u'error' 63 ) 64 65 return 66 67 referer = data.get('referer', 'unknown referer') 68 subject = data.get('subject', '') 69 message = data.get('message', '') 70 # Author is None means portal administrator 71 author = data.get('author', None) 72 73 sender = self.portal_state.member() 74 registry = getUtility(IRegistry) 75 mail_settings = registry.forInterface(IMailSchema, prefix='plone') 76 envelope_from = mail_settings.email_from_address 77 78 if author is None: 79 send_to_address = mail_settings.email_from_address 80 else: 81 author_member = self.membership_tool.getMemberById(author) 82 send_to_address = author_member.getProperty('email') 83 84 send_from_address = sender.getProperty('email') 85 86 if send_from_address == '': 87 IStatusMessage(self.request).addStatusMessage( 88 _(u'Could not find a valid email address'), 89 type=u'error' 90 ) 91 return 92 93 sender_id = "%s (%s), %s" % ( 94 sender.getProperty('fullname'), 95 sender.getId(), 96 send_from_address 97 ) 98 99 mail_host = getUtility(IMailHost) 100 registry = getUtility(IRegistry) 101 email_charset = registry.get('plone.email_charset', 'utf-8') 102 103 try: 104 message = self.feedback_template( 105 self, send_from_address=send_from_address, 106 sender_id=sender_id, url=referer, subject=subject, 107 message=message, encoding=email_charset 108 ) 109 110 message = 
message.encode(email_charset) 111 112 mail_host.send( 113 message, send_to_address, envelope_from, 114 subject=subject, charset=email_charset 115 ) 116 except ConflictError: 117 raise 118 except Exception as e: 119 logger.info("Unable to send mail: " + str(e)) 120 121 IStatusMessage(self.request).addStatusMessage( 122 _(u'Unable to send mail.'), 123 type=u'error' 124 ) 125 126 return 127 128 IStatusMessage(self.request).addStatusMessage( 129 _(u'Mail sent.'), 130 type=u'info' 131 ) 132 self.request.response.redirect('%s/author/%s' % ( 133 self.portal.absolute_url(), 134 author or '')) 135 return 136 137 138 @implementer(IPublishTraverse) 139 class AuthorView(BrowserView): 140 141 def __init__(self, context, request): 142 super(AuthorView, self).__init__(context, request) 143 144 self.username = None 145 146 def publishTraverse(self, request, name): 147 request['TraversalRequestNameStack'] = [] 148 149 self.username = name 150 return self 151 152 @property 153 def is_anonymous(self): 154 return self.portal_state.anonymous() 155 156 @property 157 def is_owner(self): 158 current_member = self.portal_state.member() 159 return current_member.getId() == self.username 160 161 @property 162 def author(self): 163 username = self.username 164 165 if not username: 166 return {} 167 168 authorinfo = self.membership_tool.getMemberInfo(username) 169 portrait = self.membership_tool.getPersonalPortrait(username) 170 171 if not authorinfo or not portrait: 172 return {} 173 174 return { 175 'info': authorinfo, 176 'portrait': portrait 177 } 178 179 @property 180 def member_info(self): 181 current_member = self.portal_state.member() 182 if not current_member or not current_member.getId(): 183 return {'url': None, 'email': None} 184 185 return { 186 'url': quote_plus(current_member.getId()), 187 'email': current_member.getProperty('email') 188 } 189 190 @property 191 def author_content(self): 192 results = [] 193 194 plone_view = self.context.restrictedTraverse( 195 '@@plone' 196 ) 197 198 brains = self.portal_catalog.searchResults( 199 Creator=self.username, 200 sort_on='created', 201 sort_order='reverse' 202 ) 203 204 for brain in brains[:10]: 205 results.append({ 206 'title': pretty_title_or_id( 207 self, brain 208 ), 209 'date': plone_view.toLocalizedTime( 210 brain.Date 211 ), 212 'url': brain.getURL() 213 }) 214 215 return results 216 217 def home_folder(self, username): 218 return self.membership_tool.getHomeFolder(id=username) 219 220 def __call__(self): 221 222 self.portal_properties = getUtility( 223 IPropertiesTool 224 ) 225 226 self.portal_catalog = getToolByName( 227 self.context, 'portal_catalog' 228 ) 229 230 # XXX: getUtility call does not work. 
231 self.membership_tool = getToolByName( 232 self.context, 'portal_membership' 233 ) 234 235 self.portal_state = getMultiAdapter( 236 (self.context, self.request), 237 name=u'plone_portal_state' 238 ) 239 240 self.feedback_form = AuthorFeedbackForm( 241 self.context, self.request 242 ) 243 self.feedback_form.update() 244 self.feedback_form.widgets["author"].mode = HIDDEN_MODE 245 self.feedback_form.widgets["referer"].mode = HIDDEN_MODE 246 self.feedback_form.widgets["author"].value = self.username 247 self.feedback_form.widgets["referer"].value = self.request.get( 248 'referer', 249 self.request.get('HTTP_REFERER', 'unknown url') 250 ) 251 252 registry = getUtility(IRegistry) 253 security_settings = registry.forInterface( 254 ISecuritySchema, prefix='plone') 255 allow_anonymous_view_about = security_settings.allow_anon_views_about 256 257 mail_settings = registry.forInterface(IMailSchema, prefix='plone') 258 self.email_from_address = mail_settings.email_from_address 259 260 if self.is_anonymous and not allow_anonymous_view_about: 261 raise Unauthorized() 262 263 return self.index() 264 [end of Products/CMFPlone/browser/author.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
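<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>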
diff --git a/Products/CMFPlone/browser/author.py b/Products/CMFPlone/browser/author.py --- a/Products/CMFPlone/browser/author.py +++ b/Products/CMFPlone/browser/author.py @@ -104,7 +104,8 @@ message = self.feedback_template( self, send_from_address=send_from_address, sender_id=sender_id, url=referer, subject=subject, - message=message, encoding=email_charset + message=message, encoding=email_charset, + email_from_name=mail_settings.email_from_name ) message = message.encode(email_charset)
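The patch works because the mail settings that once lived as portal properties (including `email_from_name`) are now registry records. A minimal sketch of that lookup, under the assumption of a running Plone 5-style site whose component registry is set up; the imports mirror the ones already present in `author.py` above:

```python
from plone.registry.interfaces import IRegistry
from Products.CMFPlone.interfaces.controlpanel import IMailSchema
from zope.component import getUtility

# Fetch the mail control-panel fields from the registry; the old
# portal.email_from_name attribute no longer exists.
registry = getUtility(IRegistry)
mail_settings = registry.forInterface(IMailSchema, prefix='plone')
print(mail_settings.email_from_name, mail_settings.email_from_address)
```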
{"golden_diff": "diff --git a/Products/CMFPlone/browser/author.py b/Products/CMFPlone/browser/author.py\n--- a/Products/CMFPlone/browser/author.py\n+++ b/Products/CMFPlone/browser/author.py\n@@ -104,7 +104,8 @@\n message = self.feedback_template(\n self, send_from_address=send_from_address,\n sender_id=sender_id, url=referer, subject=subject,\n- message=message, encoding=email_charset\n+ message=message, encoding=email_charset,\n+ email_from_name=mail_settings.email_from_name\n )\n \n message = message.encode(email_charset)\n", "issue": "Author feedback form broken\nAuthor feedback seems to have been missed in the refactoring of settings into the registry. It stills expects to find portal.email_from_name\n\n", "before_files": [{"content": "from AccessControl import Unauthorized\n\nfrom Products.CMFCore.interfaces import IPropertiesTool\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import ISecuritySchema\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.CMFPlone.utils import getToolByName\nfrom Products.CMFPlone.utils import pretty_title_or_id\nfrom Products.Five.browser import BrowserView\nfrom Products.MailHost.interfaces import IMailHost\nfrom Products.statusmessages.interfaces import IStatusMessage\n\nfrom ZODB.POSException import ConflictError\n\nfrom interfaces import IAuthorFeedbackForm\n\nfrom plone.registry.interfaces import IRegistry\n\nfrom urllib import quote_plus\n\nfrom z3c.form import button\nfrom z3c.form import field\nfrom z3c.form import form\nfrom z3c.form.interfaces import HIDDEN_MODE\n\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.interface import implementer\nfrom zope.publisher.interfaces import IPublishTraverse\n\nimport logging\n\nlogger = logging.getLogger(\"Plone\")\n\n\nclass AuthorFeedbackForm(form.Form):\n\n fields = field.Fields(IAuthorFeedbackForm)\n ignoreContext = True\n\n @button.buttonAndHandler(_(u'label_send', default='Send'),\n name='send')\n def handle_send(self, action):\n self.portal_state = getMultiAdapter(\n (self.context, self.request),\n name=u'plone_portal_state'\n )\n\n self.portal = self.portal_state.portal()\n self.membership_tool = getToolByName(\n self.context, 'portal_membership'\n )\n\n self.feedback_template = self.context.restrictedTraverse(\n '@@author-feedback-template'\n )\n\n data, errors = self.extractData()\n if errors:\n IStatusMessage(self.request).addStatusMessage(\n self.formErrorsMessage,\n type=u'error'\n )\n\n return\n\n referer = data.get('referer', 'unknown referer')\n subject = data.get('subject', '')\n message = data.get('message', '')\n # Author is None means portal administrator\n author = data.get('author', None)\n\n sender = self.portal_state.member()\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix='plone')\n envelope_from = mail_settings.email_from_address\n\n if author is None:\n send_to_address = mail_settings.email_from_address\n else:\n author_member = self.membership_tool.getMemberById(author)\n send_to_address = author_member.getProperty('email')\n\n send_from_address = sender.getProperty('email')\n\n if send_from_address == '':\n IStatusMessage(self.request).addStatusMessage(\n _(u'Could not find a valid email address'),\n type=u'error'\n )\n return\n\n sender_id = \"%s (%s), %s\" % (\n sender.getProperty('fullname'),\n sender.getId(),\n send_from_address\n )\n\n mail_host = getUtility(IMailHost)\n registry = getUtility(IRegistry)\n 
email_charset = registry.get('plone.email_charset', 'utf-8')\n\n try:\n message = self.feedback_template(\n self, send_from_address=send_from_address,\n sender_id=sender_id, url=referer, subject=subject,\n message=message, encoding=email_charset\n )\n\n message = message.encode(email_charset)\n\n mail_host.send(\n message, send_to_address, envelope_from,\n subject=subject, charset=email_charset\n )\n except ConflictError:\n raise\n except Exception as e:\n logger.info(\"Unable to send mail: \" + str(e))\n\n IStatusMessage(self.request).addStatusMessage(\n _(u'Unable to send mail.'),\n type=u'error'\n )\n\n return\n\n IStatusMessage(self.request).addStatusMessage(\n _(u'Mail sent.'),\n type=u'info'\n )\n self.request.response.redirect('%s/author/%s' % (\n self.portal.absolute_url(),\n author or ''))\n return\n\n\n@implementer(IPublishTraverse)\nclass AuthorView(BrowserView):\n\n def __init__(self, context, request):\n super(AuthorView, self).__init__(context, request)\n\n self.username = None\n\n def publishTraverse(self, request, name):\n request['TraversalRequestNameStack'] = []\n\n self.username = name\n return self\n\n @property\n def is_anonymous(self):\n return self.portal_state.anonymous()\n\n @property\n def is_owner(self):\n current_member = self.portal_state.member()\n return current_member.getId() == self.username\n\n @property\n def author(self):\n username = self.username\n\n if not username:\n return {}\n\n authorinfo = self.membership_tool.getMemberInfo(username)\n portrait = self.membership_tool.getPersonalPortrait(username)\n\n if not authorinfo or not portrait:\n return {}\n\n return {\n 'info': authorinfo,\n 'portrait': portrait\n }\n\n @property\n def member_info(self):\n current_member = self.portal_state.member()\n if not current_member or not current_member.getId():\n return {'url': None, 'email': None}\n\n return {\n 'url': quote_plus(current_member.getId()),\n 'email': current_member.getProperty('email')\n }\n\n @property\n def author_content(self):\n results = []\n\n plone_view = self.context.restrictedTraverse(\n '@@plone'\n )\n\n brains = self.portal_catalog.searchResults(\n Creator=self.username,\n sort_on='created',\n sort_order='reverse'\n )\n\n for brain in brains[:10]:\n results.append({\n 'title': pretty_title_or_id(\n self, brain\n ),\n 'date': plone_view.toLocalizedTime(\n brain.Date\n ),\n 'url': brain.getURL()\n })\n\n return results\n\n def home_folder(self, username):\n return self.membership_tool.getHomeFolder(id=username)\n\n def __call__(self):\n\n self.portal_properties = getUtility(\n IPropertiesTool\n )\n\n self.portal_catalog = getToolByName(\n self.context, 'portal_catalog'\n )\n\n # XXX: getUtility call does not work.\n self.membership_tool = getToolByName(\n self.context, 'portal_membership'\n )\n\n self.portal_state = getMultiAdapter(\n (self.context, self.request),\n name=u'plone_portal_state'\n )\n\n self.feedback_form = AuthorFeedbackForm(\n self.context, self.request\n )\n self.feedback_form.update()\n self.feedback_form.widgets[\"author\"].mode = HIDDEN_MODE\n self.feedback_form.widgets[\"referer\"].mode = HIDDEN_MODE\n self.feedback_form.widgets[\"author\"].value = self.username\n self.feedback_form.widgets[\"referer\"].value = self.request.get(\n 'referer',\n self.request.get('HTTP_REFERER', 'unknown url')\n )\n\n registry = getUtility(IRegistry)\n security_settings = registry.forInterface(\n ISecuritySchema, prefix='plone')\n allow_anonymous_view_about = security_settings.allow_anon_views_about\n\n mail_settings = 
registry.forInterface(IMailSchema, prefix='plone')\n self.email_from_address = mail_settings.email_from_address\n\n if self.is_anonymous and not allow_anonymous_view_about:\n raise Unauthorized()\n\n return self.index()\n", "path": "Products/CMFPlone/browser/author.py"}]}
2,867
144
gh_patches_debug_6019
rasdani/github-patches
git_diff
cupy__cupy-3335
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `around` result is different compared to numpy ``` >>> np.__version__ '1.16.4' >>> np.around([2.5]) array([2.]) >>> cupy.__version__ '7.0.0a1' >>> cupy.around([2.5]) array([3.]) ``` NumPy seems to round to even. </issue> <code> [start of cupy/math/rounding.py] 1 from cupy import core 2 from cupy.core import fusion 3 from cupy.math import ufunc 4 5 6 def around(a, decimals=0, out=None): 7 """Rounds to the given number of decimals. 8 9 Args: 10 a (cupy.ndarray): The source array. 11 decimals (int): umber of decimal places to round to (default: 0). 12 If decimals is negative, it specifies the number of positions to 13 the left of the decimal point. 14 out (cupy.ndarray): Output array. 15 16 Returns: 17 cupy.ndarray: Rounded array. 18 19 .. seealso:: :func:`numpy.around` 20 21 """ 22 if fusion._is_fusing(): 23 return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out) 24 a = core.array(a, copy=False) 25 return a.round(decimals, out=out) 26 27 28 def round_(a, decimals=0, out=None): 29 return around(a, decimals, out=out) 30 31 32 rint = ufunc.create_math_ufunc( 33 'rint', 1, 'cupy_rint', 34 '''Rounds each element of an array to the nearest integer. 35 36 .. seealso:: :data:`numpy.rint` 37 38 ''') 39 40 41 floor = ufunc.create_math_ufunc( 42 'floor', 1, 'cupy_floor', 43 '''Rounds each element of an array to its floor integer. 44 45 .. seealso:: :data:`numpy.floor` 46 47 ''', support_complex=False) 48 49 50 ceil = ufunc.create_math_ufunc( 51 'ceil', 1, 'cupy_ceil', 52 '''Rounds each element of an array to its ceiling integer. 53 54 .. seealso:: :data:`numpy.ceil` 55 56 ''', support_complex=False) 57 58 59 trunc = ufunc.create_math_ufunc( 60 'trunc', 1, 'cupy_trunc', 61 '''Rounds each element of an array towards zero. 62 63 .. seealso:: :data:`numpy.trunc` 64 65 ''', support_complex=False) 66 67 68 fix = core.create_ufunc( 69 'cupy_fix', ('e->e', 'f->f', 'd->d'), 70 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)', 71 doc='''If given value x is positive, it return floor(x). 72 Else, it return ceil(x). 73 74 .. seealso:: :func:`numpy.fix` 75 76 ''') 77 [end of cupy/math/rounding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/math/rounding.py b/cupy/math/rounding.py --- a/cupy/math/rounding.py +++ b/cupy/math/rounding.py @@ -8,7 +8,7 @@ Args: a (cupy.ndarray): The source array. - decimals (int): umber of decimal places to round to (default: 0). + decimals (int): Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. out (cupy.ndarray): Output array.
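Note that this golden diff only corrects the docstring typo (`umber` to `Number`); the numerical behaviour described in the issue is the half-to-even rule itself. For reference, NumPy and Python 3's built-in `round` both resolve exact halves to the nearest even value, which a plain NumPy install demonstrates:

```python
import numpy as np

# Exact halves resolve to the nearest even integer ("banker's rounding"):
print(np.around([0.5, 1.5, 2.5]))  # -> [0. 2. 2.]
print(round(2.5))                  # Python 3 built-in: -> 2
```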
{"golden_diff": "diff --git a/cupy/math/rounding.py b/cupy/math/rounding.py\n--- a/cupy/math/rounding.py\n+++ b/cupy/math/rounding.py\n@@ -8,7 +8,7 @@\n \n Args:\n a (cupy.ndarray): The source array.\n- decimals (int): umber of decimal places to round to (default: 0).\n+ decimals (int): Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n", "issue": "`around` result is different compared to numpy\n```\r\n>>> np.__version__\r\n'1.16.4'\r\n>>> np.around([2.5])\r\narray([2.])\r\n>>> cupy.__version__\r\n'7.0.0a1'\r\n>>> cupy.around([2.5])\r\narray([3.])\r\n```\r\nNumPy seems to round to even.\n", "before_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\nfrom cupy.math import ufunc\n\n\ndef around(a, decimals=0, out=None):\n \"\"\"Rounds to the given number of decimals.\n\n Args:\n a (cupy.ndarray): The source array.\n decimals (int): umber of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Rounded array.\n\n .. seealso:: :func:`numpy.around`\n\n \"\"\"\n if fusion._is_fusing():\n return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out)\n a = core.array(a, copy=False)\n return a.round(decimals, out=out)\n\n\ndef round_(a, decimals=0, out=None):\n return around(a, decimals, out=out)\n\n\nrint = ufunc.create_math_ufunc(\n 'rint', 1, 'cupy_rint',\n '''Rounds each element of an array to the nearest integer.\n\n .. seealso:: :data:`numpy.rint`\n\n ''')\n\n\nfloor = ufunc.create_math_ufunc(\n 'floor', 1, 'cupy_floor',\n '''Rounds each element of an array to its floor integer.\n\n .. seealso:: :data:`numpy.floor`\n\n ''', support_complex=False)\n\n\nceil = ufunc.create_math_ufunc(\n 'ceil', 1, 'cupy_ceil',\n '''Rounds each element of an array to its ceiling integer.\n\n .. seealso:: :data:`numpy.ceil`\n\n ''', support_complex=False)\n\n\ntrunc = ufunc.create_math_ufunc(\n 'trunc', 1, 'cupy_trunc',\n '''Rounds each element of an array towards zero.\n\n .. seealso:: :data:`numpy.trunc`\n\n ''', support_complex=False)\n\n\nfix = core.create_ufunc(\n 'cupy_fix', ('e->e', 'f->f', 'd->d'),\n 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)',\n doc='''If given value x is positive, it return floor(x).\n Else, it return ceil(x).\n\n .. seealso:: :func:`numpy.fix`\n\n ''')\n", "path": "cupy/math/rounding.py"}]}
1,304
136
gh_patches_debug_27011
rasdani/github-patches
git_diff
dask__distributed-8347
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bokeh 3.3.0 uses absolute URLs and breaks proxied dashboards I noticed when using `distributed==2023.10.1` with `bokeh==3.2.2` the URLs to the Bokeh JavaScript are relative. ```html <script type="text/javascript" src="static/js/bokeh.min.js?v=3ca6425586de5036dc01992dd69aa61e9196dd02619557cfaeb1b3d8b77adf724be49401b1168483d165494ce57a6daa16e6f6d3660fef117d45028221f86357"></script> ``` But when upgrading to `bokeh==3.3.0` they become absolute. ```html <script type="text/javascript" src="/static/js/bokeh.min.js?v=39ef57c3a83533e24f961e5c27f651a61045dbccefac4b5df86a7680b1edaff31886a7c0322250ffb0d758fa14ae156c9b640f60cca99f020096b050a4dbb571"></script> ``` This breaks dashboards that are being proxied at some sub-url. Setting `dask scheduler --dashboard-prefix ""` doesn't fix it. ### Reproducer ``` $ pip install dask distributed bokeh==3.3.0 $ dask scheduler & $ curl localhost:8787/status | grep bokeh.min.js ``` </issue> <code> [start of distributed/dashboard/core.py] 1 from __future__ import annotations 2 3 import functools 4 import warnings 5 6 from bokeh.application import Application 7 from bokeh.application.handlers.function import FunctionHandler 8 from bokeh.server.server import BokehTornado 9 from bokeh.server.util import create_hosts_allowlist 10 11 import dask 12 13 from distributed.dashboard.utils import BOKEH_VERSION 14 from distributed.versions import BOKEH_REQUIREMENT 15 16 # Set `prereleases=True` to allow for use with dev versions of `bokeh` 17 if not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True): 18 warnings.warn( 19 f"\nDask needs {BOKEH_REQUIREMENT} for the dashboard." 20 f"\nYou have bokeh={BOKEH_VERSION}." 21 "\nContinuing without the dashboard." 22 ) 23 raise ImportError( 24 f"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}" 25 ) 26 27 28 if BOKEH_VERSION.major < 3: 29 from bokeh.models import Panel as TabPanel # noqa: F401 30 else: 31 from bokeh.models import TabPanel # noqa: F401 32 33 34 def BokehApplication(applications, server, prefix="/", template_variables=None): 35 template_variables = template_variables or {} 36 prefix = "/" + prefix.strip("/") + "/" if prefix else "/" 37 38 extra = {"prefix": prefix, **template_variables} 39 40 funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()} 41 apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()} 42 43 kwargs = dask.config.get("distributed.scheduler.dashboard.bokeh-application").copy() 44 extra_websocket_origins = create_hosts_allowlist( 45 kwargs.pop("allow_websocket_origin"), server.http_server.port 46 ) 47 48 return BokehTornado( 49 apps, 50 prefix=prefix, 51 use_index=False, 52 extra_websocket_origins=extra_websocket_origins, 53 **kwargs, 54 ) 55 [end of distributed/dashboard/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py --- a/distributed/dashboard/core.py +++ b/distributed/dashboard/core.py @@ -5,6 +5,7 @@ from bokeh.application import Application from bokeh.application.handlers.function import FunctionHandler +from bokeh.resources import Resources from bokeh.server.server import BokehTornado from bokeh.server.util import create_hosts_allowlist @@ -31,6 +32,11 @@ from bokeh.models import TabPanel # noqa: F401 +class DaskBokehTornado(BokehTornado): + def resources(self, absolute_url: str | bool | None = True) -> Resources: + return super().resources(absolute_url) + + def BokehApplication(applications, server, prefix="/", template_variables=None): template_variables = template_variables or {} prefix = "/" + prefix.strip("/") + "/" if prefix else "/" @@ -45,10 +51,11 @@ kwargs.pop("allow_websocket_origin"), server.http_server.port ) - return BokehTornado( + return DaskBokehTornado( apps, prefix=prefix, use_index=False, extra_websocket_origins=extra_websocket_origins, + absolute_url="", **kwargs, )
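The subclass forwards the configured `absolute_url` through `resources()`, and the server is constructed with `absolute_url=""`, so Bokeh falls back to relative static URLs and the dashboard resolves its assets again behind a reverse proxy at a sub-path. A hypothetical smoke test based on the issue's reproducer (the host and port are assumptions):

```python
import re
import urllib.request

# After the fix, bokeh.min.js should be referenced with a relative src,
# as it was with bokeh 3.2.x.
html = urllib.request.urlopen("http://localhost:8787/status").read().decode()
match = re.search(r'src="([^"]*bokeh\.min\.js[^"]*)"', html)
assert match is not None, "no bokeh.min.js script tag found"
assert not match.group(1).startswith("/"), match.group(1)
```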
{"golden_diff": "diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py\n--- a/distributed/dashboard/core.py\n+++ b/distributed/dashboard/core.py\n@@ -5,6 +5,7 @@\n \n from bokeh.application import Application\n from bokeh.application.handlers.function import FunctionHandler\n+from bokeh.resources import Resources\n from bokeh.server.server import BokehTornado\n from bokeh.server.util import create_hosts_allowlist\n \n@@ -31,6 +32,11 @@\n from bokeh.models import TabPanel # noqa: F401\n \n \n+class DaskBokehTornado(BokehTornado):\n+ def resources(self, absolute_url: str | bool | None = True) -> Resources:\n+ return super().resources(absolute_url)\n+\n+\n def BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n@@ -45,10 +51,11 @@\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n \n- return BokehTornado(\n+ return DaskBokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n+ absolute_url=\"\",\n **kwargs,\n )\n", "issue": "Bokeh 3.3.0 uses absolute URLs and breaks proxied dashboards\nI noticed when using `distributed==2023.10.1` with `bokeh==3.2.2` the URLs to the Bokeh JavaScript are relative.\r\n\r\n```html\r\n<script type=\"text/javascript\" src=\"static/js/bokeh.min.js?v=3ca6425586de5036dc01992dd69aa61e9196dd02619557cfaeb1b3d8b77adf724be49401b1168483d165494ce57a6daa16e6f6d3660fef117d45028221f86357\"></script>\r\n```\r\n\r\nBut when upgrading to `bokeh==3.3.0` they become absolute.\r\n\r\n```html\r\n<script type=\"text/javascript\" src=\"/static/js/bokeh.min.js?v=39ef57c3a83533e24f961e5c27f651a61045dbccefac4b5df86a7680b1edaff31886a7c0322250ffb0d758fa14ae156c9b640f60cca99f020096b050a4dbb571\"></script>\r\n```\r\n\r\nThis breaks dashboards that are being proxied at some sub-url.\r\n\r\nSetting `dask scheduler --dashboard-prefix \"\"` doesn't fix it.\r\n\r\n### Reproducer\r\n\r\n```\r\n$ pip install dask distributed bokeh==3.3.0\r\n$ dask scheduler &\r\n$ curl localhost:8787/status | grep bokeh.min.js\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.server.server import BokehTornado\nfrom bokeh.server.util import create_hosts_allowlist\n\nimport dask\n\nfrom distributed.dashboard.utils import BOKEH_VERSION\nfrom distributed.versions import BOKEH_REQUIREMENT\n\n# Set `prereleases=True` to allow for use with dev versions of `bokeh`\nif not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):\n warnings.warn(\n f\"\\nDask needs {BOKEH_REQUIREMENT} for the dashboard.\"\n f\"\\nYou have bokeh={BOKEH_VERSION}.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\n f\"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}\"\n )\n\n\nif BOKEH_VERSION.major < 3:\n from bokeh.models import Panel as TabPanel # noqa: F401\nelse:\n from bokeh.models import TabPanel # noqa: F401\n\n\ndef BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n\n extra = {\"prefix\": prefix, **template_variables}\n\n funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}\n apps = {k: 
Application(FunctionHandler(v)) for k, v in funcs.items()}\n\n kwargs = dask.config.get(\"distributed.scheduler.dashboard.bokeh-application\").copy()\n extra_websocket_origins = create_hosts_allowlist(\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n\n return BokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n **kwargs,\n )\n", "path": "distributed/dashboard/core.py"}]}
1,489
292
gh_patches_debug_1824
rasdani/github-patches
git_diff
ManageIQ__integration_tests-8533
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> is_displayed for catalog all page is not working as expected Currently the is_display of catalog all page returns True even if the view is on Add catalog page. </issue> <code> [start of cfme/services/catalogs/catalog.py] 1 import attr 2 from navmazing import NavigateToAttribute 3 from navmazing import NavigateToSibling 4 from widgetastic.utils import Parameter 5 from widgetastic.widget import Text 6 from widgetastic_patternfly import Button 7 from widgetastic_patternfly import CandidateNotFound 8 from widgetastic_patternfly import Input 9 10 from . import ServicesCatalogView 11 from cfme.common import Taggable 12 from cfme.modeling.base import BaseCollection 13 from cfme.modeling.base import BaseEntity 14 from cfme.utils.appliance.implementations.ui import CFMENavigateStep 15 from cfme.utils.appliance.implementations.ui import navigate_to 16 from cfme.utils.appliance.implementations.ui import navigator 17 from cfme.utils.pretty import Pretty 18 from cfme.utils.update import Updateable 19 from cfme.utils.wait import wait_for 20 from widgetastic_manageiq import MultiBoxSelect 21 22 23 class CatalogsMultiBoxSelect(MultiBoxSelect): 24 move_into_button = Button(title=Parameter("@move_into")) 25 move_from_button = Button(title=Parameter("@move_from")) 26 27 28 class CatalogForm(ServicesCatalogView): 29 title = Text('#explorer_title_text') 30 31 name = Input(name='name') 32 description = Input(name="description") 33 assign_catalog_items = CatalogsMultiBoxSelect( 34 move_into="Move Selected buttons right", 35 move_from="Move Selected buttons left", 36 available_items="available_fields", 37 chosen_items="selected_fields" 38 ) 39 40 save_button = Button('Save') 41 cancel_button = Button('Cancel') 42 43 44 class CatalogsView(ServicesCatalogView): 45 title = Text("#explorer_title_text") 46 47 @property 48 def is_displayed(self): 49 return ( 50 self.in_explorer and 51 self.catalogs.is_opened and 52 self.catalogs.tree.currently_selected == ["All Catalogs"]) 53 54 55 class DetailsCatalogView(ServicesCatalogView): 56 title = Text("#explorer_title_text") 57 58 @property 59 def is_displayed(self): 60 return ( 61 self.in_explorer and self.catalogs.is_opened and 62 self.title.text == 'Catalog "{}"'.format(self.context["object"].name) 63 ) 64 65 66 class AddCatalogView(CatalogForm): 67 68 add_button = Button("Add") 69 70 @property 71 def is_displayed(self): 72 return ( 73 self.in_explorer and self.catalogs.is_opened and 74 self.title.text == 'Adding a new Catalog' 75 ) 76 77 78 class EditCatalogView(CatalogForm): 79 80 save_button = Button('Save') 81 reset_button = Button('Reset') 82 83 @property 84 def is_displayed(self): 85 return ( 86 self.in_explorer and self.catalogs.is_opened and 87 self.title.text == 'Editing Catalog "{}"'.format(self.context["object"].name) 88 ) 89 90 91 @attr.s 92 class Catalog(BaseEntity, Updateable, Pretty, Taggable): 93 94 name = attr.ib() 95 description = attr.ib() 96 items = attr.ib(default=None) 97 98 def update(self, updates): 99 view = navigate_to(self, 'Edit') 100 changed = view.fill(updates) 101 if changed: 102 view.save_button.click() 103 else: 104 view.cancel_button.click() 105 view = self.create_view(DetailsCatalogView, override=updates, wait='10s') 106 view.flash.assert_no_error() 107 if changed: 108 view.flash.assert_message( 109 'Catalog "{}" was saved'.format(updates.get('name', self.name))) 110 else: 111 view.flash.assert_message( 112 'Edit of Catalog "{}" 
was cancelled by the user'.format(self.name)) 113 114 def delete(self): 115 view = navigate_to(self, "Details") 116 view.configuration.item_select('Remove Catalog', handle_alert=True) 117 view = self.create_view(CatalogsView, wait='10s') 118 view.flash.assert_no_error() 119 view.flash.assert_success_message( 120 'Catalog "{}": Delete successful'.format(self.description or self.name)) 121 122 @property 123 def exists(self): 124 try: 125 navigate_to(self, 'Details') 126 return True 127 except (NameError, CandidateNotFound): 128 return False 129 130 131 @attr.s 132 class CatalogCollection(BaseCollection): 133 """A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`""" 134 ENTITY = Catalog 135 136 def create(self, name, description, items=None): 137 """Create a catalog. 138 139 Args: 140 name: The name of the catalog 141 description: The description of the catalog 142 items: Items in the catalog 143 """ 144 view = navigate_to(self, 'Add') 145 view.fill({ 146 'name': name, 147 'description': description, 148 'assign_catalog_items': items 149 }) 150 view.add_button.click() 151 catalog = self.instantiate(name=name, description=description, items=items) 152 view = self.create_view(CatalogsView) 153 assert view.is_displayed 154 view.flash.assert_no_error() 155 return catalog 156 157 158 @navigator.register(CatalogCollection) 159 class All(CFMENavigateStep): 160 VIEW = CatalogsView 161 prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn') 162 163 def step(self, *args, **kwargs): 164 self.prerequisite_view.navigation.select('Services', 'Catalogs') 165 self.view.catalogs.tree.click_path("All Catalogs") 166 167 168 @navigator.register(CatalogCollection) 169 class Add(CFMENavigateStep): 170 VIEW = AddCatalogView 171 prerequisite = NavigateToSibling('All') 172 173 def step(self, *args, **kwargs): 174 self.prerequisite_view.configuration.item_select('Add a New Catalog') 175 176 177 @navigator.register(Catalog) 178 class Details(CFMENavigateStep): 179 VIEW = DetailsCatalogView 180 prerequisite = NavigateToAttribute('parent', 'All') 181 182 def step(self, *args, **kwargs): 183 self.prerequisite_view.catalogs.tree.click_path("All Catalogs", self.obj.name) 184 185 186 @navigator.register(Catalog) 187 class Edit(CFMENavigateStep): 188 VIEW = EditCatalogView 189 prerequisite = NavigateToSibling('Details') 190 191 def step(self, *args, **kwargs): 192 self.prerequisite_view.configuration.item_select('Edit this Item') 193 [end of cfme/services/catalogs/catalog.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py --- a/cfme/services/catalogs/catalog.py +++ b/cfme/services/catalogs/catalog.py @@ -49,6 +49,7 @@ return ( self.in_explorer and self.catalogs.is_opened and + self.title.text == "All Catalogs" and self.catalogs.tree.currently_selected == ["All Catalogs"])
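The extra title check is what disambiguates the two pages: the navigation tree still reports `All Catalogs` as the current selection while the Add form is open, so the original two conditions held on both views. After the patch the property reads:

```python
@property
def is_displayed(self):
    # Tree state alone is ambiguous: "All Catalogs" stays selected on the
    # Add page, so the explorer title is pinned as well.
    return (
        self.in_explorer and
        self.catalogs.is_opened and
        self.title.text == "All Catalogs" and
        self.catalogs.tree.currently_selected == ["All Catalogs"])
```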
{"golden_diff": "diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py\n--- a/cfme/services/catalogs/catalog.py\n+++ b/cfme/services/catalogs/catalog.py\n@@ -49,6 +49,7 @@\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n+ self.title.text == \"All Catalogs\" and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n", "issue": "is_displayed for catalog all page is not working as expected\nCurrently the is_display of catalog all page returns True even if the view is on Add catalog page.\n", "before_files": [{"content": "import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import CandidateNotFound\nfrom widgetastic_patternfly import Input\n\nfrom . import ServicesCatalogView\nfrom cfme.common import Taggable\nfrom cfme.modeling.base import BaseCollection\nfrom cfme.modeling.base import BaseEntity\nfrom cfme.utils.appliance.implementations.ui import CFMENavigateStep\nfrom cfme.utils.appliance.implementations.ui import navigate_to\nfrom cfme.utils.appliance.implementations.ui import navigator\nfrom cfme.utils.pretty import Pretty\nfrom cfme.utils.update import Updateable\nfrom cfme.utils.wait import wait_for\nfrom widgetastic_manageiq import MultiBoxSelect\n\n\nclass CatalogsMultiBoxSelect(MultiBoxSelect):\n move_into_button = Button(title=Parameter(\"@move_into\"))\n move_from_button = Button(title=Parameter(\"@move_from\"))\n\n\nclass CatalogForm(ServicesCatalogView):\n title = Text('#explorer_title_text')\n\n name = Input(name='name')\n description = Input(name=\"description\")\n assign_catalog_items = CatalogsMultiBoxSelect(\n move_into=\"Move Selected buttons right\",\n move_from=\"Move Selected buttons left\",\n available_items=\"available_fields\",\n chosen_items=\"selected_fields\"\n )\n\n save_button = Button('Save')\n cancel_button = Button('Cancel')\n\n\nclass CatalogsView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n\n\nclass DetailsCatalogView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\nclass AddCatalogView(CatalogForm):\n\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Adding a new Catalog'\n )\n\n\nclass EditCatalogView(CatalogForm):\n\n save_button = Button('Save')\n reset_button = Button('Reset')\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Editing Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\[email protected]\nclass Catalog(BaseEntity, Updateable, Pretty, Taggable):\n\n name = attr.ib()\n description = attr.ib()\n items = attr.ib(default=None)\n\n def update(self, updates):\n view = navigate_to(self, 'Edit')\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(DetailsCatalogView, override=updates, wait='10s')\n view.flash.assert_no_error()\n if changed:\n 
view.flash.assert_message(\n 'Catalog \"{}\" was saved'.format(updates.get('name', self.name)))\n else:\n view.flash.assert_message(\n 'Edit of Catalog \"{}\" was cancelled by the user'.format(self.name))\n\n def delete(self):\n view = navigate_to(self, \"Details\")\n view.configuration.item_select('Remove Catalog', handle_alert=True)\n view = self.create_view(CatalogsView, wait='10s')\n view.flash.assert_no_error()\n view.flash.assert_success_message(\n 'Catalog \"{}\": Delete successful'.format(self.description or self.name))\n\n @property\n def exists(self):\n try:\n navigate_to(self, 'Details')\n return True\n except (NameError, CandidateNotFound):\n return False\n\n\[email protected]\nclass CatalogCollection(BaseCollection):\n \"\"\"A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`\"\"\"\n ENTITY = Catalog\n\n def create(self, name, description, items=None):\n \"\"\"Create a catalog.\n\n Args:\n name: The name of the catalog\n description: The description of the catalog\n items: Items in the catalog\n \"\"\"\n view = navigate_to(self, 'Add')\n view.fill({\n 'name': name,\n 'description': description,\n 'assign_catalog_items': items\n })\n view.add_button.click()\n catalog = self.instantiate(name=name, description=description, items=items)\n view = self.create_view(CatalogsView)\n assert view.is_displayed\n view.flash.assert_no_error()\n return catalog\n\n\[email protected](CatalogCollection)\nclass All(CFMENavigateStep):\n VIEW = CatalogsView\n prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.navigation.select('Services', 'Catalogs')\n self.view.catalogs.tree.click_path(\"All Catalogs\")\n\n\[email protected](CatalogCollection)\nclass Add(CFMENavigateStep):\n VIEW = AddCatalogView\n prerequisite = NavigateToSibling('All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Add a New Catalog')\n\n\[email protected](Catalog)\nclass Details(CFMENavigateStep):\n VIEW = DetailsCatalogView\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.catalogs.tree.click_path(\"All Catalogs\", self.obj.name)\n\n\[email protected](Catalog)\nclass Edit(CFMENavigateStep):\n VIEW = EditCatalogView\n prerequisite = NavigateToSibling('Details')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Edit this Item')\n", "path": "cfme/services/catalogs/catalog.py"}]}
2,345
102
gh_patches_debug_25072
rasdani/github-patches
git_diff
mindsdb__mindsdb-1010
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Removing predictor through HTTP returns 500 If mindsdb config have connection to DB with `publish: True`, but this DB cant be reached, then if removing predictor in Scout will return 'error 500'. That because mindsdb try to remove table related to predictor from database. That error is not critical. Probably, we need return code 200 with 'warnings' in this case, or only code 200. Request: ``` Request URL: http://localhost:5000/api/predictors/home_days_on_market Request Method: DELETE Status Code: 500 INTERNAL SERVER ERROR response {"message": "ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'"} ``` error: ``` [2020-12-14 10:40:17,942] ERROR in app: Exception on /api/predictors/home_initial [DELETE] Traceback (most recent call last): File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection self._cmysql.connect(**cnx_kwargs) _mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper resp = resource(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view return self.dispatch_request(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request resp = meth(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete ca.mindsdb_native.delete_model(name) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model self.dbw.unregister_predictor(name) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor integration.unregister_predictor(name) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor self._query(q) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query con = mysql.connector.connect( File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect return CMySQLConnection(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__ self.connect(**kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect self._open_connection() File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno, 
mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost' ERROR:mindsdb.api.http.initialize:Exception on /api/predictors/home_initial [DELETE] Traceback (most recent call last): File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection self._cmysql.connect(**cnx_kwargs) _mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper resp = resource(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view return self.dispatch_request(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request resp = meth(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete ca.mindsdb_native.delete_model(name) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model self.dbw.unregister_predictor(name) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor integration.unregister_predictor(name) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor self._query(q) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query con = mysql.connector.connect( File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect return CMySQLConnection(*args, **kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__ self.connect(**kwargs) File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect self._open_connection() File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno, mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost' ``` </issue> <code> [start of mindsdb/interfaces/database/database.py] 1 from mindsdb.integrations.clickhouse.clickhouse import Clickhouse 2 from mindsdb.integrations.postgres.postgres import PostgreSQL 3 from mindsdb.integrations.mariadb.mariadb import Mariadb 4 from mindsdb.integrations.mysql.mysql import MySQL 5 from mindsdb.integrations.mssql.mssql import MSSQL 6 7 8 class DatabaseWrapper(): 9 10 def __init__(self, config): 11 self.config = config 12 self._get_integrations() 13 14 def _setup_integration(self, integration): 15 success = False 16 try: 17 integration.setup() 
18 success = True 19 except Exception as e: 20 print('Failed to integrate with database ' + integration.name + f', error: {e}') 21 return success 22 23 def _get_integrations(self): 24 # @TODO Once we have a presistent state sorted out this should be simplified as to not refresh the existing integrations every single time 25 integration_arr = [] 26 for db_alias in self.config['integrations']: 27 if self.config['integrations'][db_alias]['publish']: 28 db_type = self.config['integrations'][db_alias]['type'] 29 if db_type == 'clickhouse': 30 integration_arr.append(Clickhouse(self.config, db_alias)) 31 elif db_type == 'mariadb': 32 integration_arr.append(Mariadb(self.config, db_alias)) 33 elif db_type == 'mysql': 34 integration_arr.append(MySQL(self.config, db_alias)) 35 elif db_type == 'postgres': 36 integration_arr.append(PostgreSQL(self.config, db_alias)) 37 elif db_type == 'mssql': 38 integration_arr.append(MSSQL(self.config, db_alias)) 39 elif db_type == 'mongodb': 40 pass 41 else: 42 print(f'Uknown integration type: {db_type} for database called: {db_alias}') 43 44 return integration_arr 45 46 def register_predictors(self, model_data_arr, setup=True): 47 it = self._get_integrations() 48 for integration in it: 49 register = True 50 if setup: 51 register = self._setup_integration(integration) 52 if register: 53 integration.register_predictors(model_data_arr) 54 55 integration = [integration] 56 57 def unregister_predictor(self, name): 58 for integration in self._get_integrations(): 59 integration.unregister_predictor(name) 60 61 def check_connections(self): 62 connections = {} 63 for integration in self._get_integrations(): 64 connections[integration.name] = integration.check_connection() 65 66 return connections 67 [end of mindsdb/interfaces/database/database.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py --- a/mindsdb/interfaces/database/database.py +++ b/mindsdb/interfaces/database/database.py @@ -4,6 +4,8 @@ from mindsdb.integrations.mysql.mysql import MySQL from mindsdb.integrations.mssql.mssql import MSSQL +from mindsdb.utilities.log import log as logger + class DatabaseWrapper(): @@ -50,13 +52,19 @@ if setup: register = self._setup_integration(integration) if register: - integration.register_predictors(model_data_arr) + if integration.check_connection(): + integration.register_predictors(model_data_arr) + else: + logger.warning(f"There is no connection to {integration.name}. predictor wouldn't be registred.") integration = [integration] def unregister_predictor(self, name): for integration in self._get_integrations(): - integration.unregister_predictor(name) + if integration.check_connection(): + integration.unregister_predictor(name) + else: + logger.warning(f"There is no connection to {integration.name}. predictor wouldn't be unregistred") def check_connections(self): connections = {}
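The guard added here probes each integration before touching it, so an unreachable database is logged as a warning instead of surfacing as an HTTP 500, in line with the issue's point that the error is not critical. A sketch of the resulting control flow for removal (the `logger` comes from the `mindsdb.utilities.log` import the patch adds):

```python
def unregister_predictor(self, name):
    for integration in self._get_integrations():
        if integration.check_connection():
            integration.unregister_predictor(name)
        else:
            # Skip unreachable integrations instead of raising, so the
            # HTTP layer can still answer without a 500.
            logger.warning(f"There is no connection to {integration.name}. "
                           f"Predictor wouldn't be unregistered there.")
```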
{"golden_diff": "diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py\n--- a/mindsdb/interfaces/database/database.py\n+++ b/mindsdb/interfaces/database/database.py\n@@ -4,6 +4,8 @@\n from mindsdb.integrations.mysql.mysql import MySQL\n from mindsdb.integrations.mssql.mssql import MSSQL\n \n+from mindsdb.utilities.log import log as logger\n+\n \n class DatabaseWrapper():\n \n@@ -50,13 +52,19 @@\n if setup:\n register = self._setup_integration(integration)\n if register:\n- integration.register_predictors(model_data_arr)\n+ if integration.check_connection():\n+ integration.register_predictors(model_data_arr)\n+ else:\n+ logger.warning(f\"There is no connection to {integration.name}. predictor wouldn't be registred.\")\n \n integration = [integration]\n \n def unregister_predictor(self, name):\n for integration in self._get_integrations():\n- integration.unregister_predictor(name)\n+ if integration.check_connection():\n+ integration.unregister_predictor(name)\n+ else:\n+ logger.warning(f\"There is no connection to {integration.name}. predictor wouldn't be unregistred\")\n \n def check_connections(self):\n connections = {}\n", "issue": "Removing predictor through HTTP returns 500 \nIf mindsdb config have connection to DB with `publish: True`, but this DB cant be reached, then if removing predictor in Scout will return 'error 500'. That because mindsdb try to remove table related to predictor from database. That error is not critical. Probably, we need return code 200 with 'warnings' in this case, or only code 200.\r\nRequest:\r\n```\r\nRequest URL: http://localhost:5000/api/predictors/home_days_on_market\r\nRequest Method: DELETE\r\nStatus Code: 500 INTERNAL SERVER ERROR\r\nresponse\r\n{\"message\": \"ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'\"}\r\n```\r\nerror:\r\n```\r\n[2020-12-14 10:40:17,942] ERROR in app: Exception on /api/predictors/home_initial [DELETE]\r\nTraceback (most recent call last):\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py\", line 233, in _open_connection\r\n self._cmysql.connect(**cnx_kwargs)\r\n_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py\", line 375, in wrapper\r\n resp = resource(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py\", line 89, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py\", line 44, in dispatch_request\r\n resp = meth(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py\", line 116, in delete\r\n ca.mindsdb_native.delete_model(name)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py\", line 97, in delete_model\r\n 
num_tokens_prompt: 3,085
num_tokens_diff: 265
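The record above ends with a MySQL `ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'` raised from `DatabaseWrapper.unregister_predictor` — a credentials problem (note the likely `roote`/`root` typo) that escapes as an unhandled exception from the DELETE endpoint. A minimal defensive sketch, assuming only the `DatabaseWrapper` interface quoted in the record; the helper below is hypothetical and is not MindsDB's actual patch:

```python
import logging

logger = logging.getLogger(__name__)


def unregister_predictor_safely(integrations, name):
    """Unregister ``name`` from every integration, tolerating failures.

    ``integrations`` is any iterable of objects exposing ``.name`` and
    ``.unregister_predictor(name)``, matching the interface shown in the
    record above; the helper itself is illustrative only.
    """
    failures = {}
    for integration in integrations:
        try:
            integration.unregister_predictor(name)
        except Exception as exc:  # e.g. mysql.connector access-denied errors
            failures[integration.name] = exc
            logger.warning('Skipping integration %s: %s', integration.name, exc)
    return failures
```

Collecting failures instead of letting the first one propagate keeps a single misconfigured integration from aborting the whole unregister pass with an HTTP 500.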
problem_id: gh_patches_debug_29922
source: rasdani/github-patches
task_type: git_diff
in_source_id: rasterio__rasterio-1058
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Return None when a dataset has an undefined CRS Instead of `CRS()`. Returning `CRS()` means that application-level concerns about what an undefined CRS means become Rasterio's concerns. `None` is what we have for undefined things in Python, and using it will help us tackle the concerns at the proper level. </issue> <code> [start of rasterio/rio/edit_info.py] 1 """Fetch and edit raster dataset metadata from the command line.""" 2 3 4 import json 5 import warnings 6 7 import click 8 9 import rasterio 10 import rasterio.crs 11 from rasterio.crs import CRS 12 from rasterio.errors import CRSError 13 from rasterio.rio import options 14 from rasterio.transform import guard_transform 15 16 17 # Handlers for info module options. 18 19 def all_handler(ctx, param, value): 20 """Get tags from a template file or command line.""" 21 if ctx.obj and ctx.obj.get('like') and value is not None: 22 ctx.obj['all_like'] = value 23 value = ctx.obj.get('like') 24 return value 25 26 27 def crs_handler(ctx, param, value): 28 """Get crs value from a template file or command line.""" 29 retval = options.from_like_context(ctx, param, value) 30 if retval is None and value: 31 try: 32 retval = json.loads(value) 33 except ValueError: 34 retval = value 35 try: 36 if isinstance(retval, dict): 37 retval = CRS(retval) 38 else: 39 retval = CRS.from_string(retval) 40 except CRSError: 41 raise click.BadParameter( 42 "'%s' is not a recognized CRS." % retval, 43 param=param, param_hint='crs') 44 return retval 45 46 47 def tags_handler(ctx, param, value): 48 """Get tags from a template file or command line.""" 49 retval = options.from_like_context(ctx, param, value) 50 if retval is None and value: 51 try: 52 retval = dict(p.split('=') for p in value) 53 except: 54 raise click.BadParameter( 55 "'%s' contains a malformed tag." % value, 56 param=param, param_hint='transform') 57 return retval 58 59 60 def transform_handler(ctx, param, value): 61 """Get transform value from a template file or command line.""" 62 retval = options.from_like_context(ctx, param, value) 63 if retval is None and value: 64 try: 65 value = json.loads(value) 66 except ValueError: 67 pass 68 try: 69 retval = guard_transform(value) 70 except: 71 raise click.BadParameter( 72 "'%s' is not recognized as an Affine array." 
% value, 73 param=param, param_hint='transform') 74 return retval 75 76 77 @click.command('edit-info', short_help="Edit dataset metadata.") 78 @options.file_in_arg 79 @options.bidx_opt 80 @options.edit_nodata_opt 81 @click.option('--unset-nodata', default=False, is_flag=True, 82 help="Unset the dataset's nodata value.") 83 @click.option('--crs', callback=crs_handler, default=None, 84 help="New coordinate reference system") 85 @click.option('--unset-crs', default=False, is_flag=True, 86 help="Unset the dataset's CRS value.") 87 @click.option('--transform', callback=transform_handler, 88 help="New affine transform matrix") 89 @click.option('--units', help="Edit units of a band (requires --bidx)") 90 @click.option('--description', 91 help="Edit description of a band (requires --bidx)") 92 @click.option('--tag', 'tags', callback=tags_handler, multiple=True, 93 metavar='KEY=VAL', help="New tag.") 94 @click.option('--all', 'allmd', callback=all_handler, flag_value='like', 95 is_eager=True, default=False, 96 help="Copy all metadata items from the template file.") 97 @options.like_opt 98 @click.pass_context 99 def edit(ctx, input, bidx, nodata, unset_nodata, crs, unset_crs, transform, 100 units, description, tags, allmd, like): 101 """Edit a dataset's metadata: coordinate reference system, affine 102 transformation matrix, nodata value, and tags. 103 104 The coordinate reference system may be either a PROJ.4 or EPSG:nnnn 105 string, 106 107 --crs 'EPSG:4326' 108 109 or a JSON text-encoded PROJ.4 object. 110 111 --crs '{"proj": "utm", "zone": 18, ...}' 112 113 Transforms are JSON-encoded Affine objects like: 114 115 --transform '[300.038, 0.0, 101985.0, 0.0, -300.042, 2826915.0]' 116 117 Prior to Rasterio 1.0 GDAL geotransforms were supported for --transform, 118 but are no longer supported. 119 120 Metadata items may also be read from an existing dataset using a 121 combination of the --like option with at least one of --all, 122 `--crs like`, `--nodata like`, and `--transform like`. 123 124 rio edit-info example.tif --like template.tif --all 125 126 To get just the transform from the template: 127 128 rio edit-info example.tif --like template.tif --transform like 129 130 """ 131 import numpy as np 132 133 def in_dtype_range(value, dtype): 134 infos = {'c': np.finfo, 'f': np.finfo, 'i': np.iinfo, 135 'u': np.iinfo} 136 rng = infos[np.dtype(dtype).kind](dtype) 137 return rng.min <= value <= rng.max 138 139 with ctx.obj['env'], rasterio.open(input, 'r+') as dst: 140 141 if allmd: 142 nodata = allmd['nodata'] 143 crs = allmd['crs'] 144 transform = allmd['transform'] 145 tags = allmd['tags'] 146 147 if unset_nodata and nodata is not options.IgnoreOption: 148 raise click.BadParameter( 149 "--unset-nodata and --nodata cannot be used together.") 150 151 if unset_crs and crs: 152 raise click.BadParameter( 153 "--unset-crs and --crs cannot be used together.") 154 155 if unset_nodata: 156 # Setting nodata to None will raise NotImplementedError 157 # if GDALDeleteRasterNoDataValue() isn't present in the 158 # GDAL library. 159 try: 160 dst.nodata = None 161 except NotImplementedError as exc: # pragma: no cover 162 raise click.ClickException(str(exc)) 163 164 elif nodata is not options.IgnoreOption: 165 dtype = dst.dtypes[0] 166 if nodata is not None and not in_dtype_range(nodata, dtype): 167 raise click.BadParameter( 168 "outside the range of the file's " 169 "data type (%s)." 
% dtype, 170 param=nodata, param_hint='nodata') 171 dst.nodata = nodata 172 173 if unset_crs: 174 dst.crs = CRS() 175 elif crs: 176 dst.crs = crs 177 178 if transform: 179 dst.transform = transform 180 181 if tags: 182 dst.update_tags(**tags) 183 184 if units: 185 dst.set_units(bidx, units) 186 187 if description: 188 dst.set_description(bidx, description) 189 190 # Post check - ensure that crs was unset properly 191 if unset_crs: 192 with ctx.obj['env'], rasterio.open(input, 'r') as src: 193 if dict(src.crs) != {}: 194 warnings.warn( 195 'CRS was not unset. Availability of his functionality ' 196 'differs depending on GDAL version and driver') 197 [end of rasterio/rio/edit_info.py] [start of rasterio/rio/info.py] 1 """Command access to dataset metadata, stats, and more.""" 2 3 4 import json 5 6 import click 7 8 import rasterio 9 import rasterio.crs 10 from rasterio.rio import options 11 12 13 @click.command(short_help="Print information about a data file.") 14 @options.file_in_arg 15 @click.option('--meta', 'aspect', flag_value='meta', default=True, 16 help="Show data file structure (default).") 17 @click.option('--tags', 'aspect', flag_value='tags', 18 help="Show data file tags.") 19 @click.option('--namespace', help="Select a tag namespace.") 20 @click.option('--indent', default=None, type=int, 21 help="Indentation level for pretty printed output") 22 # Options to pick out a single metadata item and print it as 23 # a string. 24 @click.option('--count', 'meta_member', flag_value='count', 25 help="Print the count of bands.") 26 @click.option('-t', '--dtype', 'meta_member', flag_value='dtype', 27 help="Print the dtype name.") 28 @click.option('--nodata', 'meta_member', flag_value='nodata', 29 help="Print the nodata value.") 30 @click.option('-f', '--format', '--driver', 'meta_member', flag_value='driver', 31 help="Print the format driver.") 32 @click.option('--shape', 'meta_member', flag_value='shape', 33 help="Print the (height, width) shape.") 34 @click.option('--height', 'meta_member', flag_value='height', 35 help="Print the height (number of rows).") 36 @click.option('--width', 'meta_member', flag_value='width', 37 help="Print the width (number of columns).") 38 @click.option('--crs', 'meta_member', flag_value='crs', 39 help="Print the CRS as a PROJ.4 string.") 40 @click.option('--bounds', 'meta_member', flag_value='bounds', 41 help="Print the boundary coordinates " 42 "(left, bottom, right, top).") 43 @click.option('-r', '--res', 'meta_member', flag_value='res', 44 help="Print pixel width and height.") 45 @click.option('--lnglat', 'meta_member', flag_value='lnglat', 46 help="Print longitude and latitude at center.") 47 @click.option('--stats', 'meta_member', flag_value='stats', 48 help="Print statistics (min, max, mean) of a single band " 49 "(use --bidx).") 50 @click.option('--checksum', 'meta_member', flag_value='checksum', 51 help="Print integer checksum of a single band " 52 "(use --bidx).") 53 @click.option('-v', '--tell-me-more', '--verbose', is_flag=True, 54 help="Output extra information.") 55 @options.bidx_opt 56 @options.masked_opt 57 @click.pass_context 58 def info(ctx, input, aspect, indent, namespace, meta_member, verbose, bidx, 59 masked): 60 """Print metadata about the dataset as JSON. 61 62 Optionally print a single metadata item as a string. 
63 """ 64 try: 65 with ctx.obj['env'], rasterio.open(input) as src: 66 67 info = dict(src.profile) 68 info['shape'] = (info['height'], info['width']) 69 info['bounds'] = src.bounds 70 proj4 = src.crs.to_string() 71 if proj4.startswith('+init=epsg'): 72 proj4 = proj4.split('=')[1].upper() 73 info['crs'] = proj4 74 info['res'] = src.res 75 info['colorinterp'] = [src.colorinterp(i).name 76 for i in src.indexes] 77 info['units'] = [units or None for units in src.units] 78 info['descriptions'] = src.descriptions 79 info['indexes'] = src.indexes 80 info['mask_flags'] = [[ 81 flag.name for flag in flags] for flags in src.mask_flag_enums] 82 83 if proj4 != '': 84 info['lnglat'] = src.lnglat() 85 86 if verbose: 87 stats = [{'min': float(b.min()), 88 'max': float(b.max()), 89 'mean': float(b.mean()) 90 } for b in src.read(masked=masked)] 91 info['stats'] = stats 92 93 info['checksum'] = [src.checksum(i) for i in src.indexes] 94 95 gcps, crs = src.gcps 96 proj4 = crs.to_string() 97 if proj4.startswith('+init=epsg'): 98 proj4 = proj4.split('=')[1].upper() 99 if gcps: 100 info['gcps'] = { 101 'crs': proj4, 'points': [p.asdict() for p in gcps]} 102 103 if aspect == 'meta': 104 if meta_member == 'stats': 105 band = src.read(bidx, masked=masked) 106 click.echo('%f %f %f' % ( 107 float(band.min()), 108 float(band.max()), 109 float(band.mean()))) 110 elif meta_member == 'checksum': 111 click.echo(str(src.checksum(bidx))) 112 elif meta_member: 113 if isinstance(info[meta_member], (list, tuple)): 114 click.echo(" ".join(map(str, info[meta_member]))) 115 else: 116 click.echo(info[meta_member]) 117 else: 118 click.echo(json.dumps(info, sort_keys=True, indent=indent)) 119 120 elif aspect == 'tags': 121 click.echo( 122 json.dumps(src.tags(ns=namespace), indent=indent)) 123 except Exception: 124 raise click.Abort() 125 [end of rasterio/rio/info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/rio/edit_info.py b/rasterio/rio/edit_info.py --- a/rasterio/rio/edit_info.py +++ b/rasterio/rio/edit_info.py @@ -171,7 +171,7 @@ dst.nodata = nodata if unset_crs: - dst.crs = CRS() + dst.crs = None # CRS() elif crs: dst.crs = crs @@ -190,7 +190,7 @@ # Post check - ensure that crs was unset properly if unset_crs: with ctx.obj['env'], rasterio.open(input, 'r') as src: - if dict(src.crs) != {}: + if src.crs: warnings.warn( 'CRS was not unset. Availability of his functionality ' 'differs depending on GDAL version and driver') diff --git a/rasterio/rio/info.py b/rasterio/rio/info.py --- a/rasterio/rio/info.py +++ b/rasterio/rio/info.py @@ -67,10 +67,15 @@ info = dict(src.profile) info['shape'] = (info['height'], info['width']) info['bounds'] = src.bounds - proj4 = src.crs.to_string() - if proj4.startswith('+init=epsg'): - proj4 = proj4.split('=')[1].upper() - info['crs'] = proj4 + + if src.crs: + proj4 = src.crs.to_string() + if proj4.startswith('+init=epsg'): + proj4 = proj4.split('=')[1].upper() + info['crs'] = proj4 + else: + info['crs'] = None + info['res'] = src.res info['colorinterp'] = [src.colorinterp(i).name for i in src.indexes]
{"golden_diff": "diff --git a/rasterio/rio/edit_info.py b/rasterio/rio/edit_info.py\n--- a/rasterio/rio/edit_info.py\n+++ b/rasterio/rio/edit_info.py\n@@ -171,7 +171,7 @@\n dst.nodata = nodata\n \n if unset_crs:\n- dst.crs = CRS()\n+ dst.crs = None # CRS()\n elif crs:\n dst.crs = crs\n \n@@ -190,7 +190,7 @@\n # Post check - ensure that crs was unset properly\n if unset_crs:\n with ctx.obj['env'], rasterio.open(input, 'r') as src:\n- if dict(src.crs) != {}:\n+ if src.crs:\n warnings.warn(\n 'CRS was not unset. Availability of his functionality '\n 'differs depending on GDAL version and driver')\ndiff --git a/rasterio/rio/info.py b/rasterio/rio/info.py\n--- a/rasterio/rio/info.py\n+++ b/rasterio/rio/info.py\n@@ -67,10 +67,15 @@\n info = dict(src.profile)\n info['shape'] = (info['height'], info['width'])\n info['bounds'] = src.bounds\n- proj4 = src.crs.to_string()\n- if proj4.startswith('+init=epsg'):\n- proj4 = proj4.split('=')[1].upper()\n- info['crs'] = proj4\n+\n+ if src.crs:\n+ proj4 = src.crs.to_string()\n+ if proj4.startswith('+init=epsg'):\n+ proj4 = proj4.split('=')[1].upper()\n+ info['crs'] = proj4\n+ else:\n+ info['crs'] = None\n+\n info['res'] = src.res\n info['colorinterp'] = [src.colorinterp(i).name\n for i in src.indexes]\n", "issue": "Return None when a dataset has an undefined CRS\nInstead of `CRS()`. Returning `CRS()` means that application-level concerns about what an undefined CRS means become Rasterio's concerns. `None` is what we have for undefined things in Python, and using it will help us tackle the concerns at the proper level.\n", "before_files": [{"content": "\"\"\"Fetch and edit raster dataset metadata from the command line.\"\"\"\n\n\nimport json\nimport warnings\n\nimport click\n\nimport rasterio\nimport rasterio.crs\nfrom rasterio.crs import CRS\nfrom rasterio.errors import CRSError\nfrom rasterio.rio import options\nfrom rasterio.transform import guard_transform\n\n\n# Handlers for info module options.\n\ndef all_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n if ctx.obj and ctx.obj.get('like') and value is not None:\n ctx.obj['all_like'] = value\n value = ctx.obj.get('like')\n return value\n\n\ndef crs_handler(ctx, param, value):\n \"\"\"Get crs value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = json.loads(value)\n except ValueError:\n retval = value\n try:\n if isinstance(retval, dict):\n retval = CRS(retval)\n else:\n retval = CRS.from_string(retval)\n except CRSError:\n raise click.BadParameter(\n \"'%s' is not a recognized CRS.\" % retval,\n param=param, param_hint='crs')\n return retval\n\n\ndef tags_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = dict(p.split('=') for p in value)\n except:\n raise click.BadParameter(\n \"'%s' contains a malformed tag.\" % value,\n param=param, param_hint='transform')\n return retval\n\n\ndef transform_handler(ctx, param, value):\n \"\"\"Get transform value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n value = json.loads(value)\n except ValueError:\n pass\n try:\n retval = guard_transform(value)\n except:\n raise click.BadParameter(\n \"'%s' is not recognized as an Affine array.\" % value,\n param=param, param_hint='transform')\n return 
retval\n\n\[email protected]('edit-info', short_help=\"Edit dataset metadata.\")\[email protected]_in_arg\[email protected]_opt\[email protected]_nodata_opt\[email protected]('--unset-nodata', default=False, is_flag=True,\n help=\"Unset the dataset's nodata value.\")\[email protected]('--crs', callback=crs_handler, default=None,\n help=\"New coordinate reference system\")\[email protected]('--unset-crs', default=False, is_flag=True,\n help=\"Unset the dataset's CRS value.\")\[email protected]('--transform', callback=transform_handler,\n help=\"New affine transform matrix\")\[email protected]('--units', help=\"Edit units of a band (requires --bidx)\")\[email protected]('--description',\n help=\"Edit description of a band (requires --bidx)\")\[email protected]('--tag', 'tags', callback=tags_handler, multiple=True,\n metavar='KEY=VAL', help=\"New tag.\")\[email protected]('--all', 'allmd', callback=all_handler, flag_value='like',\n is_eager=True, default=False,\n help=\"Copy all metadata items from the template file.\")\[email protected]_opt\[email protected]_context\ndef edit(ctx, input, bidx, nodata, unset_nodata, crs, unset_crs, transform,\n units, description, tags, allmd, like):\n \"\"\"Edit a dataset's metadata: coordinate reference system, affine\n transformation matrix, nodata value, and tags.\n\n The coordinate reference system may be either a PROJ.4 or EPSG:nnnn\n string,\n\n --crs 'EPSG:4326'\n\n or a JSON text-encoded PROJ.4 object.\n\n --crs '{\"proj\": \"utm\", \"zone\": 18, ...}'\n\n Transforms are JSON-encoded Affine objects like:\n\n --transform '[300.038, 0.0, 101985.0, 0.0, -300.042, 2826915.0]'\n\n Prior to Rasterio 1.0 GDAL geotransforms were supported for --transform,\n but are no longer supported.\n\n Metadata items may also be read from an existing dataset using a\n combination of the --like option with at least one of --all,\n `--crs like`, `--nodata like`, and `--transform like`.\n\n rio edit-info example.tif --like template.tif --all\n\n To get just the transform from the template:\n\n rio edit-info example.tif --like template.tif --transform like\n\n \"\"\"\n import numpy as np\n\n def in_dtype_range(value, dtype):\n infos = {'c': np.finfo, 'f': np.finfo, 'i': np.iinfo,\n 'u': np.iinfo}\n rng = infos[np.dtype(dtype).kind](dtype)\n return rng.min <= value <= rng.max\n\n with ctx.obj['env'], rasterio.open(input, 'r+') as dst:\n\n if allmd:\n nodata = allmd['nodata']\n crs = allmd['crs']\n transform = allmd['transform']\n tags = allmd['tags']\n\n if unset_nodata and nodata is not options.IgnoreOption:\n raise click.BadParameter(\n \"--unset-nodata and --nodata cannot be used together.\")\n\n if unset_crs and crs:\n raise click.BadParameter(\n \"--unset-crs and --crs cannot be used together.\")\n\n if unset_nodata:\n # Setting nodata to None will raise NotImplementedError\n # if GDALDeleteRasterNoDataValue() isn't present in the\n # GDAL library.\n try:\n dst.nodata = None\n except NotImplementedError as exc: # pragma: no cover\n raise click.ClickException(str(exc))\n\n elif nodata is not options.IgnoreOption:\n dtype = dst.dtypes[0]\n if nodata is not None and not in_dtype_range(nodata, dtype):\n raise click.BadParameter(\n \"outside the range of the file's \"\n \"data type (%s).\" % dtype,\n param=nodata, param_hint='nodata')\n dst.nodata = nodata\n\n if unset_crs:\n dst.crs = CRS()\n elif crs:\n dst.crs = crs\n\n if transform:\n dst.transform = transform\n\n if tags:\n dst.update_tags(**tags)\n\n if units:\n dst.set_units(bidx, units)\n\n if description:\n 
dst.set_description(bidx, description)\n\n # Post check - ensure that crs was unset properly\n if unset_crs:\n with ctx.obj['env'], rasterio.open(input, 'r') as src:\n if dict(src.crs) != {}:\n warnings.warn(\n 'CRS was not unset. Availability of his functionality '\n 'differs depending on GDAL version and driver')\n", "path": "rasterio/rio/edit_info.py"}, {"content": "\"\"\"Command access to dataset metadata, stats, and more.\"\"\"\n\n\nimport json\n\nimport click\n\nimport rasterio\nimport rasterio.crs\nfrom rasterio.rio import options\n\n\[email protected](short_help=\"Print information about a data file.\")\[email protected]_in_arg\[email protected]('--meta', 'aspect', flag_value='meta', default=True,\n help=\"Show data file structure (default).\")\[email protected]('--tags', 'aspect', flag_value='tags',\n help=\"Show data file tags.\")\[email protected]('--namespace', help=\"Select a tag namespace.\")\[email protected]('--indent', default=None, type=int,\n help=\"Indentation level for pretty printed output\")\n# Options to pick out a single metadata item and print it as\n# a string.\[email protected]('--count', 'meta_member', flag_value='count',\n help=\"Print the count of bands.\")\[email protected]('-t', '--dtype', 'meta_member', flag_value='dtype',\n help=\"Print the dtype name.\")\[email protected]('--nodata', 'meta_member', flag_value='nodata',\n help=\"Print the nodata value.\")\[email protected]('-f', '--format', '--driver', 'meta_member', flag_value='driver',\n help=\"Print the format driver.\")\[email protected]('--shape', 'meta_member', flag_value='shape',\n help=\"Print the (height, width) shape.\")\[email protected]('--height', 'meta_member', flag_value='height',\n help=\"Print the height (number of rows).\")\[email protected]('--width', 'meta_member', flag_value='width',\n help=\"Print the width (number of columns).\")\[email protected]('--crs', 'meta_member', flag_value='crs',\n help=\"Print the CRS as a PROJ.4 string.\")\[email protected]('--bounds', 'meta_member', flag_value='bounds',\n help=\"Print the boundary coordinates \"\n \"(left, bottom, right, top).\")\[email protected]('-r', '--res', 'meta_member', flag_value='res',\n help=\"Print pixel width and height.\")\[email protected]('--lnglat', 'meta_member', flag_value='lnglat',\n help=\"Print longitude and latitude at center.\")\[email protected]('--stats', 'meta_member', flag_value='stats',\n help=\"Print statistics (min, max, mean) of a single band \"\n \"(use --bidx).\")\[email protected]('--checksum', 'meta_member', flag_value='checksum',\n help=\"Print integer checksum of a single band \"\n \"(use --bidx).\")\[email protected]('-v', '--tell-me-more', '--verbose', is_flag=True,\n help=\"Output extra information.\")\[email protected]_opt\[email protected]_opt\[email protected]_context\ndef info(ctx, input, aspect, indent, namespace, meta_member, verbose, bidx,\n masked):\n \"\"\"Print metadata about the dataset as JSON.\n\n Optionally print a single metadata item as a string.\n \"\"\"\n try:\n with ctx.obj['env'], rasterio.open(input) as src:\n\n info = dict(src.profile)\n info['shape'] = (info['height'], info['width'])\n info['bounds'] = src.bounds\n proj4 = src.crs.to_string()\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n info['crs'] = proj4\n info['res'] = src.res\n info['colorinterp'] = [src.colorinterp(i).name\n for i in src.indexes]\n info['units'] = [units or None for units in src.units]\n info['descriptions'] = src.descriptions\n info['indexes'] = src.indexes\n 
info['mask_flags'] = [[\n flag.name for flag in flags] for flags in src.mask_flag_enums]\n\n if proj4 != '':\n info['lnglat'] = src.lnglat()\n\n if verbose:\n stats = [{'min': float(b.min()),\n 'max': float(b.max()),\n 'mean': float(b.mean())\n } for b in src.read(masked=masked)]\n info['stats'] = stats\n\n info['checksum'] = [src.checksum(i) for i in src.indexes]\n\n gcps, crs = src.gcps\n proj4 = crs.to_string()\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n if gcps:\n info['gcps'] = {\n 'crs': proj4, 'points': [p.asdict() for p in gcps]}\n\n if aspect == 'meta':\n if meta_member == 'stats':\n band = src.read(bidx, masked=masked)\n click.echo('%f %f %f' % (\n float(band.min()),\n float(band.max()),\n float(band.mean())))\n elif meta_member == 'checksum':\n click.echo(str(src.checksum(bidx)))\n elif meta_member:\n if isinstance(info[meta_member], (list, tuple)):\n click.echo(\" \".join(map(str, info[meta_member])))\n else:\n click.echo(info[meta_member])\n else:\n click.echo(json.dumps(info, sort_keys=True, indent=indent))\n\n elif aspect == 'tags':\n click.echo(\n json.dumps(src.tags(ns=namespace), indent=indent))\n except Exception:\n raise click.Abort()\n", "path": "rasterio/rio/info.py"}]}
num_tokens_prompt: 4,091
num_tokens_diff: 438
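The diff above replaces the `dict(src.crs) != {}` comparison and the unconditional `src.crs.to_string()` call with plain truthiness checks, so the CLI tolerates `src.crs` being `None`. A short usage sketch of the intended behaviour, assuming the dataset-level change from the issue (an undefined CRS reported as `None`) is also in place; `example.tif` is a placeholder path, not a file shipped with rasterio:

```python
import rasterio

# Sketch of the behaviour the patch aims for: a dataset without a defined
# CRS should report None rather than an empty CRS() object.
with rasterio.open('example.tif') as src:
    if src.crs is None:
        print('undefined CRS')
    else:
        print(src.crs.to_string())
```

With `None`, callers decide for themselves what an undefined CRS means, which is the separation of concerns the issue asks for.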
problem_id: gh_patches_debug_64681
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-751
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `cupy.array_split` doesn't accept empty indecies ``` >>> x=cupy.array([1]) >>> cupy.array_split(x, []) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/unno/git/chainer/cupy/manipulation/split.py", line 32, in array_split ret.append(ary[skip + (slice(index, size),)]) UnboundLocalError: local variable 'index' referenced before assignment ``` </issue> <code> [start of cupy/manipulation/split.py] 1 import numpy 2 import six 3 4 5 def array_split(ary, indices_or_sections, axis=0): 6 """Splits an array into multiple sub arrays along a given axis. 7 8 This function is almost equivalent to :func:`cupy.split`. The only 9 difference is that this function allows an integer sections that does not 10 evenly divide the axis. 11 12 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split` 13 14 """ 15 if ary.ndim <= axis: 16 raise IndexError('Axis exceeds ndim') 17 size = ary.shape[axis] 18 19 if numpy.isscalar(indices_or_sections): 20 each_size = (size - 1) // indices_or_sections + 1 21 indices = [i * each_size 22 for i in six.moves.range(1, indices_or_sections)] 23 else: 24 indices = indices_or_sections 25 26 skip = (slice(None),) * axis 27 ret = [] 28 i = 0 29 for index in indices: 30 ret.append(ary[skip + (slice(i, index),)]) 31 i = index 32 ret.append(ary[skip + (slice(index, size),)]) 33 34 return ret 35 36 37 def dsplit(ary, indices_or_sections): 38 """Splits an array into multiple sub arrays along the third axis. 39 40 This is equivalent to ``split`` with ``axis=2``. 41 42 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit` 43 44 """ 45 if ary.ndim <= 2: 46 raise ValueError('Cannot dsplit an array with less than 3 dimensions') 47 return split(ary, indices_or_sections, 2) 48 49 50 def hsplit(ary, indices_or_sections): 51 """Splits an array into multiple sub arrays horizontally. 52 53 This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one 54 dimension, and otherwise that with ``axis=1``. 55 56 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit` 57 58 """ 59 if ary.ndim == 0: 60 raise ValueError('Cannot hsplit a zero-dimensional array') 61 if ary.ndim == 1: 62 return split(ary, indices_or_sections, 0) 63 else: 64 return split(ary, indices_or_sections, 1) 65 66 67 def split(ary, indices_or_sections, axis=0): 68 """Splits an array into multiple sub arrays along a given axis. 69 70 Args: 71 ary (cupy.ndarray): Array to split. 72 indices_or_sections (int or sequence of ints): A value indicating how 73 to divide the axis. If it is an integer, then is treated as the 74 number of sections, and the axis is evenly divided. Otherwise, 75 the integers indicate indices to split at. Note that the sequence 76 on the device memory is not allowed. 77 axis (int): Axis along which the array is split. 78 79 Returns: 80 A list of sub arrays. Eacy array is a view of the corresponding input 81 array. 82 83 .. 
seealso:: :func:`numpy.split` 84 85 """ 86 if ary.ndim <= axis: 87 raise IndexError('Axis exceeds ndim') 88 size = ary.shape[axis] 89 90 if numpy.isscalar(indices_or_sections): 91 if size % indices_or_sections != 0: 92 raise ValueError( 93 'indices_or_sections must divide the size along the axes.\n' 94 'If you want to split the array into non-equally-sized ' 95 'arrays, use array_split instead.') 96 return array_split(ary, indices_or_sections, axis) 97 98 99 def vsplit(ary, indices_or_sections): 100 """Splits an array into multiple sub arrays along the first axis. 101 102 This is equivalent to ``split`` with ``axis=0``. 103 104 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit` 105 106 """ 107 if ary.ndim <= 1: 108 raise ValueError('Cannot vsplit an array with less than 2 dimensions') 109 return split(ary, indices_or_sections, 0) 110 [end of cupy/manipulation/split.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py --- a/cupy/manipulation/split.py +++ b/cupy/manipulation/split.py @@ -23,6 +23,9 @@ else: indices = indices_or_sections + if len(indices) == 0: + return [ary] + skip = (slice(None),) * axis ret = [] i = 0
{"golden_diff": "diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py\n--- a/cupy/manipulation/split.py\n+++ b/cupy/manipulation/split.py\n@@ -23,6 +23,9 @@\n else:\n indices = indices_or_sections\n \n+ if len(indices) == 0:\n+ return [ary]\n+\n skip = (slice(None),) * axis\n ret = []\n i = 0\n", "issue": "`cupy.array_split` doesn't accept empty indecies\n```\n>>> x=cupy.array([1])\n>>> cupy.array_split(x, [])\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/home/unno/git/chainer/cupy/manipulation/split.py\", line 32, in array_split\n ret.append(ary[skip + (slice(index, size),)])\nUnboundLocalError: local variable 'index' referenced before assignment\n```\n\n", "before_files": [{"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(index, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Eacy array is a view of the corresponding input\n array.\n\n .. 
seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}]}
num_tokens_prompt: 1,755
num_tokens_diff: 104
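The three-line fix returns `[ary]` as soon as the index sequence is empty, so the trailing `slice(index, size)` never touches an unbound `index`. A quick check of the expected semantics against `numpy.array_split`, the reference behaviour cupy mirrors, which runs without a GPU:

```python
import numpy

# The reported corner case: an empty index list must yield a single
# chunk containing the whole array, not an UnboundLocalError.
x = numpy.array([1])
chunks = numpy.array_split(x, [])
assert len(chunks) == 1
assert (chunks[0] == x).all()
print(chunks)  # [array([1])]
```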
problem_id: gh_patches_debug_14270
source: rasdani/github-patches
task_type: git_diff
in_source_id: streamlink__streamlink-562
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 0.3.2 Release Closes #562 </issue> <code> [start of src/streamlink/__init__.py] 1 # coding: utf8 2 """Streamlink extracts streams from various services. 3 4 The main compontent of Streamlink is a command-line utility that 5 launches the streams in a video player. 6 7 An API is also provided that allows direct access to stream data. 8 9 Full documentation is available at https://streamlink.github.io. 10 11 """ 12 13 14 __title__ = "streamlink" 15 __version__ = "0.3.1" 16 __license__ = "Simplified BSD" 17 __author__ = "Streamlink" 18 __copyright__ = "Copyright 2016 Streamlink" 19 __credits__ = [ 20 "Agustín Carrasco (@asermax)", 21 "Andrew Bashore (@bashtech)", 22 "Andy Mikhailenko (@neithere)", 23 "Athanasios Oikonomou (@athoik)", 24 "Brian Callahan (@ibara)", 25 "Che (@chhe)", 26 "Christopher Rosell (@streamlink)", 27 "Daniel Meißner (@meise)", 28 "Daniel Miranda (@danielkza)", 29 "Daniel Wallace (@gtmanfred)", 30 "David Arvelo (@darvelo)", 31 "Dominik Dabrowski (@doda)", 32 "Erik G (@tboss)", 33 "Eric J (@wormeyman)", 34 "Ethan Jones (@jonesz)", 35 "Gaspard Jankowiak (@gapato)", 36 "Jaime Marquínez Ferrándiz (@jaimeMF)", 37 "Jan Tore Morken (@jantore)", 38 "John Peterson (@john-peterson)", 39 "Jon Bergli Heier (@sn4kebite)", 40 "Joseph Glanville (@josephglanville)", 41 "Julian Richen (@FireDart)", 42 "Kacper (@kasper93)", 43 "Martin Panter (@vadmium)", 44 "Max Nordlund (@maxnordlund)", 45 "Michael Cheah (@cheah)", 46 "Moritz Blanke", 47 "Niall McAndrew (@niallm90)", 48 "Niels Kräupl (@Gamewalker)", 49 "Pascal Romahn (@skulblakka)", 50 "Sam Edwards (@dotsam)", 51 "Stefan Breunig (@breunigs)", 52 "Suhail Patel (@suhailpatel)", 53 "Sunaga Takahiro (@sunaga720)", 54 "Vitaly Evtushenko (@eltiren)", 55 "Warnar Boekkooi (@boekkooi)", 56 "@blxd", 57 "@btiom", 58 "@daslicious", 59 "@MasterofJOKers", 60 "@mammothb", 61 "@medina", 62 "@monkeyphysics", 63 "@nixxquality", 64 "@papplampe", 65 "@Raziel-23", 66 "@t0mm0", 67 "@ToadKing", 68 "@unintended", 69 "@wolftankk", 70 "@yeeeargh" 71 ] 72 73 from .api import streams 74 from .exceptions import (StreamlinkError, PluginError, NoStreamsError, 75 NoPluginError, StreamError) 76 from .session import Streamlink 77 [end of src/streamlink/__init__.py] [start of setup.py] 1 #!/usr/bin/env python 2 3 from os import environ 4 from os.path import abspath, dirname, join 5 from setuptools import setup, find_packages 6 from sys import version_info, path as sys_path 7 8 deps = [] 9 10 if version_info[0] == 2: 11 # Require backport of concurrent.futures on Python 2 12 deps.append("futures") 13 14 # Require backport of argparse on Python 2.6 15 if version_info[1] == 6: 16 deps.append("argparse") 17 18 # Require singledispatch on Python <3.4 19 if version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4): 20 deps.append("singledispatch") 21 22 # requests 2.0 does not work correctly on Python <2.6.3 23 if (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3): 24 deps.append("requests>=1.0,<2.0") 25 else: 26 deps.append("requests>=1.0,!=2.12.0,!=2.12.1,<3.0") 27 28 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6 29 deps.append("pycryptodome>=3.4.3,<4") 30 31 # shutil.get_terminal_size and which were added in Python 3.3 32 if version_info[0] == 2: 33 deps.append("backports.shutil_which") 34 deps.append("backports.shutil_get_terminal_size") 35 36 # for localization 37 deps.append("iso-639") 
38 deps.append("iso3166") 39 40 # When we build an egg for the Win32 bootstrap we don't want dependency 41 # information built into it. 42 if environ.get("NO_DEPS"): 43 deps = [] 44 45 srcdir = join(dirname(abspath(__file__)), "src/") 46 sys_path.insert(0, srcdir) 47 48 setup(name="streamlink", 49 version="0.3.1", 50 description="Streamlink is command-line utility that extracts streams " 51 "from various services and pipes them into a video player of " 52 "choice.", 53 url="https://github.com/streamlink/streamlink", 54 author="Streamlink", 55 author_email="[email protected]", # temp until we have a mailing list / global email 56 license="Simplified BSD", 57 packages=find_packages("src"), 58 package_dir={"": "src"}, 59 entry_points={ 60 "console_scripts": ["streamlink=streamlink_cli.main:main"] 61 }, 62 install_requires=deps, 63 test_suite="tests", 64 classifiers=["Development Status :: 5 - Production/Stable", 65 "Environment :: Console", 66 "Operating System :: POSIX", 67 "Operating System :: Microsoft :: Windows", 68 "Programming Language :: Python :: 2.6", 69 "Programming Language :: Python :: 2.7", 70 "Programming Language :: Python :: 3.3", 71 "Programming Language :: Python :: 3.4", 72 "Topic :: Internet :: WWW/HTTP", 73 "Topic :: Multimedia :: Sound/Audio", 74 "Topic :: Multimedia :: Video", 75 "Topic :: Utilities"]) 76 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -46,7 +46,7 @@ sys_path.insert(0, srcdir) setup(name="streamlink", - version="0.3.1", + version="0.3.2", description="Streamlink is command-line utility that extracts streams " "from various services and pipes them into a video player of " "choice.", diff --git a/src/streamlink/__init__.py b/src/streamlink/__init__.py --- a/src/streamlink/__init__.py +++ b/src/streamlink/__init__.py @@ -12,7 +12,7 @@ __title__ = "streamlink" -__version__ = "0.3.1" +__version__ = "0.3.2" __license__ = "Simplified BSD" __author__ = "Streamlink" __copyright__ = "Copyright 2016 Streamlink"
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n sys_path.insert(0, srcdir)\n \n setup(name=\"streamlink\",\n- version=\"0.3.1\",\n+ version=\"0.3.2\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\ndiff --git a/src/streamlink/__init__.py b/src/streamlink/__init__.py\n--- a/src/streamlink/__init__.py\n+++ b/src/streamlink/__init__.py\n@@ -12,7 +12,7 @@\n \n \n __title__ = \"streamlink\"\n-__version__ = \"0.3.1\"\n+__version__ = \"0.3.2\"\n __license__ = \"Simplified BSD\"\n __author__ = \"Streamlink\"\n __copyright__ = \"Copyright 2016 Streamlink\"\n", "issue": "0.3.2 Release\nCloses #562 \n", "before_files": [{"content": "# coding: utf8\n\"\"\"Streamlink extracts streams from various services.\n\nThe main compontent of Streamlink is a command-line utility that\nlaunches the streams in a video player.\n\nAn API is also provided that allows direct access to stream data.\n\nFull documentation is available at https://streamlink.github.io.\n\n\"\"\"\n\n\n__title__ = \"streamlink\"\n__version__ = \"0.3.1\"\n__license__ = \"Simplified BSD\"\n__author__ = \"Streamlink\"\n__copyright__ = \"Copyright 2016 Streamlink\"\n__credits__ = [\n \"Agust\u00edn Carrasco (@asermax)\",\n \"Andrew Bashore (@bashtech)\",\n \"Andy Mikhailenko (@neithere)\",\n \"Athanasios Oikonomou (@athoik)\",\n \"Brian Callahan (@ibara)\",\n \"Che (@chhe)\",\n \"Christopher Rosell (@streamlink)\",\n \"Daniel Mei\u00dfner (@meise)\",\n \"Daniel Miranda (@danielkza)\",\n \"Daniel Wallace (@gtmanfred)\",\n \"David Arvelo (@darvelo)\",\n \"Dominik Dabrowski (@doda)\",\n \"Erik G (@tboss)\",\n \"Eric J (@wormeyman)\",\n \"Ethan Jones (@jonesz)\",\n \"Gaspard Jankowiak (@gapato)\",\n \"Jaime Marqu\u00ednez Ferr\u00e1ndiz (@jaimeMF)\",\n \"Jan Tore Morken (@jantore)\",\n \"John Peterson (@john-peterson)\",\n \"Jon Bergli Heier (@sn4kebite)\",\n \"Joseph Glanville (@josephglanville)\",\n \"Julian Richen (@FireDart)\",\n \"Kacper (@kasper93)\",\n \"Martin Panter (@vadmium)\",\n \"Max Nordlund (@maxnordlund)\",\n \"Michael Cheah (@cheah)\",\n \"Moritz Blanke\",\n \"Niall McAndrew (@niallm90)\",\n \"Niels Kr\u00e4upl (@Gamewalker)\",\n \"Pascal Romahn (@skulblakka)\",\n \"Sam Edwards (@dotsam)\",\n \"Stefan Breunig (@breunigs)\",\n \"Suhail Patel (@suhailpatel)\",\n \"Sunaga Takahiro (@sunaga720)\",\n \"Vitaly Evtushenko (@eltiren)\",\n \"Warnar Boekkooi (@boekkooi)\",\n \"@blxd\",\n \"@btiom\",\n \"@daslicious\",\n \"@MasterofJOKers\",\n \"@mammothb\",\n \"@medina\",\n \"@monkeyphysics\",\n \"@nixxquality\",\n \"@papplampe\",\n \"@Raziel-23\",\n \"@t0mm0\",\n \"@ToadKing\",\n \"@unintended\",\n \"@wolftankk\",\n \"@yeeeargh\"\n]\n\nfrom .api import streams\nfrom .exceptions import (StreamlinkError, PluginError, NoStreamsError,\n NoPluginError, StreamError)\nfrom .session import Streamlink\n", "path": "src/streamlink/__init__.py"}, {"content": "#!/usr/bin/env python\n\nfrom os import environ\nfrom os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\nfrom sys import version_info, path as sys_path\n\ndeps = []\n\nif version_info[0] == 2:\n # Require backport of concurrent.futures on Python 2\n deps.append(\"futures\")\n\n # Require backport of argparse on Python 2.6\n if version_info[1] == 6:\n deps.append(\"argparse\")\n\n# Require singledispatch on Python <3.4\nif version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):\n 
deps.append(\"singledispatch\")\n\n# requests 2.0 does not work correctly on Python <2.6.3\nif (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3):\n deps.append(\"requests>=1.0,<2.0\")\nelse:\n deps.append(\"requests>=1.0,!=2.12.0,!=2.12.1,<3.0\")\n\n# this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\ndeps.append(\"pycryptodome>=3.4.3,<4\")\n\n# shutil.get_terminal_size and which were added in Python 3.3\nif version_info[0] == 2:\n deps.append(\"backports.shutil_which\")\n deps.append(\"backports.shutil_get_terminal_size\")\n\n# for localization\ndeps.append(\"iso-639\")\ndeps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don't want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nsrcdir = join(dirname(abspath(__file__)), \"src/\")\nsys_path.insert(0, srcdir)\n\nsetup(name=\"streamlink\",\n version=\"0.3.1\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n url=\"https://github.com/streamlink/streamlink\",\n author=\"Streamlink\",\n author_email=\"[email protected]\", # temp until we have a mailing list / global email\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
num_tokens_prompt: 2,225
num_tokens_diff: 215
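The release bump edits the same version string in two files, `setup.py` and `src/streamlink/__init__.py`. A hedged sketch of a common single-sourcing pattern that would make such bumps a one-line change; `read_version` is an illustrative helper, not part of the actual patch:

```python
import re


def read_version(init_py_path):
    """Return __version__ from a package __init__.py (illustrative helper)."""
    with open(init_py_path) as f:
        match = re.search(r'__version__\s*=\s*"([^"]+)"', f.read())
    if match is None:
        raise RuntimeError('no version string in %s' % init_py_path)
    return match.group(1)


# setup.py would then carry a single source of truth, e.g.:
# setup(name="streamlink",
#       version=read_version("src/streamlink/__init__.py"),
#       ...)
```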
problem_id: gh_patches_debug_41086
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-6824
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Stack overflow when `to_cpu`-family is overridden ### Code ```py import chainer class MyNet(chainer.Link): def to_cpu(self): super(MyNet, self).to_cpu() net = MyNet() net.to_cpu() ``` ### Result ``` : File "/data2/work/w/repos/chainer/chainer/link.py", line 447, in device_resident_accept super(Link, self).device_resident_accept(visitor) File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 31, in device_resident_accept visitor.visit_device_resident(self) File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 240, in visit_device_resident to_method(**kwargs) File "/home/niboshi/t/a.py", line 6, in to_cpu super(MyNet, self).to_cpu() File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 67, in to_cpu self.__to_device(visitor) File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 139, in __to_device self.device_resident_accept(to_device_visitor) File "/data2/work/w/repos/chainer/chainer/link.py", line 447, in device_resident_accept super(Link, self).device_resident_accept(visitor) File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 31, in device_resident_accept visitor.visit_device_resident(self) RecursionError: maximum recursion depth exceeded ``` </issue> <code> [start of chainer/device_resident.py] 1 import abc 2 import sys 3 import typing as tp # NOQA 4 5 import numpy 6 7 import chainer 8 from chainer import backend 9 from chainer.backends import _cpu 10 from chainer.backends import cuda 11 from chainer.backends import intel64 12 from chainer import types # NOQA 13 from chainer import utils 14 import chainerx 15 16 17 class DeviceResident(utils.enable_final(meta_base=abc.ABCMeta)): 18 19 """A base class of objects with multi-device hierarchy.""" 20 21 _device = _cpu.CpuDevice() 22 23 def __init__(self): 24 # Store overridden to_device family method names. 25 self._overridden_to_methods = tuple([ 26 m for m in ('to_cpu', 'to_gpu', 'to_intel64') 27 if _is_to_device_method_overridden(self, m)]) 28 29 def device_resident_accept(self, visitor): 30 """Applies the visitor to all the device objects in this instance.""" 31 visitor.visit_device_resident(self) 32 33 @property 34 def device(self): 35 """Returns the device""" 36 return self._device 37 38 @property 39 def xp(self): 40 # type: () -> types.Xp 41 """Array module for this link. 42 43 Depending on which of CPU/GPU this link is on, this property returns 44 :mod:`numpy` or :mod:`cupy`. 45 46 """ 47 device = self.device 48 if device is None: 49 return None 50 return device.xp 51 52 def to_cpu(self): 53 # type: () -> 'DeviceResident' 54 """Copies parameter variables and persistent values to CPU. 55 56 This method does not handle non-registered attributes. If some of such 57 attributes must be copied to CPU, the link implementation must 58 override :meth:`Link.to_device` to do so. 59 60 Returns: self 61 62 """ 63 visitor = _ToDeviceVisitor( 64 backend.CpuDevice(), 65 entry_method_info=('to_cpu', {}), 66 skip_between_cupy_devices=True) 67 self.__to_device(visitor) 68 return self 69 70 def to_gpu( 71 self, 72 device=None, # type: tp.Optional[types.CudaDeviceSpec] 73 ): 74 # type: (...) -> 'DeviceResident' 75 """Copies parameter variables and persistent values to GPU. 76 77 This method does not handle non-registered attributes. If some of such 78 attributes must be copied to GPU, the link implementation must 79 override :meth:`Link.to_device` to do so. 
80 81 Args: 82 device: Target device specifier. If omitted, the current device is 83 used. 84 85 Returns: self 86 87 """ 88 cuda.check_cuda_available() 89 cuda_device = cuda._get_device_or_current(device) 90 device = chainer.backends.cuda.GpuDevice(cuda_device) 91 visitor = _ToDeviceVisitor( 92 device, 93 entry_method_info=('to_gpu', {'device': device.device}), 94 skip_between_cupy_devices=True) 95 self.__to_device(visitor) 96 return self 97 98 def to_intel64(self): 99 # type: () -> 'DeviceResident' 100 """Copies parameter variables and persistent values to CPU.""" 101 intel64.check_ideep_available() 102 visitor = _ToDeviceVisitor( 103 chainer.get_device(intel64.Intel64Device()), 104 entry_method_info=('to_intel64', {})) 105 self.__to_device(visitor) 106 return self 107 108 @utils.final 109 def to_chx(self): 110 """Converts parameter variables and persistent values to ChainerX \ 111 without any copy. 112 113 This method does not handle non-registered attributes. If some of such 114 attributes must be copied to ChainerX, the link implementation must 115 override this method to do so. 116 117 Returns: self 118 """ 119 if not chainerx.is_available(): 120 raise RuntimeError('ChainerX is not available.') 121 122 if self.xp is chainerx: 123 return self 124 125 self.device_resident_accept(_ToChxVisitor()) 126 return self 127 128 @utils.final 129 def from_chx(self): 130 """Converts parameter variables and persistent values from ChainerX \ 131 to NumPy/CuPy devices without any copy.""" 132 if isinstance(self._device, backend.ChainerxDevice): 133 self._device = self._device.fallback_device 134 135 self.device_resident_accept(_FromChxVisitor()) 136 return self 137 138 def __to_device(self, to_device_visitor): 139 self.device_resident_accept(to_device_visitor) 140 141 @utils.final 142 def to_device( 143 self, 144 device # type: types.DeviceSpec 145 ): 146 # type: (...) -> 'DeviceResident' 147 """Copies parameter variables and persistent values to the specified \ 148 device. 149 150 This method does not handle non-registered attributes. If some of such 151 attributes must be copied to the device, the link implementation must 152 override this method to do so. 153 154 Args: 155 device: Target device specifier. See 156 :func:`~chainer.get_device` for available values. 157 158 Returns: self 159 160 """ 161 device = chainer.get_device(device) 162 self.__to_device(_ToDeviceVisitor(device)) 163 return self 164 165 166 def _is_to_device_method_overridden(device_resident, method_name): 167 # Returns whether the specified to_device family method is overridden. 168 to_method = getattr(device_resident, method_name, None).__func__ 169 to_method_orig = getattr(DeviceResident, method_name) 170 if sys.version_info < (3,): 171 to_method_orig = to_method_orig.__func__ 172 if to_method is not to_method_orig: 173 return True # overridden 174 return False 175 176 177 class DeviceResidentsVisitor(object): 178 179 """Base class of visitors that visits device resident objects recursively. 180 """ 181 182 def visit_device_resident(self, device_resident): 183 """Processes a :class:`DeviceResident` instance.""" 184 raise NotImplementedError() 185 186 def visit_array(self, arr): 187 """Processes an array and returns a new one. 188 189 If the visitor does not create a new array, it can simply return the 190 original array. 
191 """ 192 raise NotImplementedError() 193 194 def visit_variable(self, param): 195 """Processes a variable or a parameter.""" 196 raise NotImplementedError() 197 198 199 class _ToDeviceVisitor(DeviceResidentsVisitor): 200 # A visitor that implements recursive to_device(). 201 # For backward compatibility, if any of to_cpu/to_gpu/to_intel64 are 202 # overridden on a device resident, this visitor calls it instead of 203 # `visit_device_resident`. That's true even if `to_device` was originally 204 # called. 205 206 def __init__( 207 self, device, entry_method_info=None, 208 skip_between_cupy_devices=False): 209 210 assert isinstance(device, chainer.backend.Device) 211 212 # `entry_method_info` is for backward compatibility workaround for 213 # overridden methods. 214 # It indicates which method originally causes this visitor. 215 # If it is any of the to_??? method names, descendant resident's 216 # respective method will be called if it's overridden 217 # (instead of `device_resident_accept`). 218 if entry_method_info is not None: 219 assert len(entry_method_info) == 2 220 assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64') 221 222 self._device = device 223 self._entry_method_info = entry_method_info 224 self._skip_between_cupy_devices = skip_between_cupy_devices 225 226 def visit_device_resident(self, device_resident): 227 device_resident._device = self._device 228 229 # Backward compatibility workaround for overridden methods 230 if device_resident._overridden_to_methods: 231 if self._entry_method_info is not None: 232 # Deprecated method is being called: e.g. to_cpu and to_gpu. 233 method_name, kwargs = self._entry_method_info 234 else: 235 # to_device is being called 236 method_name, kwargs = ( 237 self._device_to_method_name_and_kwargs(self._device)) 238 if method_name in device_resident._overridden_to_methods: 239 to_method = getattr(device_resident, method_name) 240 to_method(**kwargs) 241 return 242 243 def _device_to_method_name_and_kwargs(self, device): 244 # Converts a device instance to the corresponding combination of 245 # to_??? method name and kwargs. 246 247 # chainerx 248 if device.xp is chainerx: 249 return None, {} 250 # cupy 251 if device.xp is cuda.cupy: 252 return 'to_gpu', {'device': device.device.id} 253 # numpy 254 assert device.xp is numpy 255 if isinstance(device, _cpu.CpuDevice): 256 return 'to_cpu', {} 257 # intel64 258 assert isinstance(device, intel64.Intel64Device) 259 return 'to_intel64', {} 260 261 def visit_array(self, arr): 262 assert isinstance(arr, chainer.get_array_types()) 263 if not (self._skip_between_cupy_devices 264 and self._device.xp is cuda.cupy 265 and isinstance(arr, cuda.ndarray)): 266 return self._device.send(arr) 267 return arr 268 269 def visit_variable(self, param): 270 assert isinstance(param, chainer.Variable) 271 if not (self._skip_between_cupy_devices 272 and self._device.xp is cuda.cupy 273 and param.device.xp is cuda.cupy): 274 param.to_device(self._device) 275 276 277 class _ToChxVisitor(DeviceResidentsVisitor): 278 # A visitor that recursively calls to_chx(). 
279 280 def visit_device_resident(self, device_resident): 281 device_resident._device = backend.ChainerxDevice.from_fallback_device( 282 device_resident._device) 283 284 def visit_array(self, arr): 285 assert isinstance(arr, chainer.get_array_types()) 286 return backend.to_chx(arr) 287 288 def visit_variable(self, param): 289 assert isinstance(param, chainer.Variable) 290 param.to_chx() 291 292 293 class _FromChxVisitor(DeviceResidentsVisitor): 294 # A visitor that recursively calls from_chx(). 295 296 def visit_device_resident(self, device_resident): 297 if isinstance(device_resident._device, backend.ChainerxDevice): 298 device_resident._device = device_resident._device.fallback_device 299 300 def visit_array(self, arr): 301 assert isinstance(arr, chainer.get_array_types()) 302 return backend.from_chx(arr) 303 304 def visit_variable(self, param): 305 assert isinstance(param, chainer.Variable) 306 param.from_chx() 307 [end of chainer/device_resident.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/device_resident.py b/chainer/device_resident.py --- a/chainer/device_resident.py +++ b/chainer/device_resident.py @@ -63,7 +63,8 @@ visitor = _ToDeviceVisitor( backend.CpuDevice(), entry_method_info=('to_cpu', {}), - skip_between_cupy_devices=True) + skip_between_cupy_devices=True, + starting_device_resident=self) self.__to_device(visitor) return self @@ -91,7 +92,8 @@ visitor = _ToDeviceVisitor( device, entry_method_info=('to_gpu', {'device': device.device}), - skip_between_cupy_devices=True) + skip_between_cupy_devices=True, + starting_device_resident=self) self.__to_device(visitor) return self @@ -101,7 +103,8 @@ intel64.check_ideep_available() visitor = _ToDeviceVisitor( chainer.get_device(intel64.Intel64Device()), - entry_method_info=('to_intel64', {})) + entry_method_info=('to_intel64', {}), + starting_device_resident=self) self.__to_device(visitor) return self @@ -205,7 +208,8 @@ def __init__( self, device, entry_method_info=None, - skip_between_cupy_devices=False): + skip_between_cupy_devices=False, + starting_device_resident=None): assert isinstance(device, chainer.backend.Device) @@ -219,15 +223,29 @@ assert len(entry_method_info) == 2 assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64') + # starting_device_resident is also for backward compatibility + # workaround for overridden methods. + # It is a DeviceResident if to_xxx methods were initially called + # on this visitor. This is used to avoid infinite accept-visit loop + # that would occur by calling to_xxx methods. + assert (starting_device_resident is None + or isinstance(starting_device_resident, DeviceResident)) + self._device = device self._entry_method_info = entry_method_info self._skip_between_cupy_devices = skip_between_cupy_devices + self._starting_device_resident = starting_device_resident def visit_device_resident(self, device_resident): device_resident._device = self._device # Backward compatibility workaround for overridden methods if device_resident._overridden_to_methods: + # Skip this device resident, if the visitor was initially triggered + # from it. + if device_resident is self._starting_device_resident: + return + if self._entry_method_info is not None: # Deprecated method is being called: e.g. to_cpu and to_gpu. method_name, kwargs = self._entry_method_info
{"golden_diff": "diff --git a/chainer/device_resident.py b/chainer/device_resident.py\n--- a/chainer/device_resident.py\n+++ b/chainer/device_resident.py\n@@ -63,7 +63,8 @@\n visitor = _ToDeviceVisitor(\n backend.CpuDevice(),\n entry_method_info=('to_cpu', {}),\n- skip_between_cupy_devices=True)\n+ skip_between_cupy_devices=True,\n+ starting_device_resident=self)\n self.__to_device(visitor)\n return self\n \n@@ -91,7 +92,8 @@\n visitor = _ToDeviceVisitor(\n device,\n entry_method_info=('to_gpu', {'device': device.device}),\n- skip_between_cupy_devices=True)\n+ skip_between_cupy_devices=True,\n+ starting_device_resident=self)\n self.__to_device(visitor)\n return self\n \n@@ -101,7 +103,8 @@\n intel64.check_ideep_available()\n visitor = _ToDeviceVisitor(\n chainer.get_device(intel64.Intel64Device()),\n- entry_method_info=('to_intel64', {}))\n+ entry_method_info=('to_intel64', {}),\n+ starting_device_resident=self)\n self.__to_device(visitor)\n return self\n \n@@ -205,7 +208,8 @@\n \n def __init__(\n self, device, entry_method_info=None,\n- skip_between_cupy_devices=False):\n+ skip_between_cupy_devices=False,\n+ starting_device_resident=None):\n \n assert isinstance(device, chainer.backend.Device)\n \n@@ -219,15 +223,29 @@\n assert len(entry_method_info) == 2\n assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64')\n \n+ # starting_device_resident is also for backward compatibility\n+ # workaround for overridden methods.\n+ # It is a DeviceResident if to_xxx methods were initially called\n+ # on this visitor. This is used to avoid infinite accept-visit loop\n+ # that would occur by calling to_xxx methods.\n+ assert (starting_device_resident is None\n+ or isinstance(starting_device_resident, DeviceResident))\n+\n self._device = device\n self._entry_method_info = entry_method_info\n self._skip_between_cupy_devices = skip_between_cupy_devices\n+ self._starting_device_resident = starting_device_resident\n \n def visit_device_resident(self, device_resident):\n device_resident._device = self._device\n \n # Backward compatibility workaround for overridden methods\n if device_resident._overridden_to_methods:\n+ # Skip this device resident, if the visitor was initially triggered\n+ # from it.\n+ if device_resident is self._starting_device_resident:\n+ return\n+\n if self._entry_method_info is not None:\n # Deprecated method is being called: e.g. 
to_cpu and to_gpu.\n method_name, kwargs = self._entry_method_info\n", "issue": "Stack overflow when `to_cpu`-family is overridden\n### Code\r\n```py\r\nimport chainer\r\n\r\n\r\nclass MyNet(chainer.Link):\r\n def to_cpu(self):\r\n super(MyNet, self).to_cpu()\r\n\r\n\r\nnet = MyNet()\r\nnet.to_cpu()\r\n```\r\n\r\n### Result\r\n```\r\n:\r\n File \"/data2/work/w/repos/chainer/chainer/link.py\", line 447, in device_resident_accept\r\n super(Link, self).device_resident_accept(visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 31, in device_resident_accept\r\n visitor.visit_device_resident(self)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 240, in visit_device_resident\r\n to_method(**kwargs)\r\n File \"/home/niboshi/t/a.py\", line 6, in to_cpu\r\n super(MyNet, self).to_cpu()\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 67, in to_cpu\r\n self.__to_device(visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 139, in __to_device\r\n self.device_resident_accept(to_device_visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/link.py\", line 447, in device_resident_accept\r\n super(Link, self).device_resident_accept(visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 31, in device_resident_accept\r\n visitor.visit_device_resident(self)\r\nRecursionError: maximum recursion depth exceeded\r\n```\n", "before_files": [{"content": "import abc\nimport sys\nimport typing as tp # NOQA\n\nimport numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer.backends import _cpu\nfrom chainer.backends import cuda\nfrom chainer.backends import intel64\nfrom chainer import types # NOQA\nfrom chainer import utils\nimport chainerx\n\n\nclass DeviceResident(utils.enable_final(meta_base=abc.ABCMeta)):\n\n \"\"\"A base class of objects with multi-device hierarchy.\"\"\"\n\n _device = _cpu.CpuDevice()\n\n def __init__(self):\n # Store overridden to_device family method names.\n self._overridden_to_methods = tuple([\n m for m in ('to_cpu', 'to_gpu', 'to_intel64')\n if _is_to_device_method_overridden(self, m)])\n\n def device_resident_accept(self, visitor):\n \"\"\"Applies the visitor to all the device objects in this instance.\"\"\"\n visitor.visit_device_resident(self)\n\n @property\n def device(self):\n \"\"\"Returns the device\"\"\"\n return self._device\n\n @property\n def xp(self):\n # type: () -> types.Xp\n \"\"\"Array module for this link.\n\n Depending on which of CPU/GPU this link is on, this property returns\n :mod:`numpy` or :mod:`cupy`.\n\n \"\"\"\n device = self.device\n if device is None:\n return None\n return device.xp\n\n def to_cpu(self):\n # type: () -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to CPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to CPU, the link implementation must\n override :meth:`Link.to_device` to do so.\n\n Returns: self\n\n \"\"\"\n visitor = _ToDeviceVisitor(\n backend.CpuDevice(),\n entry_method_info=('to_cpu', {}),\n skip_between_cupy_devices=True)\n self.__to_device(visitor)\n return self\n\n def to_gpu(\n self,\n device=None, # type: tp.Optional[types.CudaDeviceSpec]\n ):\n # type: (...) -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to GPU.\n\n This method does not handle non-registered attributes. 
If some of such\n attributes must be copied to GPU, the link implementation must\n override :meth:`Link.to_device` to do so.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n Returns: self\n\n \"\"\"\n cuda.check_cuda_available()\n cuda_device = cuda._get_device_or_current(device)\n device = chainer.backends.cuda.GpuDevice(cuda_device)\n visitor = _ToDeviceVisitor(\n device,\n entry_method_info=('to_gpu', {'device': device.device}),\n skip_between_cupy_devices=True)\n self.__to_device(visitor)\n return self\n\n def to_intel64(self):\n # type: () -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to CPU.\"\"\"\n intel64.check_ideep_available()\n visitor = _ToDeviceVisitor(\n chainer.get_device(intel64.Intel64Device()),\n entry_method_info=('to_intel64', {}))\n self.__to_device(visitor)\n return self\n\n @utils.final\n def to_chx(self):\n \"\"\"Converts parameter variables and persistent values to ChainerX \\\nwithout any copy.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to ChainerX, the link implementation must\n override this method to do so.\n\n Returns: self\n \"\"\"\n if not chainerx.is_available():\n raise RuntimeError('ChainerX is not available.')\n\n if self.xp is chainerx:\n return self\n\n self.device_resident_accept(_ToChxVisitor())\n return self\n\n @utils.final\n def from_chx(self):\n \"\"\"Converts parameter variables and persistent values from ChainerX \\\nto NumPy/CuPy devices without any copy.\"\"\"\n if isinstance(self._device, backend.ChainerxDevice):\n self._device = self._device.fallback_device\n\n self.device_resident_accept(_FromChxVisitor())\n return self\n\n def __to_device(self, to_device_visitor):\n self.device_resident_accept(to_device_visitor)\n\n @utils.final\n def to_device(\n self,\n device # type: types.DeviceSpec\n ):\n # type: (...) -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to the specified \\\ndevice.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to the device, the link implementation must\n override this method to do so.\n\n Args:\n device: Target device specifier. 
See\n :func:`~chainer.get_device` for available values.\n\n Returns: self\n\n \"\"\"\n device = chainer.get_device(device)\n self.__to_device(_ToDeviceVisitor(device))\n return self\n\n\ndef _is_to_device_method_overridden(device_resident, method_name):\n # Returns whether the specified to_device family method is overridden.\n to_method = getattr(device_resident, method_name, None).__func__\n to_method_orig = getattr(DeviceResident, method_name)\n if sys.version_info < (3,):\n to_method_orig = to_method_orig.__func__\n if to_method is not to_method_orig:\n return True # overridden\n return False\n\n\nclass DeviceResidentsVisitor(object):\n\n \"\"\"Base class of visitors that visits device resident objects recursively.\n \"\"\"\n\n def visit_device_resident(self, device_resident):\n \"\"\"Processes a :class:`DeviceResident` instance.\"\"\"\n raise NotImplementedError()\n\n def visit_array(self, arr):\n \"\"\"Processes an array and returns a new one.\n\n If the visitor does not create a new array, it can simply return the\n original array.\n \"\"\"\n raise NotImplementedError()\n\n def visit_variable(self, param):\n \"\"\"Processes a variable or a parameter.\"\"\"\n raise NotImplementedError()\n\n\nclass _ToDeviceVisitor(DeviceResidentsVisitor):\n # A visitor that implements recursive to_device().\n # For backward compatibility, if any of to_cpu/to_gpu/to_intel64 are\n # overridden on a device resident, this visitor calls it instead of\n # `visit_device_resident`. That's true even if `to_device` was originally\n # called.\n\n def __init__(\n self, device, entry_method_info=None,\n skip_between_cupy_devices=False):\n\n assert isinstance(device, chainer.backend.Device)\n\n # `entry_method_info` is for backward compatibility workaround for\n # overridden methods.\n # It indicates which method originally causes this visitor.\n # If it is any of the to_??? method names, descendant resident's\n # respective method will be called if it's overridden\n # (instead of `device_resident_accept`).\n if entry_method_info is not None:\n assert len(entry_method_info) == 2\n assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64')\n\n self._device = device\n self._entry_method_info = entry_method_info\n self._skip_between_cupy_devices = skip_between_cupy_devices\n\n def visit_device_resident(self, device_resident):\n device_resident._device = self._device\n\n # Backward compatibility workaround for overridden methods\n if device_resident._overridden_to_methods:\n if self._entry_method_info is not None:\n # Deprecated method is being called: e.g. to_cpu and to_gpu.\n method_name, kwargs = self._entry_method_info\n else:\n # to_device is being called\n method_name, kwargs = (\n self._device_to_method_name_and_kwargs(self._device))\n if method_name in device_resident._overridden_to_methods:\n to_method = getattr(device_resident, method_name)\n to_method(**kwargs)\n return\n\n def _device_to_method_name_and_kwargs(self, device):\n # Converts a device instance to the corresponding combination of\n # to_??? 
method name and kwargs.\n\n # chainerx\n if device.xp is chainerx:\n return None, {}\n # cupy\n if device.xp is cuda.cupy:\n return 'to_gpu', {'device': device.device.id}\n # numpy\n assert device.xp is numpy\n if isinstance(device, _cpu.CpuDevice):\n return 'to_cpu', {}\n # intel64\n assert isinstance(device, intel64.Intel64Device)\n return 'to_intel64', {}\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n if not (self._skip_between_cupy_devices\n and self._device.xp is cuda.cupy\n and isinstance(arr, cuda.ndarray)):\n return self._device.send(arr)\n return arr\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n if not (self._skip_between_cupy_devices\n and self._device.xp is cuda.cupy\n and param.device.xp is cuda.cupy):\n param.to_device(self._device)\n\n\nclass _ToChxVisitor(DeviceResidentsVisitor):\n # A visitor that recursively calls to_chx().\n\n def visit_device_resident(self, device_resident):\n device_resident._device = backend.ChainerxDevice.from_fallback_device(\n device_resident._device)\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n return backend.to_chx(arr)\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n param.to_chx()\n\n\nclass _FromChxVisitor(DeviceResidentsVisitor):\n # A visitor that recursively calls from_chx().\n\n def visit_device_resident(self, device_resident):\n if isinstance(device_resident._device, backend.ChainerxDevice):\n device_resident._device = device_resident._device.fallback_device\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n return backend.from_chx(arr)\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n param.from_chx()\n", "path": "chainer/device_resident.py"}]}
num_tokens_prompt: 3,962
num_tokens_diff: 659
problem_id: gh_patches_debug_303
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyinstaller__pyinstaller-2347
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> gi._gobject.option is not part of pygobject The [GObject hook](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/hook-gi.repository.GObject.py) adds a `hiddenimport` for `gi._gobject.option` however `gi/_gobject/option.py` is not part of pygobject. This leads to the following warning when packaging a Gtk application: ``` 4813 INFO: Loading module hook "hook-gi.py"... 4818 INFO: Loading module hook "hook-gi.repository.GObject.py"... 4926 INFO: Processing pre-safe import module hook gi.repository.GLib 4963 WARNING: Hidden import "gi._gobject.option" not found! ``` Browsing through the [pygobject git history](https://git.gnome.org/browse/pygobject/), I find commit [8afd7e8](https://git.gnome.org/browse/pygobject/commit/gi/_option.py?id=8afd7e880a72a44e6ea46c763bab82146fd75c96) which moved `gi/_glib/option.py` into `gi/_option.py` Replacing the `hiddenimport` to `hiddenimports += ['gi._option', 'gi._gobject']` silences the issue. However, I do not yet understand enough about pygobject and pyinstaller to know if this is the right thing to do. </issue> <code> [start of PyInstaller/hooks/hook-gi.repository.GObject.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2016, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 """ 10 Import hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib 11 library https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject 12 via the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection 13 14 Tested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and 15 GLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7 16 """ 17 18 from PyInstaller.utils.hooks import get_gi_typelibs 19 20 binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0') 21 22 hiddenimports += ['gi._gobject.option', 'gi._gobject'] 23 [end of PyInstaller/hooks/hook-gi.repository.GObject.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-gi.repository.GObject.py b/PyInstaller/hooks/hook-gi.repository.GObject.py --- a/PyInstaller/hooks/hook-gi.repository.GObject.py +++ b/PyInstaller/hooks/hook-gi.repository.GObject.py @@ -19,4 +19,4 @@ binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0') -hiddenimports += ['gi._gobject.option', 'gi._gobject'] +hiddenimports += ['gi._gobject']
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-gi.repository.GObject.py b/PyInstaller/hooks/hook-gi.repository.GObject.py\n--- a/PyInstaller/hooks/hook-gi.repository.GObject.py\n+++ b/PyInstaller/hooks/hook-gi.repository.GObject.py\n@@ -19,4 +19,4 @@\n \n binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n \n-hiddenimports += ['gi._gobject.option', 'gi._gobject']\n+hiddenimports += ['gi._gobject']\n", "issue": "gi._gobject.option is not part of pygobject\nThe [GObject hook](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/hook-gi.repository.GObject.py) adds a `hiddenimport` for `gi._gobject.option` however `gi/_gobject/option.py` is not part of pygobject.\r\n\r\nThis leads to the following warning when packaging a Gtk application:\r\n```\r\n4813 INFO: Loading module hook \"hook-gi.py\"...\r\n4818 INFO: Loading module hook \"hook-gi.repository.GObject.py\"...\r\n4926 INFO: Processing pre-safe import module hook gi.repository.GLib\r\n4963 WARNING: Hidden import \"gi._gobject.option\" not found!\r\n```\r\n\r\nBrowsing through the [pygobject git history](https://git.gnome.org/browse/pygobject/), I find commit [8afd7e8](https://git.gnome.org/browse/pygobject/commit/gi/_option.py?id=8afd7e880a72a44e6ea46c763bab82146fd75c96) which moved `gi/_glib/option.py` into `gi/_option.py`\r\n\r\nReplacing the `hiddenimport` to `hiddenimports += ['gi._option', 'gi._gobject']` silences the issue. However, I do not yet understand enough about pygobject and pyinstaller to know if this is the right thing to do.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\"\"\"\nImport hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib\nlibrary https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject\nvia the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection\n\nTested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and\nGLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7\n\"\"\"\n\nfrom PyInstaller.utils.hooks import get_gi_typelibs\n\nbinaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n\nhiddenimports += ['gi._gobject.option', 'gi._gobject']\n", "path": "PyInstaller/hooks/hook-gi.repository.GObject.py"}]}
num_tokens_prompt: 1,181
num_tokens_diff: 122
problem_id: gh_patches_debug_45143
source: rasdani/github-patches
task_type: git_diff
in_source_id: NVIDIA__NVFlare-353
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> stop_fl.sh does not terminate client side worker process The stop_fl.sh just does `touch shutdown.fl` , wait 5 seconds, then kill the process of pid.fl. The worker process of client is not killed / terminated. Steps to reproduce: 1. Launch server (start.sh) 2. Launch client (start.sh) 3. Using Admin: Upload app 4. Using Admin: Deploy app 5. Using Admin: Start app 6. Use stop_fl.sh </issue> <code> [start of nvflare/private/fed/app/client/worker_process.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Provides a command line interface for a federated client trainer.""" 16 17 import argparse 18 import os 19 import sys 20 import traceback 21 22 from nvflare.apis.fl_constant import FLContextKey 23 from nvflare.apis.workspace import Workspace 24 from nvflare.fuel.sec.security_content_service import SecurityContentService 25 from nvflare.fuel.utils.argument_utils import parse_vars 26 from nvflare.private.defs import EngineConstant 27 from nvflare.private.fed.app.fl_conf import FLClientStarterConfiger 28 from nvflare.private.fed.client.client_json_config import ClientJsonConfigurator 29 from nvflare.private.fed.client.client_run_manager import ClientRunManager 30 from nvflare.private.fed.client.client_runner import ClientRunner 31 from nvflare.private.fed.client.client_status import ClientStatus 32 from nvflare.private.fed.client.command_agent import CommandAgent 33 34 35 def main(): 36 """Worker_process start program.""" 37 parser = argparse.ArgumentParser() 38 parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True) 39 parser.add_argument("--startup", "-w", type=str, help="startup folder", required=True) 40 41 parser.add_argument( 42 "--fed_client", "-s", type=str, help="an aggregation server specification json file", required=True 43 ) 44 45 parser.add_argument("--set", metavar="KEY=VALUE", nargs="*") 46 47 parser.add_argument("--local_rank", type=int, default=0) 48 49 args = parser.parse_args() 50 kv_list = parse_vars(args.set) 51 52 args.train_config = os.path.join("config", "config_train.json") 53 config_folder = kv_list.get("config_folder", "") 54 secure_train = kv_list.get("secure_train", True) 55 if config_folder == "": 56 args.client_config = "config_fed_client.json" 57 else: 58 args.client_config = os.path.join(config_folder, "config_fed_client.json") 59 args.config_folder = config_folder 60 args.env = os.path.join("config", "environment.json") 61 62 try: 63 remove_restart_file(args) 64 except BaseException: 65 print("Could not remove the restart.fl / shutdown.fl file. 
Please check your system before starting FL.") 66 sys.exit(-1) 67 68 restart_file = os.path.join(args.workspace, "restart.fl") 69 if os.path.exists(restart_file): 70 os.remove(restart_file) 71 72 print("starting the client .....") 73 74 deployer = None 75 command_agent = None 76 77 startup = os.path.join(args.workspace, "startup") 78 SecurityContentService.initialize(content_folder=startup) 79 80 try: 81 token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE) 82 with open(token_file, "r") as f: 83 token = f.readline().strip() 84 run_number = f.readline().strip() 85 client_name = f.readline().strip() 86 listen_port = f.readline().strip() 87 print( 88 "token is: {} run_number is: {} client_name: {} listen_port: {}".format( 89 token, run_number, client_name, listen_port 90 ) 91 ) 92 93 startup = args.startup 94 app_root = os.path.join(args.workspace, "run_" + str(run_number), "app_" + client_name) 95 96 app_log_config = os.path.join(app_root, config_folder, "log.config") 97 if os.path.exists(app_log_config): 98 args.log_config = app_log_config 99 else: 100 args.log_config = os.path.join(startup, "log.config") 101 102 conf = FLClientStarterConfiger( 103 app_root=startup, 104 client_config_file_name=args.fed_client, 105 log_config_file_name=args.log_config, 106 kv_list=args.set, 107 ) 108 conf.configure() 109 110 deployer = conf.base_deployer 111 federated_client = deployer.create_fed_client() 112 federated_client.status = ClientStatus.STARTING 113 114 federated_client.token = token 115 federated_client.client_name = client_name 116 federated_client.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False) 117 federated_client.fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False) 118 federated_client.fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True) 119 120 client_config_file_name = os.path.join(app_root, args.client_config) 121 conf = ClientJsonConfigurator( 122 config_file_name=client_config_file_name, 123 ) 124 conf.configure() 125 126 workspace = Workspace(args.workspace, client_name, config_folder) 127 run_manager = ClientRunManager( 128 client_name=client_name, 129 run_num=int(run_number), 130 workspace=workspace, 131 client=federated_client, 132 components=conf.runner_config.components, 133 handlers=conf.runner_config.handlers, 134 conf=conf, 135 ) 136 federated_client.run_manager = run_manager 137 138 with run_manager.new_context() as fl_ctx: 139 fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False) 140 fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False) 141 fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True) 142 fl_ctx.set_prop(FLContextKey.ARGS, args, sticky=True) 143 fl_ctx.set_prop(FLContextKey.APP_ROOT, app_root, private=True, sticky=True) 144 fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True) 145 fl_ctx.set_prop(FLContextKey.SECURE_MODE, secure_train, private=True, sticky=True) 146 147 client_runner = ClientRunner(config=conf.runner_config, run_num=int(run_number), engine=run_manager) 148 run_manager.add_handler(client_runner) 149 fl_ctx.set_prop(FLContextKey.RUNNER, client_runner, private=True) 150 151 # # Start the thread for responding the inquire 152 # federated_client.stop_listen = False 153 # thread = threading.Thread(target=listen_command, args=[federated_client, int(listen_port), client_runner]) 154 # thread.start() 155 # Start the command agent 156 command_agent = CommandAgent(federated_client, int(listen_port), client_runner) 157 
command_agent.start(fl_ctx) 158 159 federated_client.status = ClientStatus.STARTED 160 client_runner.run(app_root, args) 161 162 except BaseException as e: 163 traceback.print_exc() 164 print("FL client execution exception: " + str(e)) 165 finally: 166 # if federated_client: 167 # federated_client.stop_listen = True 168 # thread.join() 169 if command_agent: 170 command_agent.shutdown() 171 if deployer: 172 deployer.close() 173 # address = ('localhost', 6000) 174 # conn_client = Client(address, authkey='client process secret password'.encode()) 175 # conn_client.send('bye') 176 177 178 def remove_restart_file(args): 179 """To remove the restart.fl file. 180 181 Args: 182 args: command args 183 184 """ 185 restart_file = os.path.join(args.workspace, "restart.fl") 186 if os.path.exists(restart_file): 187 os.remove(restart_file) 188 restart_file = os.path.join(args.workspace, "shutdown.fl") 189 if os.path.exists(restart_file): 190 os.remove(restart_file) 191 192 193 if __name__ == "__main__": 194 """ 195 This is the program when starting the child process for running the NVIDIA FLARE executor. 196 """ 197 198 main() 199 [end of nvflare/private/fed/app/client/worker_process.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvflare/private/fed/app/client/worker_process.py b/nvflare/private/fed/app/client/worker_process.py --- a/nvflare/private/fed/app/client/worker_process.py +++ b/nvflare/private/fed/app/client/worker_process.py @@ -17,8 +17,12 @@ import argparse import os import sys +import threading +import time import traceback +import psutil + from nvflare.apis.fl_constant import FLContextKey from nvflare.apis.workspace import Workspace from nvflare.fuel.sec.security_content_service import SecurityContentService @@ -32,23 +36,33 @@ from nvflare.private.fed.client.command_agent import CommandAgent +def check_parent_alive(parent_pid, stop_event: threading.Event): + while True: + if stop_event.is_set(): + break + if not psutil.pid_exists(parent_pid): + # if parent is not alive, kill its worker process + os.killpg(os.getpgid(os.getpid()), 9) + break + time.sleep(1) + + def main(): - """Worker_process start program.""" + """Worker process start program.""" parser = argparse.ArgumentParser() parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True) parser.add_argument("--startup", "-w", type=str, help="startup folder", required=True) - parser.add_argument( "--fed_client", "-s", type=str, help="an aggregation server specification json file", required=True ) - parser.add_argument("--set", metavar="KEY=VALUE", nargs="*") - parser.add_argument("--local_rank", type=int, default=0) - args = parser.parse_args() kv_list = parse_vars(args.set) + # get parent process id + parent_pid = os.getppid() + args.train_config = os.path.join("config", "config_train.json") config_folder = kv_list.get("config_folder", "") secure_train = kv_list.get("secure_train", True) @@ -68,7 +82,6 @@ restart_file = os.path.join(args.workspace, "restart.fl") if os.path.exists(restart_file): os.remove(restart_file) - print("starting the client .....") deployer = None @@ -77,7 +90,13 @@ startup = os.path.join(args.workspace, "startup") SecurityContentService.initialize(content_folder=startup) + federated_client = None + thread = None + stop_event = threading.Event() try: + # start parent process checking thread + thread = threading.Thread(target=check_parent_alive, args=(parent_pid, stop_event)) + thread.start() token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE) with open(token_file, "r") as f: token = f.readline().strip() @@ -158,7 +177,6 @@ federated_client.status = ClientStatus.STARTED client_runner.run(app_root, args) - except BaseException as e: traceback.print_exc() print("FL client execution exception: " + str(e)) @@ -166,10 +184,16 @@ # if federated_client: # federated_client.stop_listen = True # thread.join() + stop_event.set() if command_agent: command_agent.shutdown() if deployer: deployer.close() + if federated_client: + federated_client.close() + if thread and thread.is_alive(): + thread.join() + # address = ('localhost', 6000) # conn_client = Client(address, authkey='client process secret password'.encode()) # conn_client.send('bye') @@ -177,10 +201,8 @@ def remove_restart_file(args): """To remove the restart.fl file. - Args: args: command args - """ restart_file = os.path.join(args.workspace, "restart.fl") if os.path.exists(restart_file):
{"golden_diff": "diff --git a/nvflare/private/fed/app/client/worker_process.py b/nvflare/private/fed/app/client/worker_process.py\n--- a/nvflare/private/fed/app/client/worker_process.py\n+++ b/nvflare/private/fed/app/client/worker_process.py\n@@ -17,8 +17,12 @@\n import argparse\n import os\n import sys\n+import threading\n+import time\n import traceback\n \n+import psutil\n+\n from nvflare.apis.fl_constant import FLContextKey\n from nvflare.apis.workspace import Workspace\n from nvflare.fuel.sec.security_content_service import SecurityContentService\n@@ -32,23 +36,33 @@\n from nvflare.private.fed.client.command_agent import CommandAgent\n \n \n+def check_parent_alive(parent_pid, stop_event: threading.Event):\n+ while True:\n+ if stop_event.is_set():\n+ break\n+ if not psutil.pid_exists(parent_pid):\n+ # if parent is not alive, kill its worker process\n+ os.killpg(os.getpgid(os.getpid()), 9)\n+ break\n+ time.sleep(1)\n+\n+\n def main():\n- \"\"\"Worker_process start program.\"\"\"\n+ \"\"\"Worker process start program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n parser.add_argument(\"--startup\", \"-w\", type=str, help=\"startup folder\", required=True)\n-\n parser.add_argument(\n \"--fed_client\", \"-s\", type=str, help=\"an aggregation server specification json file\", required=True\n )\n-\n parser.add_argument(\"--set\", metavar=\"KEY=VALUE\", nargs=\"*\")\n-\n parser.add_argument(\"--local_rank\", type=int, default=0)\n-\n args = parser.parse_args()\n kv_list = parse_vars(args.set)\n \n+ # get parent process id\n+ parent_pid = os.getppid()\n+\n args.train_config = os.path.join(\"config\", \"config_train.json\")\n config_folder = kv_list.get(\"config_folder\", \"\")\n secure_train = kv_list.get(\"secure_train\", True)\n@@ -68,7 +82,6 @@\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n-\n print(\"starting the client .....\")\n \n deployer = None\n@@ -77,7 +90,13 @@\n startup = os.path.join(args.workspace, \"startup\")\n SecurityContentService.initialize(content_folder=startup)\n \n+ federated_client = None\n+ thread = None\n+ stop_event = threading.Event()\n try:\n+ # start parent process checking thread\n+ thread = threading.Thread(target=check_parent_alive, args=(parent_pid, stop_event))\n+ thread.start()\n token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE)\n with open(token_file, \"r\") as f:\n token = f.readline().strip()\n@@ -158,7 +177,6 @@\n \n federated_client.status = ClientStatus.STARTED\n client_runner.run(app_root, args)\n-\n except BaseException as e:\n traceback.print_exc()\n print(\"FL client execution exception: \" + str(e))\n@@ -166,10 +184,16 @@\n # if federated_client:\n # federated_client.stop_listen = True\n # thread.join()\n+ stop_event.set()\n if command_agent:\n command_agent.shutdown()\n if deployer:\n deployer.close()\n+ if federated_client:\n+ federated_client.close()\n+ if thread and thread.is_alive():\n+ thread.join()\n+\n # address = ('localhost', 6000)\n # conn_client = Client(address, authkey='client process secret password'.encode())\n # conn_client.send('bye')\n@@ -177,10 +201,8 @@\n \n def remove_restart_file(args):\n \"\"\"To remove the restart.fl file.\n-\n Args:\n args: command args\n-\n \"\"\"\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n", "issue": "stop_fl.sh does not terminate client side worker 
process\nThe stop_fl.sh just does `touch shutdown.fl` , wait 5 seconds, then kill the process of pid.fl.\r\n\r\n\r\nThe worker process of client is not killed / terminated.\r\n\r\n\r\nSteps to reproduce:\r\n1. Launch server (start.sh)\r\n2. Launch client (start.sh)\r\n3. Using Admin: Upload app\r\n4. Using Admin: Deploy app\r\n5. Using Admin: Start app\r\n6. Use stop_fl.sh\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides a command line interface for a federated client trainer.\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport traceback\n\nfrom nvflare.apis.fl_constant import FLContextKey\nfrom nvflare.apis.workspace import Workspace\nfrom nvflare.fuel.sec.security_content_service import SecurityContentService\nfrom nvflare.fuel.utils.argument_utils import parse_vars\nfrom nvflare.private.defs import EngineConstant\nfrom nvflare.private.fed.app.fl_conf import FLClientStarterConfiger\nfrom nvflare.private.fed.client.client_json_config import ClientJsonConfigurator\nfrom nvflare.private.fed.client.client_run_manager import ClientRunManager\nfrom nvflare.private.fed.client.client_runner import ClientRunner\nfrom nvflare.private.fed.client.client_status import ClientStatus\nfrom nvflare.private.fed.client.command_agent import CommandAgent\n\n\ndef main():\n \"\"\"Worker_process start program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n parser.add_argument(\"--startup\", \"-w\", type=str, help=\"startup folder\", required=True)\n\n parser.add_argument(\n \"--fed_client\", \"-s\", type=str, help=\"an aggregation server specification json file\", required=True\n )\n\n parser.add_argument(\"--set\", metavar=\"KEY=VALUE\", nargs=\"*\")\n\n parser.add_argument(\"--local_rank\", type=int, default=0)\n\n args = parser.parse_args()\n kv_list = parse_vars(args.set)\n\n args.train_config = os.path.join(\"config\", \"config_train.json\")\n config_folder = kv_list.get(\"config_folder\", \"\")\n secure_train = kv_list.get(\"secure_train\", True)\n if config_folder == \"\":\n args.client_config = \"config_fed_client.json\"\n else:\n args.client_config = os.path.join(config_folder, \"config_fed_client.json\")\n args.config_folder = config_folder\n args.env = os.path.join(\"config\", \"environment.json\")\n\n try:\n remove_restart_file(args)\n except BaseException:\n print(\"Could not remove the restart.fl / shutdown.fl file. 
Please check your system before starting FL.\")\n sys.exit(-1)\n\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n\n print(\"starting the client .....\")\n\n deployer = None\n command_agent = None\n\n startup = os.path.join(args.workspace, \"startup\")\n SecurityContentService.initialize(content_folder=startup)\n\n try:\n token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE)\n with open(token_file, \"r\") as f:\n token = f.readline().strip()\n run_number = f.readline().strip()\n client_name = f.readline().strip()\n listen_port = f.readline().strip()\n print(\n \"token is: {} run_number is: {} client_name: {} listen_port: {}\".format(\n token, run_number, client_name, listen_port\n )\n )\n\n startup = args.startup\n app_root = os.path.join(args.workspace, \"run_\" + str(run_number), \"app_\" + client_name)\n\n app_log_config = os.path.join(app_root, config_folder, \"log.config\")\n if os.path.exists(app_log_config):\n args.log_config = app_log_config\n else:\n args.log_config = os.path.join(startup, \"log.config\")\n\n conf = FLClientStarterConfiger(\n app_root=startup,\n client_config_file_name=args.fed_client,\n log_config_file_name=args.log_config,\n kv_list=args.set,\n )\n conf.configure()\n\n deployer = conf.base_deployer\n federated_client = deployer.create_fed_client()\n federated_client.status = ClientStatus.STARTING\n\n federated_client.token = token\n federated_client.client_name = client_name\n federated_client.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)\n federated_client.fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)\n federated_client.fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)\n\n client_config_file_name = os.path.join(app_root, args.client_config)\n conf = ClientJsonConfigurator(\n config_file_name=client_config_file_name,\n )\n conf.configure()\n\n workspace = Workspace(args.workspace, client_name, config_folder)\n run_manager = ClientRunManager(\n client_name=client_name,\n run_num=int(run_number),\n workspace=workspace,\n client=federated_client,\n components=conf.runner_config.components,\n handlers=conf.runner_config.handlers,\n conf=conf,\n )\n federated_client.run_manager = run_manager\n\n with run_manager.new_context() as fl_ctx:\n fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)\n fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)\n fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)\n fl_ctx.set_prop(FLContextKey.ARGS, args, sticky=True)\n fl_ctx.set_prop(FLContextKey.APP_ROOT, app_root, private=True, sticky=True)\n fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True)\n fl_ctx.set_prop(FLContextKey.SECURE_MODE, secure_train, private=True, sticky=True)\n\n client_runner = ClientRunner(config=conf.runner_config, run_num=int(run_number), engine=run_manager)\n run_manager.add_handler(client_runner)\n fl_ctx.set_prop(FLContextKey.RUNNER, client_runner, private=True)\n\n # # Start the thread for responding the inquire\n # federated_client.stop_listen = False\n # thread = threading.Thread(target=listen_command, args=[federated_client, int(listen_port), client_runner])\n # thread.start()\n # Start the command agent\n command_agent = CommandAgent(federated_client, int(listen_port), client_runner)\n command_agent.start(fl_ctx)\n\n federated_client.status = ClientStatus.STARTED\n client_runner.run(app_root, args)\n\n except 
BaseException as e:\n traceback.print_exc()\n print(\"FL client execution exception: \" + str(e))\n finally:\n # if federated_client:\n # federated_client.stop_listen = True\n # thread.join()\n if command_agent:\n command_agent.shutdown()\n if deployer:\n deployer.close()\n # address = ('localhost', 6000)\n # conn_client = Client(address, authkey='client process secret password'.encode())\n # conn_client.send('bye')\n\n\ndef remove_restart_file(args):\n \"\"\"To remove the restart.fl file.\n\n Args:\n args: command args\n\n \"\"\"\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n restart_file = os.path.join(args.workspace, \"shutdown.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This is the program when starting the child process for running the NVIDIA FLARE executor.\n \"\"\"\n\n main()\n", "path": "nvflare/private/fed/app/client/worker_process.py"}]}
num_tokens_prompt: 2,841
num_tokens_diff: 909
problem_id: gh_patches_debug_3782
source: rasdani/github-patches
task_type: git_diff
in_source_id: bookwyrm-social__bookwyrm-2314
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Normalize stored ISNI The `ISNI` field should always be in the same format (without spaces), but right now sometimes the field is stored with spaces between the digits. There should be validation when the author is saved that cleans this value, similar to how ISBNs are validated </issue> <code> [start of bookwyrm/models/author.py] 1 """ database schema for info about authors """ 2 import re 3 from django.contrib.postgres.indexes import GinIndex 4 from django.core.cache import cache 5 from django.core.cache.utils import make_template_fragment_key 6 from django.db import models 7 8 from bookwyrm import activitypub 9 from bookwyrm.settings import DOMAIN 10 11 from .book import BookDataModel 12 from . import fields 13 14 15 class Author(BookDataModel): 16 """basic biographic info""" 17 18 wikipedia_link = fields.CharField( 19 max_length=255, blank=True, null=True, deduplication_field=True 20 ) 21 isni = fields.CharField( 22 max_length=255, blank=True, null=True, deduplication_field=True 23 ) 24 gutenberg_id = fields.CharField( 25 max_length=255, blank=True, null=True, deduplication_field=True 26 ) 27 # idk probably other keys would be useful here? 28 born = fields.DateTimeField(blank=True, null=True) 29 died = fields.DateTimeField(blank=True, null=True) 30 name = fields.CharField(max_length=255) 31 aliases = fields.ArrayField( 32 models.CharField(max_length=255), blank=True, default=list 33 ) 34 bio = fields.HtmlField(null=True, blank=True) 35 36 def save(self, *args, **kwargs): 37 """clear related template caches""" 38 # clear template caches 39 if self.id: 40 cache_keys = [ 41 make_template_fragment_key("titleby", [book]) 42 for book in self.book_set.values_list("id", flat=True) 43 ] 44 cache.delete_many(cache_keys) 45 return super().save(*args, **kwargs) 46 47 @property 48 def isni_link(self): 49 """generate the url from the isni id""" 50 clean_isni = re.sub(r"\s", "", self.isni) 51 return f"https://isni.org/isni/{clean_isni}" 52 53 @property 54 def openlibrary_link(self): 55 """generate the url from the openlibrary id""" 56 return f"https://openlibrary.org/authors/{self.openlibrary_key}" 57 58 def get_remote_id(self): 59 """editions and works both use "book" instead of model_name""" 60 return f"https://{DOMAIN}/author/{self.id}" 61 62 activity_serializer = activitypub.Author 63 64 class Meta: 65 """sets up postgres GIN index field""" 66 67 indexes = (GinIndex(fields=["search_vector"]),) 68 [end of bookwyrm/models/author.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/models/author.py b/bookwyrm/models/author.py --- a/bookwyrm/models/author.py +++ b/bookwyrm/models/author.py @@ -42,6 +42,11 @@ for book in self.book_set.values_list("id", flat=True) ] cache.delete_many(cache_keys) + + # normalize isni format + if self.isni: + self.isni = re.sub(r"\s", "", self.isni) + return super().save(*args, **kwargs) @property
{"golden_diff": "diff --git a/bookwyrm/models/author.py b/bookwyrm/models/author.py\n--- a/bookwyrm/models/author.py\n+++ b/bookwyrm/models/author.py\n@@ -42,6 +42,11 @@\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n+\n+ # normalize isni format\n+ if self.isni:\n+ self.isni = re.sub(r\"\\s\", \"\", self.isni)\n+\n return super().save(*args, **kwargs)\n \n @property\n", "issue": "Normalize stored ISNI\nThe `ISNI` field should always be in the same format (without spaces), but right now sometimes the field is stored with spaces between the digits. There should be validation when the author is saved that cleans this value, similar to how ISBNs are validated\n", "before_files": [{"content": "\"\"\" database schema for info about authors \"\"\"\nimport re\nfrom django.contrib.postgres.indexes import GinIndex\nfrom django.core.cache import cache\nfrom django.core.cache.utils import make_template_fragment_key\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .book import BookDataModel\nfrom . import fields\n\n\nclass Author(BookDataModel):\n \"\"\"basic biographic info\"\"\"\n\n wikipedia_link = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n isni = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n gutenberg_id = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n # idk probably other keys would be useful here?\n born = fields.DateTimeField(blank=True, null=True)\n died = fields.DateTimeField(blank=True, null=True)\n name = fields.CharField(max_length=255)\n aliases = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n bio = fields.HtmlField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n \"\"\"clear related template caches\"\"\"\n # clear template caches\n if self.id:\n cache_keys = [\n make_template_fragment_key(\"titleby\", [book])\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n return super().save(*args, **kwargs)\n\n @property\n def isni_link(self):\n \"\"\"generate the url from the isni id\"\"\"\n clean_isni = re.sub(r\"\\s\", \"\", self.isni)\n return f\"https://isni.org/isni/{clean_isni}\"\n\n @property\n def openlibrary_link(self):\n \"\"\"generate the url from the openlibrary id\"\"\"\n return f\"https://openlibrary.org/authors/{self.openlibrary_key}\"\n\n def get_remote_id(self):\n \"\"\"editions and works both use \"book\" instead of model_name\"\"\"\n return f\"https://{DOMAIN}/author/{self.id}\"\n\n activity_serializer = activitypub.Author\n\n class Meta:\n \"\"\"sets up postgres GIN index field\"\"\"\n\n indexes = (GinIndex(fields=[\"search_vector\"]),)\n", "path": "bookwyrm/models/author.py"}]}
1,237
127
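The golden diff in the record above amounts to a single normalization step inside `Author.save()`. Below is a minimal standalone sketch of that step; the helper name `normalize_isni` is a hypothetical assumption for illustration (bookwyrm performs the `re.sub` inline in the model's `save()` rather than through such a helper):

```python
import re

def normalize_isni(isni):
    """Strip all whitespace from an ISNI, mirroring the re.sub in the golden diff.

    The helper name is illustrative only; bookwyrm itself applies this
    inline in Author.save() before calling super().save().
    """
    if not isni:  # preserve None / empty values unchanged
        return isni
    return re.sub(r"\s", "", isni)

# Usage: a spaced ISNI collapses to the canonical 16-character form.
assert normalize_isni("0000 0001 2103 2683") == "0000000121032683"
assert normalize_isni(None) is None
```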
gh_patches_debug_35845
rasdani/github-patches
git_diff
dask__distributed-210
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pandas is now a mandatory dependency for dscheduler If pandas is not installed I get an infinite loop with the following logs: ``` tornado.httpclient.HTTPError: HTTP 500: Internal Server Error bokeh.util.tornado - ERROR - Error thrown from periodic callback: HTTPError(500, 'Internal Server Error', HTTPResponse(_body=None,buffer=<_io.BytesIO object at 0x7f17202a1e08>,code=500,effective_url='http://localhost:9786/tasks.json',error=HTTPError(...),headers=<tornado.httputil.HTTPHeaders object at 0x7f17204ffe48>,reason='Internal Server Error',request=<tornado.httpclient.HTTPRequest object at 0x7f1720578da0>,request_time=0.005105733871459961,time_info={})) distributed.utils - ERROR - No module named 'pandas' Traceback (most recent call last): File "/volatile/ogrisel/code/distributed/distributed/utils.py", line 229, in log_errors yield File "/volatile/ogrisel/code/distributed/distributed/http/scheduler.py", line 79, in get from ..diagnostics.scheduler import tasks File "/volatile/ogrisel/code/distributed/distributed/diagnostics/scheduler.py", line 6, in <module> import pandas as pd ImportError: No module named 'pandas' ``` I think this is not intended? The diagnostics callback should probably be disabled when pandas is not available, no? </issue> <code> [start of distributed/formats/csv.py] 1 from __future__ import print_function, division, absolute_import 2 3 from io import BytesIO 4 5 from dask import do 6 from dask.dataframe import from_imperative 7 import pandas as pd 8 9 from .compression import compressors, decompressors 10 11 from ..executor import default_executor, ensure_default_get 12 from ..utils import ensure_bytes, log_errors 13 14 15 def bytes_read_csv(b, header, kwargs): 16 """ Convert a block of bytes to a Pandas DataFrame 17 18 Parameters 19 ---------- 20 b: bytestring 21 The content to be parsed with pandas.read_csv 22 header: bytestring 23 An optional header to prepend to b 24 kwargs: dict 25 A dictionary of keyword arguments to be passed to pandas.read_csv 26 27 See Also: 28 distributed.formats.csv.read_csv 29 """ 30 with log_errors(): 31 compression = kwargs.pop('compression', None) 32 b2 = decompressors[compression](b) 33 bio = BytesIO() 34 if header: 35 if not header.endswith(b'\n') and not header.endswith(b'\r'): 36 header = header + ensure_bytes(kwargs.get('lineterminator', b'\n')) 37 bio.write(header) 38 bio.write(b2) 39 bio.seek(0) 40 return pd.read_csv(bio, **kwargs) 41 42 43 def read_csv(block_lists, header, head, kwargs, lazy=True, collection=True, 44 executor=None): 45 """ Convert blocks of bytes to a dask.dataframe or other high-level object 46 47 This accepts a list of lists of futures/values of bytes where each list 48 corresponds to one file, and the futures/values of bytes concatenate to 49 comprise the entire file, in order. 50 51 Parameters 52 ---------- 53 block_lists: list of lists of futures of bytes 54 The lists of bytestrings with each list corresponding to one logical file 55 header: bytestring 56 The header, found at the front of the first file, to be prepended to 57 all blocks 58 head: pd.DataFrame 59 An example Pandas DataFrame to be used for metadata 60 kwargs: dict 61 Keyword arguments to pass down to ``pd.read_csv`` 62 lazy: boolean, optional (defaults to True) 63 collection: boolean, optional (defaults to True) 64 65 Returns 66 ------- 67 A dask.dataframe, or list of futures or values, depending on the value of 68 lazy and collection.
69 """ 70 executor = default_executor(executor) 71 72 dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] + 73 [do(bytes_read_csv)(b, header, kwargs) 74 for b in blocks[1:]] 75 for blocks in block_lists] 76 dfs2 = sum(dfs1, []) 77 78 ensure_default_get(executor) 79 80 if collection: 81 result = from_imperative(dfs2, head) 82 else: 83 result = dfs2 84 85 if not lazy: 86 if collection: 87 result = executor.persist(result) 88 else: 89 result = executor.compute(result) 90 91 return result 92 [end of distributed/formats/csv.py] [start of distributed/diagnostics/scheduler.py] 1 from __future__ import print_function, division, absolute_import 2 3 from datetime import datetime 4 import os 5 6 import pandas as pd 7 from toolz import countby, concat, dissoc 8 9 from ..utils import key_split 10 11 12 def tasks(s): 13 """ Task and worker status of scheduler """ 14 processing = sum(map(len, s.processing.values())) 15 16 return {'processing': processing, 17 'total': len(s.tasks), 18 'in-memory': len(s.who_has), 19 'ready': len(s.ready) 20 + sum(map(len, s.stacks.values())), 21 'waiting': len(s.waiting), 22 'failed': len(s.exceptions_blame)} 23 24 25 def workers(s): 26 """ Information about workers 27 28 Examples 29 -------- 30 >>> workers(my_scheduler) # doctest: +SKIP 31 {'127.0.0.1': {'cores': 3, 32 'cpu': 0.0, 33 'last-seen': 0.003068, 34 'latency': 0.01584628690034151, 35 'ports': ['54871', '50943'], 36 'processing': {'inc': 2, 'add': 1}, 37 'disk-read': 1234, 38 'disk-write': 1234, 39 'network-send': 1234, 40 'network-recv': 1234, 41 'memory': 16701911040, 42 'memory-percent': 85}} 43 """ 44 hosts = {host: ['%s:%s' % (host, port) for port in d['ports']] 45 for host, d in s.host_info.items()} 46 47 processing = {host: countby(key_split, concat(s.processing[w] for w in addrs)) 48 for host, addrs in hosts.items()} 49 50 now = datetime.now() 51 52 result = {} 53 for host, info in s.host_info.items(): 54 info = dissoc(info, 'heartbeat', 'heartbeat-port') 55 info['processing'] = processing[host] 56 result[host] = info 57 info['ports'] = list(info['ports']) 58 if 'last-seen' in info: 59 info['last-seen'] = (now - info['last-seen']).total_seconds() 60 61 return result 62 63 64 def scheduler_progress_df(d): 65 """ Convert status response to DataFrame of total progress 66 67 Consumes dictionary from status.json route 68 69 Examples 70 -------- 71 >>> d = {"ready": 5, "in-memory": 30, "waiting": 20, 72 ... "tasks": 70, "failed": 9, 73 ... "processing": 6, 74 ... "other-keys-are-fine-too": ''} 75 76 >>> scheduler_progress_df(d) # doctest: +SKIP 77 Count Progress 78 Tasks 79 waiting 20 +++++++++++ 80 ready 5 ++ 81 failed 9 +++++ 82 processing 6 +++ 83 in-memory 30 +++++++++++++++++ 84 total 70 ++++++++++++++++++++++++++++++++++++++++ 85 """ 86 d = d.copy() 87 d['total'] = d.pop('tasks') 88 names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total'] 89 df = pd.DataFrame(pd.Series({k: d[k] for k in names}, 90 index=names, name='Count')) 91 if d['total']: 92 barlength = (40 * df.Count / d['total']).astype(int) 93 df['Progress'] = barlength.apply(lambda n: ('%-40s' % (n * '+').rstrip(' '))) 94 else: 95 df['Progress'] = 0 96 97 df.index.name = 'Tasks' 98 99 return df 100 101 102 def worker_status_df(d): 103 """ Status of workers as a Pandas DataFrame 104 105 Consumes data from status.json route. 106 107 Examples 108 -------- 109 >>> d = {"other-keys-are-fine-too": '', 110 ... "ncores": {"192.168.1.107": 4, 111 ... "192.168.1.108": 4}, 112 ... 
"processing": {"192.168.1.108": {'inc': 3, 'add': 1}, 113 ... "192.168.1.107": {'inc': 2}}, 114 ... "bytes": {"192.168.1.108": 1000, 115 ... "192.168.1.107": 2000}} 116 117 >>> worker_status_df(d) 118 Ncores Bytes Processing 119 Workers 120 192.168.1.107 4 2000 [inc] 121 192.168.1.108 4 1000 [add, inc] 122 """ 123 names = ['ncores', 'bytes', 'processing'] 124 df = pd.DataFrame({k: d[k] for k in names}, columns=names) 125 df['processing'] = df['processing'].apply(sorted) 126 df.columns = df.columns.map(str.title) 127 df.index.name = 'Workers' 128 df = df.sort_index() 129 return df 130 [end of distributed/diagnostics/scheduler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/distributed/diagnostics/scheduler.py b/distributed/diagnostics/scheduler.py --- a/distributed/diagnostics/scheduler.py +++ b/distributed/diagnostics/scheduler.py @@ -3,7 +3,6 @@ from datetime import datetime import os -import pandas as pd from toolz import countby, concat, dissoc from ..utils import key_split @@ -83,6 +82,7 @@ in-memory 30 +++++++++++++++++ total 70 ++++++++++++++++++++++++++++++++++++++++ """ + import pandas as pd d = d.copy() d['total'] = d.pop('tasks') names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total'] @@ -120,6 +120,7 @@ 192.168.1.107 4 2000 [inc] 192.168.1.108 4 1000 [add, inc] """ + import pandas as pd names = ['ncores', 'bytes', 'processing'] df = pd.DataFrame({k: d[k] for k in names}, columns=names) df['processing'] = df['processing'].apply(sorted) diff --git a/distributed/formats/csv.py b/distributed/formats/csv.py --- a/distributed/formats/csv.py +++ b/distributed/formats/csv.py @@ -3,8 +3,6 @@ from io import BytesIO from dask import do -from dask.dataframe import from_imperative -import pandas as pd from .compression import compressors, decompressors @@ -27,6 +25,7 @@ See Also: distributed.formats.csv.read_csv """ + import pandas as pd with log_errors(): compression = kwargs.pop('compression', None) b2 = decompressors[compression](b) @@ -67,6 +66,7 @@ A dask.dataframe, or list of futures or values, depending on the value of lazy and collection. """ + from dask.dataframe import from_imperative executor = default_executor(executor) dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +
{"golden_diff": "diff --git a/distributed/diagnostics/scheduler.py b/distributed/diagnostics/scheduler.py\n--- a/distributed/diagnostics/scheduler.py\n+++ b/distributed/diagnostics/scheduler.py\n@@ -3,7 +3,6 @@\n from datetime import datetime\n import os\n \n-import pandas as pd\n from toolz import countby, concat, dissoc\n \n from ..utils import key_split\n@@ -83,6 +82,7 @@\n in-memory 30 +++++++++++++++++\n total 70 ++++++++++++++++++++++++++++++++++++++++\n \"\"\"\n+ import pandas as pd\n d = d.copy()\n d['total'] = d.pop('tasks')\n names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']\n@@ -120,6 +120,7 @@\n 192.168.1.107 4 2000 [inc]\n 192.168.1.108 4 1000 [add, inc]\n \"\"\"\n+ import pandas as pd\n names = ['ncores', 'bytes', 'processing']\n df = pd.DataFrame({k: d[k] for k in names}, columns=names)\n df['processing'] = df['processing'].apply(sorted)\ndiff --git a/distributed/formats/csv.py b/distributed/formats/csv.py\n--- a/distributed/formats/csv.py\n+++ b/distributed/formats/csv.py\n@@ -3,8 +3,6 @@\n from io import BytesIO\n \n from dask import do\n-from dask.dataframe import from_imperative\n-import pandas as pd\n \n from .compression import compressors, decompressors\n \n@@ -27,6 +25,7 @@\n See Also:\n distributed.formats.csv.read_csv\n \"\"\"\n+ import pandas as pd\n with log_errors():\n compression = kwargs.pop('compression', None)\n b2 = decompressors[compression](b)\n@@ -67,6 +66,7 @@\n A dask.dataframe, or list of futures or values, depending on the value of\n lazy and collection.\n \"\"\"\n+ from dask.dataframe import from_imperative\n executor = default_executor(executor)\n \n dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +\n", "issue": "pandas is now a mandatory dependency for dscheduler\nIf pandas is not installed I get an infinite loop with the following logs:\n\n```\ntornado.httpclient.HTTPError: HTTP 500: Internal Server Error\nbokeh.util.tornado - ERROR - Error thrown from periodic callback: HTTPError(500, 'Internal Server Error', HTTPResponse(_body=None,buffer=<_io.BytesIO object at 0x7f17202a1e08>,code=500,effective_url='http://localhost:9786/tasks.json',error=HTTPError(...),headers=<tornado.httputil.HTTPHeaders object at 0x7f17204ffe48>,reason='Internal Server Error',request=<tornado.httpclient.HTTPRequest object at 0x7f1720578da0>,request_time=0.005105733871459961,time_info={}))\ndistributed.utils - ERROR - No module named 'pandas'\nTraceback (most recent call last):\n File \"/volatile/ogrisel/code/distributed/distributed/utils.py\", line 229, in log_errors\n yield\n File \"/volatile/ogrisel/code/distributed/distributed/http/scheduler.py\", line 79, in get\n from ..diagnostics.scheduler import tasks\n File \"/volatile/ogrisel/code/distributed/distributed/diagnostics/scheduler.py\", line 6, in <module>\n import pandas as pd\nImportError: No module named 'pandas'\n```\n\nIf think this is not intended? 
The diagnostics callback should probably be disabled when pandas is not available, no?\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom io import BytesIO\n\nfrom dask import do\nfrom dask.dataframe import from_imperative\nimport pandas as pd\n\nfrom .compression import compressors, decompressors\n\nfrom ..executor import default_executor, ensure_default_get\nfrom ..utils import ensure_bytes, log_errors\n\n\ndef bytes_read_csv(b, header, kwargs):\n \"\"\" Convert a block of bytes to a Pandas DataFrame\n\n Parameters\n ----------\n b: bytestring\n The content to be parsed with pandas.read_csv\n header: bytestring\n An optional header to prepend to b\n kwargs: dict\n A dictionary of keyword arguments to be passed to pandas.read_csv\n\n See Also:\n distributed.formats.csv.read_csv\n \"\"\"\n with log_errors():\n compression = kwargs.pop('compression', None)\n b2 = decompressors[compression](b)\n bio = BytesIO()\n if header:\n if not header.endswith(b'\\n') and not header.endswith(b'\\r'):\n header = header + ensure_bytes(kwargs.get('lineterminator', b'\\n'))\n bio.write(header)\n bio.write(b2)\n bio.seek(0)\n return pd.read_csv(bio, **kwargs)\n\n\ndef read_csv(block_lists, header, head, kwargs, lazy=True, collection=True,\n executor=None):\n \"\"\" Convert blocks of bytes to a dask.dataframe or other high-level object\n\n This accepts a list of lists of futures/values of bytes where each list\n corresponds to one file, and the futures/values of bytes concatenate to\n comprise the entire file, in order.\n\n Parameters\n ----------\n block_lists: list of lists of futures of bytes\n The lists of bytestrings with each list corresponding to one logical file\n header: bytestring\n The header, found at the front of the first file, to be prepended to\n all blocks\n head: pd.DataFrame\n An example Pandas DataFrame to be used for metadata\n kwargs: dict\n Keyword arguments to pass down to ``pd.read_csv``\n lazy: boolean, optional (defaults to True)\n collection: boolean, optional (defaults to True)\n\n Returns\n -------\n A dask.dataframe, or list of futures or values, depending on the value of\n lazy and collection.\n \"\"\"\n executor = default_executor(executor)\n\n dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +\n [do(bytes_read_csv)(b, header, kwargs)\n for b in blocks[1:]]\n for blocks in block_lists]\n dfs2 = sum(dfs1, [])\n\n ensure_default_get(executor)\n\n if collection:\n result = from_imperative(dfs2, head)\n else:\n result = dfs2\n\n if not lazy:\n if collection:\n result = executor.persist(result)\n else:\n result = executor.compute(result)\n\n return result\n", "path": "distributed/formats/csv.py"}, {"content": "from __future__ import print_function, division, absolute_import\n\nfrom datetime import datetime\nimport os\n\nimport pandas as pd\nfrom toolz import countby, concat, dissoc\n\nfrom ..utils import key_split\n\n\ndef tasks(s):\n \"\"\" Task and worker status of scheduler \"\"\"\n processing = sum(map(len, s.processing.values()))\n\n return {'processing': processing,\n 'total': len(s.tasks),\n 'in-memory': len(s.who_has),\n 'ready': len(s.ready)\n + sum(map(len, s.stacks.values())),\n 'waiting': len(s.waiting),\n 'failed': len(s.exceptions_blame)}\n\n\ndef workers(s):\n \"\"\" Information about workers\n\n Examples\n --------\n >>> workers(my_scheduler) # doctest: +SKIP\n {'127.0.0.1': {'cores': 3,\n 'cpu': 0.0,\n 'last-seen': 0.003068,\n 'latency': 0.01584628690034151,\n 'ports': ['54871', '50943'],\n 'processing': 
{'inc': 2, 'add': 1},\n 'disk-read': 1234,\n 'disk-write': 1234,\n 'network-send': 1234,\n 'network-recv': 1234,\n 'memory': 16701911040,\n 'memory-percent': 85}}\n \"\"\"\n hosts = {host: ['%s:%s' % (host, port) for port in d['ports']]\n for host, d in s.host_info.items()}\n\n processing = {host: countby(key_split, concat(s.processing[w] for w in addrs))\n for host, addrs in hosts.items()}\n\n now = datetime.now()\n\n result = {}\n for host, info in s.host_info.items():\n info = dissoc(info, 'heartbeat', 'heartbeat-port')\n info['processing'] = processing[host]\n result[host] = info\n info['ports'] = list(info['ports'])\n if 'last-seen' in info:\n info['last-seen'] = (now - info['last-seen']).total_seconds()\n\n return result\n\n\ndef scheduler_progress_df(d):\n \"\"\" Convert status response to DataFrame of total progress\n\n Consumes dictionary from status.json route\n\n Examples\n --------\n >>> d = {\"ready\": 5, \"in-memory\": 30, \"waiting\": 20,\n ... \"tasks\": 70, \"failed\": 9,\n ... \"processing\": 6,\n ... \"other-keys-are-fine-too\": ''}\n\n >>> scheduler_progress_df(d) # doctest: +SKIP\n Count Progress\n Tasks\n waiting 20 +++++++++++\n ready 5 ++\n failed 9 +++++\n processing 6 +++\n in-memory 30 +++++++++++++++++\n total 70 ++++++++++++++++++++++++++++++++++++++++\n \"\"\"\n d = d.copy()\n d['total'] = d.pop('tasks')\n names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']\n df = pd.DataFrame(pd.Series({k: d[k] for k in names},\n index=names, name='Count'))\n if d['total']:\n barlength = (40 * df.Count / d['total']).astype(int)\n df['Progress'] = barlength.apply(lambda n: ('%-40s' % (n * '+').rstrip(' ')))\n else:\n df['Progress'] = 0\n\n df.index.name = 'Tasks'\n\n return df\n\n\ndef worker_status_df(d):\n \"\"\" Status of workers as a Pandas DataFrame\n\n Consumes data from status.json route.\n\n Examples\n --------\n >>> d = {\"other-keys-are-fine-too\": '',\n ... \"ncores\": {\"192.168.1.107\": 4,\n ... \"192.168.1.108\": 4},\n ... \"processing\": {\"192.168.1.108\": {'inc': 3, 'add': 1},\n ... \"192.168.1.107\": {'inc': 2}},\n ... \"bytes\": {\"192.168.1.108\": 1000,\n ... \"192.168.1.107\": 2000}}\n\n >>> worker_status_df(d)\n Ncores Bytes Processing\n Workers\n 192.168.1.107 4 2000 [inc]\n 192.168.1.108 4 1000 [add, inc]\n \"\"\"\n names = ['ncores', 'bytes', 'processing']\n df = pd.DataFrame({k: d[k] for k in names}, columns=names)\n df['processing'] = df['processing'].apply(sorted)\n df.columns = df.columns.map(str.title)\n df.index.name = 'Workers'\n df = df.sort_index()\n return df\n", "path": "distributed/diagnostics/scheduler.py"}]}
3,222
516
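The dask/distributed record above resolves the hard dependency by moving the pandas and dask.dataframe imports from module scope into the functions that need them. A minimal sketch of that deferred-import pattern follows; the simplified signature (no header or compression handling, unlike the real `bytes_read_csv`) is an assumption for brevity:

```python
from io import BytesIO

# Note: no module-level pandas import, so importing this module never
# raises ImportError even when pandas is missing.

def bytes_read_csv(b, **kwargs):
    """Parse a block of CSV bytes; pandas is only required at call time."""
    import pandas as pd  # deferred import, as in the golden diff
    return pd.read_csv(BytesIO(b), **kwargs)

# Usage (assumes pandas is installed in the calling environment):
# df = bytes_read_csv(b"a,b\n1,2\n")
# print(df)  # one row: a=1, b=2
```

With this layout, scheduler diagnostics that never touch CSV parsing can import the module safely, and the ImportError is raised only on the code path that genuinely needs pandas.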
gh_patches_debug_25599
rasdani/github-patches
git_diff
mdn__kuma-6973
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ISE when trying to submit a translation **Summary** _What is the problem?_ Submitting a translation in Italian for https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it fails and gives an ISE **Steps To Reproduce (STR)** 1. Went to https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it 2. Added / Changed a bunch of words 3. Clicked "Publish" **Actual behavior** An ISE / error 500 happened, preventing the work from being saved. **Expected behavior** The localization is saved, no error occurs, and the reader is able to read the new document **Additional context** * First report here: https://discourse.mozilla.org/t/50-internal-server-error-while-saving/58804 * I was not able to reproduce this on another locale (pl) for this page </issue> <code> [start of kuma/wiki/views/translate.py] 1 from urllib.parse import urlencode 2 3 from csp.decorators import csp_update 4 from django.conf import settings 5 from django.core.exceptions import ObjectDoesNotExist 6 from django.http import Http404, JsonResponse 7 from django.shortcuts import get_object_or_404, redirect, render 8 from django.utils.safestring import mark_safe 9 from django.utils.translation import gettext_lazy as _ 10 from django.views.decorators.cache import never_cache 11 12 import kuma.wiki.content 13 from kuma.attachments.forms import AttachmentRevisionForm 14 from kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required 15 from kuma.core.i18n import get_language_mapping 16 from kuma.core.urlresolvers import reverse 17 from kuma.core.utils import get_object_or_none, smart_int, urlparams 18 19 from .utils import document_form_initial, split_slug 20 from ..decorators import check_readonly, prevent_indexing, process_document_path 21 from ..forms import DocumentForm, RevisionForm 22 from ..models import Document, Revision 23 24 25 @ensure_wiki_domain 26 @never_cache 27 @block_user_agents 28 @login_required 29 @process_document_path 30 def select_locale(request, document_slug, document_locale): 31 """ 32 Select a locale to translate the document to. 33 """ 34 doc = get_object_or_404(Document, locale=document_locale, slug=document_slug) 35 return render(request, "wiki/select_locale.html", {"document": doc}) 36 37 38 @ensure_wiki_domain 39 @never_cache 40 @block_user_agents 41 @login_required 42 @csp_update(SCRIPT_SRC="'unsafe-eval'") # Required until CKEditor 4.7 43 @process_document_path 44 @check_readonly 45 @prevent_indexing 46 def translate(request, document_slug, document_locale): 47 """ 48 Create a new translation of a wiki document. 49 50 * document_slug is for the default locale 51 * translation is to the request locale 52 """ 53 # TODO: Refactor this view into two views? (new, edit) 54 # That might help reduce the headache-inducing branchiness. 55 56 # The parent document to translate from 57 try: 58 # Use '.all_objects' because the parent might have been soft deleted. 59 # And if we don't respect that fact, it would become impossible to 60 # edit a the child of it.
61 parent_doc = Document.all_objects.get( 62 locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug 63 ) 64 except Document.DoesNotExist: 65 raise Http404("Parent document does not exist") 66 67 # Get the mapping here and now so it can be used for input validation 68 language_mapping = get_language_mapping() 69 70 # HACK: Seems weird, but sticking the translate-to locale in a query 71 # param is the best way to avoid the MindTouch-legacy locale 72 # redirection logic. 73 document_locale = request.GET.get("tolocale", document_locale) 74 if document_locale.lower() not in language_mapping: 75 # The 'tolocale' query string parameters aren't free-text. They're 76 # explicitly listed on the "Select language" page (`...$locales`) 77 # If a locale was entered that wasn't a link it's a user bug. 78 raise Http404 79 80 # Set a "Discard Changes" page 81 discard_href = "" 82 83 if settings.WIKI_DEFAULT_LANGUAGE == document_locale: 84 # Don't translate to the default language. 85 return redirect( 86 reverse( 87 "wiki.edit", 88 locale=settings.WIKI_DEFAULT_LANGUAGE, 89 args=[parent_doc.slug], 90 ) 91 ) 92 93 if not parent_doc.is_localizable: 94 message = _("You cannot translate this document.") 95 context = {"message": message} 96 return render(request, "handlers/400.html", context, status=400) 97 98 based_on_rev = parent_doc.current_or_latest_revision() 99 100 disclose_description = bool(request.GET.get("opendescription")) 101 102 try: 103 doc = parent_doc.translations.get(locale=document_locale) 104 slug_dict = split_slug(doc.slug) 105 except Document.DoesNotExist: 106 doc = None 107 disclose_description = True 108 slug_dict = split_slug(document_slug) 109 110 # Find the "real" parent topic, which is its translation 111 if parent_doc.parent_topic: 112 try: 113 parent_topic_translated_doc = parent_doc.parent_topic.translations.get( 114 locale=document_locale 115 ) 116 slug_dict = split_slug( 117 parent_topic_translated_doc.slug + "/" + slug_dict["specific"] 118 ) 119 except ObjectDoesNotExist: 120 pass 121 122 user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user)) 123 124 doc_form = None 125 if user_has_doc_perm: 126 if doc: 127 # If there's an existing doc, populate form from it. 128 discard_href = doc.get_absolute_url() 129 doc.slug = slug_dict["specific"] 130 doc_initial = document_form_initial(doc) 131 else: 132 # If no existing doc, bring over the original title and slug. 133 discard_href = parent_doc.get_absolute_url() 134 doc_initial = {"title": based_on_rev.title, "slug": slug_dict["specific"]} 135 doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict["parent"]) 136 137 initial = { 138 "based_on": based_on_rev.id, 139 "current_rev": doc.current_or_latest_revision().id if doc else None, 140 "comment": "", 141 "toc_depth": based_on_rev.toc_depth, 142 "localization_tags": ["inprogress"], 143 } 144 content = None 145 if not doc: 146 content = based_on_rev.content 147 if content: 148 # TODO: There will be no need to "filterEditorSafety" when the code 149 # that calls "clean_content" on Revision.save is deployed to 150 # production, AND the current revisions of all docs have had 151 # their content cleaned with "clean_content". 
152 initial.update( 153 content=kuma.wiki.content.parse(content).filterEditorSafety().serialize() 154 ) 155 instance = doc and doc.current_or_latest_revision() 156 rev_form = RevisionForm( 157 request=request, 158 instance=instance, 159 initial=initial, 160 parent_slug=slug_dict["parent"], 161 ) 162 163 if request.method == "POST": 164 which_form = request.POST.get("form-type", "both") 165 doc_form_invalid = False 166 167 # Grab the posted slug value in case it's invalid 168 posted_slug = request.POST.get("slug", slug_dict["specific"]) 169 170 if user_has_doc_perm and which_form in ["doc", "both"]: 171 disclose_description = True 172 post_data = request.POST.copy() 173 174 post_data.update({"locale": document_locale}) 175 176 doc_form = DocumentForm( 177 post_data, instance=doc, parent_slug=slug_dict["parent"] 178 ) 179 doc_form.instance.locale = document_locale 180 doc_form.instance.parent = parent_doc 181 182 if which_form == "both": 183 # Sending a new copy of post so the slug change above 184 # doesn't cause problems during validation 185 rev_form = RevisionForm( 186 request=request, data=post_data, parent_slug=slug_dict["parent"] 187 ) 188 189 # If we are submitting the whole form, we need to check that 190 # the Revision is valid before saving the Document. 191 if doc_form.is_valid() and (which_form == "doc" or rev_form.is_valid()): 192 193 # If the document you're about to save already exists, as a 194 # soft-delete, then really delete it first. 195 for soft_deleted_document in Document.deleted_objects.filter( 196 locale=doc_form.cleaned_data["locale"], 197 slug=doc_form.cleaned_data["slug"], 198 ): 199 soft_deleted_document.delete(purge=True) 200 201 doc = doc_form.save(parent=parent_doc) 202 203 if which_form == "doc": 204 url = urlparams(doc.get_edit_url(), opendescription=1) 205 return redirect(url) 206 else: 207 doc_form.data["slug"] = posted_slug 208 doc_form_invalid = True 209 210 if doc and which_form in ["rev", "both"]: 211 post_data = request.POST.copy() 212 if "slug" not in post_data: 213 post_data["slug"] = posted_slug 214 215 # update the post data with the toc_depth of original 216 post_data["toc_depth"] = based_on_rev.toc_depth 217 218 # Pass in the locale for the akistmet "blog_lang". 
219 post_data["locale"] = document_locale 220 221 rev_form = RevisionForm( 222 request=request, data=post_data, parent_slug=slug_dict["parent"] 223 ) 224 rev_form.instance.document = doc # for rev_form.clean() 225 226 if rev_form.is_valid() and not doc_form_invalid: 227 parent_id = request.POST.get("parent_id", "") 228 229 # Attempt to set a parent 230 if parent_id: 231 try: 232 try: 233 parent_doc = Document.all_objects.get(id=parent_id) 234 except Document.DoesNotExist: 235 raise Http404("Parent document does not exist") 236 rev_form.instance.document.parent = parent_doc 237 doc.parent = parent_doc 238 rev_form.instance.based_on.document = doc.original 239 except Document.DoesNotExist: 240 pass 241 242 rev_form.save(doc) 243 # If this is an Ajax POST, then return a JsonResponse 244 if request.is_ajax(): 245 data = { 246 "error": False, 247 "new_revision_id": rev_form.instance.id, 248 } 249 250 return JsonResponse(data) 251 252 # Construct the redirect URL, adding any needed parameters 253 url = doc.get_absolute_url() 254 params = {} 255 # Parameter for the document saved, so that we can delete the cached draft on load 256 params["rev_saved"] = request.POST.get("current_rev", "") 257 url = "%s?%s" % (url, urlencode(params)) 258 return redirect(url) 259 else: 260 # If this is an Ajax POST, then return a JsonResponse with error 261 if request.is_ajax(): 262 if "current_rev" in rev_form._errors: 263 # Make the error message safe so the '<' and '>' don't 264 # get turned into '&lt;' and '&gt;', respectively 265 rev_form.errors["current_rev"][0] = mark_safe( 266 rev_form.errors["current_rev"][0] 267 ) 268 errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()] 269 data = { 270 "error": True, 271 "error_message": errors, 272 "new_revision_id": rev_form.instance.id, 273 } 274 return JsonResponse(data=data) 275 276 if doc: 277 from_id = smart_int(request.GET.get("from"), None) 278 to_id = smart_int(request.GET.get("to"), None) 279 280 revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent) 281 revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent) 282 else: 283 revision_from = revision_to = None 284 285 parent_split = split_slug(parent_doc.slug) 286 287 language = language_mapping[document_locale.lower()] 288 default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()] 289 290 context = { 291 "parent": parent_doc, 292 "document": doc, 293 "document_form": doc_form, 294 "revision_form": rev_form, 295 "locale": document_locale, 296 "default_locale": default_locale, 297 "language": language, 298 "based_on": based_on_rev, 299 "disclose_description": disclose_description, 300 "discard_href": discard_href, 301 "attachment_form": AttachmentRevisionForm(), 302 "specific_slug": parent_split["specific"], 303 "parent_slug": parent_split["parent"], 304 "revision_from": revision_from, 305 "revision_to": revision_to, 306 } 307 return render(request, "wiki/translate.html", context) 308 [end of kuma/wiki/views/translate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py --- a/kuma/wiki/views/translate.py +++ b/kuma/wiki/views/translate.py @@ -3,6 +3,7 @@ from csp.decorators import csp_update from django.conf import settings from django.core.exceptions import ObjectDoesNotExist +from django.db.models import Q from django.http import Http404, JsonResponse from django.shortcuts import get_object_or_404, redirect, render from django.utils.safestring import mark_safe @@ -192,10 +193,10 @@ # If the document you're about to save already exists, as a # soft-delete, then really delete it first. - for soft_deleted_document in Document.deleted_objects.filter( - locale=doc_form.cleaned_data["locale"], - slug=doc_form.cleaned_data["slug"], - ): + previously_deleted_documents = Document.deleted_objects.filter( + locale=doc_form.cleaned_data["locale"] + ).filter(Q(slug=doc_form.cleaned_data["slug"]) | Q(parent=parent_doc)) + for soft_deleted_document in previously_deleted_documents: soft_deleted_document.delete(purge=True) doc = doc_form.save(parent=parent_doc)
{"golden_diff": "diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py\n--- a/kuma/wiki/views/translate.py\n+++ b/kuma/wiki/views/translate.py\n@@ -3,6 +3,7 @@\n from csp.decorators import csp_update\n from django.conf import settings\n from django.core.exceptions import ObjectDoesNotExist\n+from django.db.models import Q\n from django.http import Http404, JsonResponse\n from django.shortcuts import get_object_or_404, redirect, render\n from django.utils.safestring import mark_safe\n@@ -192,10 +193,10 @@\n \n # If the document you're about to save already exists, as a\n # soft-delete, then really delete it first.\n- for soft_deleted_document in Document.deleted_objects.filter(\n- locale=doc_form.cleaned_data[\"locale\"],\n- slug=doc_form.cleaned_data[\"slug\"],\n- ):\n+ previously_deleted_documents = Document.deleted_objects.filter(\n+ locale=doc_form.cleaned_data[\"locale\"]\n+ ).filter(Q(slug=doc_form.cleaned_data[\"slug\"]) | Q(parent=parent_doc))\n+ for soft_deleted_document in previously_deleted_documents:\n soft_deleted_document.delete(purge=True)\n \n doc = doc_form.save(parent=parent_doc)\n", "issue": "ISE when trying to submit a translation\n**Summary**\r\n_What is the problem?_\r\nSubmitting a translation in Italian for https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it fails and gives an ISE\r\n\r\n**Steps To Reproduce (STR)**\r\n1. Went to https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it\r\n2. Added / Changed a bunch of words\r\n3. Clicked \"Publish\"\r\n\r\n\r\n**Actual behavior**\r\nAn ISE / error 500 happened, preventing to save the work.\r\n\r\n\r\n**Expected behavior**\r\nThe localization is save, no error, the reader is able to read a new document\r\n\r\n\r\n**Additional context**\r\n\r\n* First report here: https://discourse.mozilla.org/t/50-internal-server-error-while-saving/58804\r\n* I was not able to reproduce this on another locale (pl) for this page\r\n\n", "before_files": [{"content": "from urllib.parse import urlencode\n\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.cache import never_cache\n\nimport kuma.wiki.content\nfrom kuma.attachments.forms import AttachmentRevisionForm\nfrom kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required\nfrom kuma.core.i18n import get_language_mapping\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import get_object_or_none, smart_int, urlparams\n\nfrom .utils import document_form_initial, split_slug\nfrom ..decorators import check_readonly, prevent_indexing, process_document_path\nfrom ..forms import DocumentForm, RevisionForm\nfrom ..models import Document, Revision\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@process_document_path\ndef select_locale(request, document_slug, document_locale):\n \"\"\"\n Select a locale to translate the document to.\n \"\"\"\n doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n return render(request, \"wiki/select_locale.html\", {\"document\": 
doc})\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@csp_update(SCRIPT_SRC=\"'unsafe-eval'\") # Required until CKEditor 4.7\n@process_document_path\n@check_readonly\n@prevent_indexing\ndef translate(request, document_slug, document_locale):\n \"\"\"\n Create a new translation of a wiki document.\n\n * document_slug is for the default locale\n * translation is to the request locale\n \"\"\"\n # TODO: Refactor this view into two views? (new, edit)\n # That might help reduce the headache-inducing branchiness.\n\n # The parent document to translate from\n try:\n # Use '.all_objects' because the parent might have been soft deleted.\n # And if we don't respect that fact, it would become impossible to\n # edit a the child of it.\n parent_doc = Document.all_objects.get(\n locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug\n )\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n\n # Get the mapping here and now so it can be used for input validation\n language_mapping = get_language_mapping()\n\n # HACK: Seems weird, but sticking the translate-to locale in a query\n # param is the best way to avoid the MindTouch-legacy locale\n # redirection logic.\n document_locale = request.GET.get(\"tolocale\", document_locale)\n if document_locale.lower() not in language_mapping:\n # The 'tolocale' query string parameters aren't free-text. They're\n # explicitly listed on the \"Select language\" page (`...$locales`)\n # If a locale was entered that wasn't a link it's a user bug.\n raise Http404\n\n # Set a \"Discard Changes\" page\n discard_href = \"\"\n\n if settings.WIKI_DEFAULT_LANGUAGE == document_locale:\n # Don't translate to the default language.\n return redirect(\n reverse(\n \"wiki.edit\",\n locale=settings.WIKI_DEFAULT_LANGUAGE,\n args=[parent_doc.slug],\n )\n )\n\n if not parent_doc.is_localizable:\n message = _(\"You cannot translate this document.\")\n context = {\"message\": message}\n return render(request, \"handlers/400.html\", context, status=400)\n\n based_on_rev = parent_doc.current_or_latest_revision()\n\n disclose_description = bool(request.GET.get(\"opendescription\"))\n\n try:\n doc = parent_doc.translations.get(locale=document_locale)\n slug_dict = split_slug(doc.slug)\n except Document.DoesNotExist:\n doc = None\n disclose_description = True\n slug_dict = split_slug(document_slug)\n\n # Find the \"real\" parent topic, which is its translation\n if parent_doc.parent_topic:\n try:\n parent_topic_translated_doc = parent_doc.parent_topic.translations.get(\n locale=document_locale\n )\n slug_dict = split_slug(\n parent_topic_translated_doc.slug + \"/\" + slug_dict[\"specific\"]\n )\n except ObjectDoesNotExist:\n pass\n\n user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))\n\n doc_form = None\n if user_has_doc_perm:\n if doc:\n # If there's an existing doc, populate form from it.\n discard_href = doc.get_absolute_url()\n doc.slug = slug_dict[\"specific\"]\n doc_initial = document_form_initial(doc)\n else:\n # If no existing doc, bring over the original title and slug.\n discard_href = parent_doc.get_absolute_url()\n doc_initial = {\"title\": based_on_rev.title, \"slug\": slug_dict[\"specific\"]}\n doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict[\"parent\"])\n\n initial = {\n \"based_on\": based_on_rev.id,\n \"current_rev\": doc.current_or_latest_revision().id if doc else None,\n \"comment\": \"\",\n \"toc_depth\": based_on_rev.toc_depth,\n \"localization_tags\": 
[\"inprogress\"],\n }\n content = None\n if not doc:\n content = based_on_rev.content\n if content:\n # TODO: There will be no need to \"filterEditorSafety\" when the code\n # that calls \"clean_content\" on Revision.save is deployed to\n # production, AND the current revisions of all docs have had\n # their content cleaned with \"clean_content\".\n initial.update(\n content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()\n )\n instance = doc and doc.current_or_latest_revision()\n rev_form = RevisionForm(\n request=request,\n instance=instance,\n initial=initial,\n parent_slug=slug_dict[\"parent\"],\n )\n\n if request.method == \"POST\":\n which_form = request.POST.get(\"form-type\", \"both\")\n doc_form_invalid = False\n\n # Grab the posted slug value in case it's invalid\n posted_slug = request.POST.get(\"slug\", slug_dict[\"specific\"])\n\n if user_has_doc_perm and which_form in [\"doc\", \"both\"]:\n disclose_description = True\n post_data = request.POST.copy()\n\n post_data.update({\"locale\": document_locale})\n\n doc_form = DocumentForm(\n post_data, instance=doc, parent_slug=slug_dict[\"parent\"]\n )\n doc_form.instance.locale = document_locale\n doc_form.instance.parent = parent_doc\n\n if which_form == \"both\":\n # Sending a new copy of post so the slug change above\n # doesn't cause problems during validation\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n\n # If we are submitting the whole form, we need to check that\n # the Revision is valid before saving the Document.\n if doc_form.is_valid() and (which_form == \"doc\" or rev_form.is_valid()):\n\n # If the document you're about to save already exists, as a\n # soft-delete, then really delete it first.\n for soft_deleted_document in Document.deleted_objects.filter(\n locale=doc_form.cleaned_data[\"locale\"],\n slug=doc_form.cleaned_data[\"slug\"],\n ):\n soft_deleted_document.delete(purge=True)\n\n doc = doc_form.save(parent=parent_doc)\n\n if which_form == \"doc\":\n url = urlparams(doc.get_edit_url(), opendescription=1)\n return redirect(url)\n else:\n doc_form.data[\"slug\"] = posted_slug\n doc_form_invalid = True\n\n if doc and which_form in [\"rev\", \"both\"]:\n post_data = request.POST.copy()\n if \"slug\" not in post_data:\n post_data[\"slug\"] = posted_slug\n\n # update the post data with the toc_depth of original\n post_data[\"toc_depth\"] = based_on_rev.toc_depth\n\n # Pass in the locale for the akistmet \"blog_lang\".\n post_data[\"locale\"] = document_locale\n\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid() and not doc_form_invalid:\n parent_id = request.POST.get(\"parent_id\", \"\")\n\n # Attempt to set a parent\n if parent_id:\n try:\n try:\n parent_doc = Document.all_objects.get(id=parent_id)\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n except Document.DoesNotExist:\n pass\n\n rev_form.save(doc)\n # If this is an Ajax POST, then return a JsonResponse\n if request.is_ajax():\n data = {\n \"error\": False,\n \"new_revision_id\": rev_form.instance.id,\n }\n\n return JsonResponse(data)\n\n # Construct the redirect URL, adding any needed parameters\n url = doc.get_absolute_url()\n params = {}\n # Parameter for the document saved, so that we can 
delete the cached draft on load\n params[\"rev_saved\"] = request.POST.get(\"current_rev\", \"\")\n url = \"%s?%s\" % (url, urlencode(params))\n return redirect(url)\n else:\n # If this is an Ajax POST, then return a JsonResponse with error\n if request.is_ajax():\n if \"current_rev\" in rev_form._errors:\n # Make the error message safe so the '<' and '>' don't\n # get turned into '&lt;' and '&gt;', respectively\n rev_form.errors[\"current_rev\"][0] = mark_safe(\n rev_form.errors[\"current_rev\"][0]\n )\n errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]\n data = {\n \"error\": True,\n \"error_message\": errors,\n \"new_revision_id\": rev_form.instance.id,\n }\n return JsonResponse(data=data)\n\n if doc:\n from_id = smart_int(request.GET.get(\"from\"), None)\n to_id = smart_int(request.GET.get(\"to\"), None)\n\n revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent)\n revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)\n else:\n revision_from = revision_to = None\n\n parent_split = split_slug(parent_doc.slug)\n\n language = language_mapping[document_locale.lower()]\n default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]\n\n context = {\n \"parent\": parent_doc,\n \"document\": doc,\n \"document_form\": doc_form,\n \"revision_form\": rev_form,\n \"locale\": document_locale,\n \"default_locale\": default_locale,\n \"language\": language,\n \"based_on\": based_on_rev,\n \"disclose_description\": disclose_description,\n \"discard_href\": discard_href,\n \"attachment_form\": AttachmentRevisionForm(),\n \"specific_slug\": parent_split[\"specific\"],\n \"parent_slug\": parent_split[\"parent\"],\n \"revision_from\": revision_from,\n \"revision_to\": revision_to,\n }\n return render(request, \"wiki/translate.html\", context)\n", "path": "kuma/wiki/views/translate.py"}]}
4,075
275
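The kuma fix above widens the lookup for previously soft-deleted documents from slug-only to slug-or-parent via Django `Q` objects, so a colliding translation is purged on either match. Below is a plain-Python analogue of the widened predicate, using hypothetical dict-based records instead of Django models so the sketch runs standalone:

```python
def previously_deleted(docs, locale, slug, parent):
    """Plain-Python analogue of the widened filter in the golden diff.

    The original kuma code expresses this with the Django ORM:
        Document.deleted_objects.filter(locale=locale)
                                .filter(Q(slug=slug) | Q(parent=parent))
    i.e. a soft-deleted document now collides by slug *or* by parent.
    Checking only the slug is what left a stale row behind and caused the ISE.
    """
    return [
        d for d in docs
        if d["locale"] == locale and (d["slug"] == slug or d["parent"] == parent)
    ]

# Usage with hypothetical records: the first doc collides via its parent
# even though its slug differs from the one being saved.
docs = [
    {"locale": "it", "slug": "old-slug", "parent": "parent-1"},
    {"locale": "it", "slug": "other", "parent": "parent-2"},
]
assert len(previously_deleted(docs, "it", "new-slug", "parent-1")) == 1
```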
gh_patches_debug_20722
rasdani/github-patches
git_diff
netket__netket-470
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Getting acceptance of numpy metropolis_hastings The method defining the acceptance for `numpy/metropolis_hastings` in v3.0 is ```python @property def acceptance(self): """The measured acceptance probability.""" return _mean(self._accepted_samples) / _mean(self._total_samples) ``` But `self._accepted_samples` and `self._total_samples` are ints. Don't know if this has been fixed in a PR, but if it's not, maybe it should be included. </issue> <code> [start of netket/stats/_sum_inplace.py] 1 from functools import singledispatch 2 import numpy as _np 3 4 from netket.utils import mpi_available as _mpi_available, n_nodes as _n_nodes 5 6 if _mpi_available: 7 from netket.utils import MPI_comm as _MPI_comm 8 from netket.utils import MPI as _MPI 9 10 11 @singledispatch 12 def sum_inplace(x): 13 """ 14 Computes the elementwie sum of an array or a scalar across all MPI processes. 15 Attempts to perform this sum inplace if possible, but for some types a copy 16 might be returned. 17 18 Args: 19 a: The input array, which will usually be overwritten in place. 20 Returns: 21 out: The reduced array. 22 """ 23 raise TypeError("Unknown type to perform dispatch upon: {}".format(type(x))) 24 25 26 ####### 27 # Scalar 28 @sum_inplace.register(complex) 29 @sum_inplace.register(_np.float64) 30 @sum_inplace.register(_np.float32) 31 @sum_inplace.register(_np.complex64) 32 @sum_inplace.register(_np.complex128) 33 @sum_inplace.register(float) 34 def sum_inplace_scalar(a): 35 ar = _np.asarray(a) 36 37 if _n_nodes > 1: 38 _MPI_comm.Allreduce(_MPI.IN_PLACE, ar.reshape(-1), op=_MPI.SUM) 39 40 return ar 41 42 43 ############## 44 # Numpy Array 45 # 46 @sum_inplace.register(_np.ndarray) 47 def sum_inplace_MPI(a): 48 """ 49 Computes the elementwise sum of a numpy array over all MPI processes. 50 51 Args: 52 a (numpy.ndarray): The input array, which will be overwritten in place. 53 """ 54 if _n_nodes > 1: 55 _MPI_comm.Allreduce(_MPI.IN_PLACE, a.reshape(-1), op=_MPI.SUM) 56 57 return a 58 59 60 ############## 61 # Jax 62 # 63 from netket.utils import jax_available 64 65 if jax_available: 66 import numpy as _np 67 import jax 68 69 @sum_inplace.register(jax.interpreters.xla.DeviceArray) 70 def sum_inplace_jax(x): 71 if not isinstance(x, jax.interpreters.xla.DeviceArray): 72 raise TypeError( 73 "Argument to sum_inplace_jax must be a DeviceArray, got {}".format( 74 type(x) 75 ) 76 ) 77 78 if _n_nodes == 1: 79 return x 80 81 # This below only works on cpus... 82 # we should make this work for gpus too.. 83 # TODO: unsafe_buffer_pointer is considered not yet definitive interface 84 ptr = x.block_until_ready().device_buffer.unsafe_buffer_pointer() 85 86 # The above is faster. 87 # This below should work more often, but might copy. 88 # Depending on future changes in jaxlib, we might have to switch to 89 # this below. 
90 # see Google/jax #2123 and #1009 91 # _x = jax.xla._force(x.block_until_ready()) 92 # ptr = _x.device_buffer.unsafe_buffer_pointer() 93 94 # using native numpy because jax's numpy does not have ctypeslib 95 data_pointer = _np.ctypeslib.ndpointer(x.dtype, shape=x.shape) 96 97 # wrap jax data into a standard numpy array which is handled by MPI 98 arr = data_pointer(ptr).contents 99 _MPI_comm.Allreduce(_MPI.IN_PLACE, arr.reshape(-1), op=_MPI.SUM) 100 101 return x 102 103 @sum_inplace.register(jax.interpreters.partial_eval.JaxprTracer) 104 @sum_inplace.register(jax.interpreters.ad.JVPTracer) 105 def sum_inplace_jax_jittracer(x): 106 if _n_nodes == 1: 107 return x 108 else: 109 raise RuntimError( 110 "Cannot jit through sum_inplace when running with multiple MPI processes." 111 ) 112 [end of netket/stats/_sum_inplace.py] [start of netket/sampler/numpy/metropolis_hastings.py] 1 from ..abstract_sampler import AbstractSampler 2 from ...stats import mean as _mean 3 from netket import random as _random 4 5 import math 6 import numpy as _np 7 from numba import jit, int64, float64 8 from ..._jitclass import jitclass 9 10 11 class MetropolisHastings(AbstractSampler): 12 def __init__(self, machine, kernel, n_chains=16, sweep_size=None): 13 14 super().__init__(machine, n_chains) 15 16 self.n_chains = n_chains 17 18 self.sweep_size = sweep_size 19 20 self._kernel = kernel 21 22 self.machine_pow = 2.0 23 self.reset(True) 24 25 @property 26 def n_chains(self): 27 return self._n_chains 28 29 @n_chains.setter 30 def n_chains(self, n_chains): 31 if n_chains < 0: 32 raise ValueError("Expected a positive integer for n_chains ") 33 34 self._n_chains = n_chains 35 36 self._state = _np.zeros((n_chains, self._input_size)) 37 self._state1 = _np.copy(self._state) 38 39 self._log_values = _np.zeros(n_chains, dtype=_np.complex128) 40 self._log_values_1 = _np.zeros(n_chains, dtype=_np.complex128) 41 self._log_prob_corr = _np.zeros(n_chains) 42 43 @property 44 def machine_pow(self): 45 return self._machine_pow 46 47 @machine_pow.setter 48 def machine_pow(self, m_power): 49 if not _np.isscalar(m_power): 50 raise ValueError("machine_pow should be a scalar.") 51 self._machine_pow = m_power 52 53 @property 54 def sweep_size(self): 55 return self._sweep_size 56 57 @sweep_size.setter 58 def sweep_size(self, sweep_size): 59 self._sweep_size = sweep_size if sweep_size != None else self._input_size 60 if self._sweep_size < 0: 61 raise ValueError("Expected a positive integer for sweep_size ") 62 63 def reset(self, init_random=False): 64 if init_random: 65 self._kernel.random_state(self._state) 66 self._log_values = self.machine.log_val(self._state, out=self._log_values) 67 68 self._accepted_samples = 0 69 self._total_samples = 0 70 71 @staticmethod 72 @jit(nopython=True) 73 def acceptance_kernel( 74 state, state1, log_values, log_values_1, log_prob_corr, machine_pow 75 ): 76 accepted = 0 77 78 for i in range(state.shape[0]): 79 prob = _np.exp( 80 machine_pow * (log_values_1[i] - log_values[i] + log_prob_corr[i]).real 81 ) 82 assert not math.isnan(prob) 83 84 if prob > _random.uniform(0, 1): 85 log_values[i] = log_values_1[i] 86 state[i] = state1[i] 87 accepted += 1 88 89 return accepted 90 91 def __next__(self): 92 93 _log_val = self.machine.log_val 94 _acc_kernel = self.acceptance_kernel 95 _state = self._state 96 _state1 = self._state1 97 _log_values = self._log_values 98 _log_values_1 = self._log_values_1 99 _log_prob_corr = self._log_prob_corr 100 _machine_pow = self._machine_pow 101 _t_kernel = self._kernel.transition 102 
103 accepted = 0 104 105 for sweep in range(self.sweep_size): 106 107 # Propose a new state using the transition kernel 108 _t_kernel(_state, _state1, _log_prob_corr) 109 110 _log_values_1 = _log_val(_state1, out=_log_values_1) 111 112 # Acceptance Kernel 113 accepted += _acc_kernel( 114 _state, 115 _state1, 116 _log_values, 117 _log_values_1, 118 _log_prob_corr, 119 _machine_pow, 120 ) 121 122 self._total_samples += self.sweep_size * self.n_chains 123 self._accepted_samples += accepted 124 125 return self._state 126 127 @property 128 def acceptance(self): 129 """The measured acceptance probability.""" 130 return _mean(self._accepted_samples) / _mean(self._total_samples) 131 [end of netket/sampler/numpy/metropolis_hastings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netket/sampler/numpy/metropolis_hastings.py b/netket/sampler/numpy/metropolis_hastings.py --- a/netket/sampler/numpy/metropolis_hastings.py +++ b/netket/sampler/numpy/metropolis_hastings.py @@ -1,7 +1,8 @@ from ..abstract_sampler import AbstractSampler -from ...stats import mean as _mean from netket import random as _random +from netket.stats import sum_inplace as _sum_inplace + import math import numpy as _np from numba import jit, int64, float64 @@ -127,4 +128,4 @@ @property def acceptance(self): """The measured acceptance probability.""" - return _mean(self._accepted_samples) / _mean(self._total_samples) + return _sum_inplace(self._accepted_samples) / _sum_inplace(self._total_samples) diff --git a/netket/stats/_sum_inplace.py b/netket/stats/_sum_inplace.py --- a/netket/stats/_sum_inplace.py +++ b/netket/stats/_sum_inplace.py @@ -31,6 +31,7 @@ @sum_inplace.register(_np.complex64) @sum_inplace.register(_np.complex128) @sum_inplace.register(float) +@sum_inplace.register(int) def sum_inplace_scalar(a): ar = _np.asarray(a)
{"golden_diff": "diff --git a/netket/sampler/numpy/metropolis_hastings.py b/netket/sampler/numpy/metropolis_hastings.py\n--- a/netket/sampler/numpy/metropolis_hastings.py\n+++ b/netket/sampler/numpy/metropolis_hastings.py\n@@ -1,7 +1,8 @@\n from ..abstract_sampler import AbstractSampler\n-from ...stats import mean as _mean\n from netket import random as _random\n \n+from netket.stats import sum_inplace as _sum_inplace\n+\n import math\n import numpy as _np\n from numba import jit, int64, float64\n@@ -127,4 +128,4 @@\n @property\n def acceptance(self):\n \"\"\"The measured acceptance probability.\"\"\"\n- return _mean(self._accepted_samples) / _mean(self._total_samples)\n+ return _sum_inplace(self._accepted_samples) / _sum_inplace(self._total_samples)\ndiff --git a/netket/stats/_sum_inplace.py b/netket/stats/_sum_inplace.py\n--- a/netket/stats/_sum_inplace.py\n+++ b/netket/stats/_sum_inplace.py\n@@ -31,6 +31,7 @@\n @sum_inplace.register(_np.complex64)\n @sum_inplace.register(_np.complex128)\n @sum_inplace.register(float)\n+@sum_inplace.register(int)\n def sum_inplace_scalar(a):\n ar = _np.asarray(a)\n", "issue": "Getting acceptance of numpy metropolis_hastings\nThe method defining the acceptance for `numpy/metropolis_hastings` in v3.0 is\r\n\r\n```python\r\n@property\r\ndef acceptance(self):\r\n \"\"\"The measured acceptance probability.\"\"\"\r\n return _mean(self._accepted_samples) / _mean(self._total_samples)\r\n```\r\n\r\nBut `self._accepted_samples` and `self._total_samples` are ints. Don't know if this has been fixed in a PR, but if it's not, maybe it should be included.\n", "before_files": [{"content": "from functools import singledispatch\nimport numpy as _np\n\nfrom netket.utils import mpi_available as _mpi_available, n_nodes as _n_nodes\n\nif _mpi_available:\n from netket.utils import MPI_comm as _MPI_comm\n from netket.utils import MPI as _MPI\n\n\n@singledispatch\ndef sum_inplace(x):\n \"\"\"\n Computes the elementwie sum of an array or a scalar across all MPI processes.\n Attempts to perform this sum inplace if possible, but for some types a copy\n might be returned.\n\n Args:\n a: The input array, which will usually be overwritten in place.\n Returns:\n out: The reduced array.\n \"\"\"\n raise TypeError(\"Unknown type to perform dispatch upon: {}\".format(type(x)))\n\n\n#######\n# Scalar\n@sum_inplace.register(complex)\n@sum_inplace.register(_np.float64)\n@sum_inplace.register(_np.float32)\n@sum_inplace.register(_np.complex64)\n@sum_inplace.register(_np.complex128)\n@sum_inplace.register(float)\ndef sum_inplace_scalar(a):\n ar = _np.asarray(a)\n\n if _n_nodes > 1:\n _MPI_comm.Allreduce(_MPI.IN_PLACE, ar.reshape(-1), op=_MPI.SUM)\n\n return ar\n\n\n##############\n# Numpy Array\n#\n@sum_inplace.register(_np.ndarray)\ndef sum_inplace_MPI(a):\n \"\"\"\n Computes the elementwise sum of a numpy array over all MPI processes.\n\n Args:\n a (numpy.ndarray): The input array, which will be overwritten in place.\n \"\"\"\n if _n_nodes > 1:\n _MPI_comm.Allreduce(_MPI.IN_PLACE, a.reshape(-1), op=_MPI.SUM)\n\n return a\n\n\n##############\n# Jax\n#\nfrom netket.utils import jax_available\n\nif jax_available:\n import numpy as _np\n import jax\n\n @sum_inplace.register(jax.interpreters.xla.DeviceArray)\n def sum_inplace_jax(x):\n if not isinstance(x, jax.interpreters.xla.DeviceArray):\n raise TypeError(\n \"Argument to sum_inplace_jax must be a DeviceArray, got {}\".format(\n type(x)\n )\n )\n\n if _n_nodes == 1:\n return x\n\n # This below only works on cpus...\n # we should make this work 
for gpus too..\n # TODO: unsafe_buffer_pointer is considered not yet definitive interface\n ptr = x.block_until_ready().device_buffer.unsafe_buffer_pointer()\n\n # The above is faster.\n # This below should work more often, but might copy.\n # Depending on future changes in jaxlib, we might have to switch to\n # this below.\n # see Google/jax #2123 and #1009\n # _x = jax.xla._force(x.block_until_ready())\n # ptr = _x.device_buffer.unsafe_buffer_pointer()\n\n # using native numpy because jax's numpy does not have ctypeslib\n data_pointer = _np.ctypeslib.ndpointer(x.dtype, shape=x.shape)\n\n # wrap jax data into a standard numpy array which is handled by MPI\n arr = data_pointer(ptr).contents\n _MPI_comm.Allreduce(_MPI.IN_PLACE, arr.reshape(-1), op=_MPI.SUM)\n\n return x\n\n @sum_inplace.register(jax.interpreters.partial_eval.JaxprTracer)\n @sum_inplace.register(jax.interpreters.ad.JVPTracer)\n def sum_inplace_jax_jittracer(x):\n if _n_nodes == 1:\n return x\n else:\n raise RuntimError(\n \"Cannot jit through sum_inplace when running with multiple MPI processes.\"\n )\n", "path": "netket/stats/_sum_inplace.py"}, {"content": "from ..abstract_sampler import AbstractSampler\nfrom ...stats import mean as _mean\nfrom netket import random as _random\n\nimport math\nimport numpy as _np\nfrom numba import jit, int64, float64\nfrom ..._jitclass import jitclass\n\n\nclass MetropolisHastings(AbstractSampler):\n def __init__(self, machine, kernel, n_chains=16, sweep_size=None):\n\n super().__init__(machine, n_chains)\n\n self.n_chains = n_chains\n\n self.sweep_size = sweep_size\n\n self._kernel = kernel\n\n self.machine_pow = 2.0\n self.reset(True)\n\n @property\n def n_chains(self):\n return self._n_chains\n\n @n_chains.setter\n def n_chains(self, n_chains):\n if n_chains < 0:\n raise ValueError(\"Expected a positive integer for n_chains \")\n\n self._n_chains = n_chains\n\n self._state = _np.zeros((n_chains, self._input_size))\n self._state1 = _np.copy(self._state)\n\n self._log_values = _np.zeros(n_chains, dtype=_np.complex128)\n self._log_values_1 = _np.zeros(n_chains, dtype=_np.complex128)\n self._log_prob_corr = _np.zeros(n_chains)\n\n @property\n def machine_pow(self):\n return self._machine_pow\n\n @machine_pow.setter\n def machine_pow(self, m_power):\n if not _np.isscalar(m_power):\n raise ValueError(\"machine_pow should be a scalar.\")\n self._machine_pow = m_power\n\n @property\n def sweep_size(self):\n return self._sweep_size\n\n @sweep_size.setter\n def sweep_size(self, sweep_size):\n self._sweep_size = sweep_size if sweep_size != None else self._input_size\n if self._sweep_size < 0:\n raise ValueError(\"Expected a positive integer for sweep_size \")\n\n def reset(self, init_random=False):\n if init_random:\n self._kernel.random_state(self._state)\n self._log_values = self.machine.log_val(self._state, out=self._log_values)\n\n self._accepted_samples = 0\n self._total_samples = 0\n\n @staticmethod\n @jit(nopython=True)\n def acceptance_kernel(\n state, state1, log_values, log_values_1, log_prob_corr, machine_pow\n ):\n accepted = 0\n\n for i in range(state.shape[0]):\n prob = _np.exp(\n machine_pow * (log_values_1[i] - log_values[i] + log_prob_corr[i]).real\n )\n assert not math.isnan(prob)\n\n if prob > _random.uniform(0, 1):\n log_values[i] = log_values_1[i]\n state[i] = state1[i]\n accepted += 1\n\n return accepted\n\n def __next__(self):\n\n _log_val = self.machine.log_val\n _acc_kernel = self.acceptance_kernel\n _state = self._state\n _state1 = self._state1\n _log_values = 
self._log_values\n _log_values_1 = self._log_values_1\n _log_prob_corr = self._log_prob_corr\n _machine_pow = self._machine_pow\n _t_kernel = self._kernel.transition\n\n accepted = 0\n\n for sweep in range(self.sweep_size):\n\n # Propose a new state using the transition kernel\n _t_kernel(_state, _state1, _log_prob_corr)\n\n _log_values_1 = _log_val(_state1, out=_log_values_1)\n\n # Acceptance Kernel\n accepted += _acc_kernel(\n _state,\n _state1,\n _log_values,\n _log_values_1,\n _log_prob_corr,\n _machine_pow,\n )\n\n self._total_samples += self.sweep_size * self.n_chains\n self._accepted_samples += accepted\n\n return self._state\n\n @property\n def acceptance(self):\n \"\"\"The measured acceptance probability.\"\"\"\n return _mean(self._accepted_samples) / _mean(self._total_samples)\n", "path": "netket/sampler/numpy/metropolis_hastings.py"}]}
2,978
316
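The fix in the record above hinges on `functools.singledispatch`: `sum_inplace` was registered for the NumPy scalar types, `float`, and `complex`, but not plain `int`, so the integer counters `_accepted_samples` and `_total_samples` had no dispatch target. Below is a minimal single-process sketch of that pattern — the MPI `Allreduce` call is elided and the demo values are mine:

```python
from functools import singledispatch

import numpy as np


@singledispatch
def sum_inplace(x):
    raise TypeError("Unknown type to perform dispatch upon: {}".format(type(x)))


# Each scalar registration routes the value through the same array-based
# reduction path; the patch above adds the missing `int` case so integer
# sample counters dispatch here too.
@sum_inplace.register(int)
@sum_inplace.register(float)
@sum_inplace.register(complex)
def sum_inplace_scalar(a):
    ar = np.asarray(a)
    # Under MPI this is where Allreduce(IN_PLACE, ar, SUM) would run;
    # with a single process there is nothing to reduce.
    return ar


print(repr(sum_inplace(3)))    # array(3)
print(repr(sum_inplace(2.5)))  # array(2.5)
```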
gh_patches_debug_10632
rasdani/github-patches
git_diff
PaddlePaddle__models-477
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> text_classification reader error Get follow error log while running `train.py` of text_classification model: ``` [INFO 2017-11-16 17:19:51,458 train.py:44] please wait to build the word dictionary ... [INFO 2017-11-16 17:20:07,138 train.py:101] length of word dictionary is : 5147. I1116 17:20:07.168130 24638 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1 I1116 17:20:07.176143 24638 GradientMachine.cpp:94] Initing parameters.. I1116 17:20:07.186369 24638 GradientMachine.cpp:101] Init parameters done. Traceback (most recent call last): File "train.py", line 178, in <module> main(args) File "train.py", line 169, in main model_save_dir=args.model_save_dir) File "train.py", line 150, in train num_passes=num_passes) File "/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/trainer.py", line 162, in train for batch_id, data_batch in enumerate(reader()): File "/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/minibatch.py", line 33, in batch_reader for instance in r: File "/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/reader/decorator.py", line 67, in data_reader for e in reader(): TypeError: 'function' object is not iterable ``` </issue> <code> [start of text_classification/train.py] 1 import os 2 import sys 3 import gzip 4 5 import paddle.v2 as paddle 6 7 import reader 8 from utils import logger, parse_train_cmd, build_dict, load_dict 9 from network_conf import fc_net, convolution_net 10 11 12 def train(topology, 13 train_data_dir=None, 14 test_data_dir=None, 15 word_dict_path=None, 16 label_dict_path=None, 17 model_save_dir="models", 18 batch_size=32, 19 num_passes=10): 20 """ 21 train dnn model 22 23 24 :params train_data_path: path of training data, if this parameter 25 is not specified, paddle.dataset.imdb will be used to run this example 26 :type train_data_path: str 27 :params test_data_path: path of testing data, if this parameter 28 is not specified, paddle.dataset.imdb will be used to run this example 29 :type test_data_path: str 30 :params word_dict_path: path of training data, if this parameter 31 is not specified, paddle.dataset.imdb will be used to run this example 32 :type word_dict_path: str 33 :params num_pass: train pass number 34 :type num_pass: int 35 """ 36 if not os.path.exists(model_save_dir): 37 os.mkdir(model_save_dir) 38 39 use_default_data = (train_data_dir is None) 40 41 if use_default_data: 42 logger.info(("No training data are porivided, " 43 "use paddle.dataset.imdb to train the model.")) 44 logger.info("please wait to build the word dictionary ...") 45 46 word_dict = paddle.dataset.imdb.word_dict() 47 train_reader = paddle.batch( 48 paddle.reader.shuffle( 49 lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000), 50 batch_size=100) 51 test_reader = paddle.batch( 52 lambda: paddle.dataset.imdb.test(word_dict), batch_size=100) 53 54 class_num = 2 55 else: 56 if word_dict_path is None or not os.path.exists(word_dict_path): 57 logger.info(("word dictionary is not given, the dictionary " 58 "is automatically built from the training data.")) 59 60 # build the word dictionary to map the original string-typed 61 # words into integer-typed index 62 build_dict( 63 data_dir=train_data_dir, 64 save_path=word_dict_path, 65 use_col=1, 66 cutoff_fre=5, 67 insert_extra_words=["<UNK>"]) 68 69 if not os.path.exists(label_dict_path): 70 
logger.info(("label dictionary is not given, the dictionary " 71 "is automatically built from the training data.")) 72 # build the label dictionary to map the original string-typed 73 # label into integer-typed index 74 build_dict( 75 data_dir=train_data_dir, save_path=label_dict_path, use_col=0) 76 77 word_dict = load_dict(word_dict_path) 78 79 lbl_dict = load_dict(label_dict_path) 80 class_num = len(lbl_dict) 81 logger.info("class number is : %d." % (len(lbl_dict))) 82 83 train_reader = paddle.batch( 84 paddle.reader.shuffle( 85 reader.train_reader(train_data_dir, word_dict, lbl_dict), 86 buf_size=1000), 87 batch_size=batch_size) 88 89 if test_data_dir is not None: 90 # here, because training and testing data share a same format, 91 # we still use the reader.train_reader to read the testing data. 92 test_reader = paddle.batch( 93 paddle.reader.shuffle( 94 reader.train_reader(test_data_dir, word_dict, lbl_dict), 95 buf_size=1000), 96 batch_size=batch_size) 97 else: 98 test_reader = None 99 100 dict_dim = len(word_dict) 101 logger.info("length of word dictionary is : %d." % (dict_dim)) 102 103 paddle.init(use_gpu=False, trainer_count=1) 104 105 # network config 106 cost, prob, label = topology(dict_dim, class_num) 107 108 # create parameters 109 parameters = paddle.parameters.create(cost) 110 111 # create optimizer 112 adam_optimizer = paddle.optimizer.Adam( 113 learning_rate=1e-3, 114 regularization=paddle.optimizer.L2Regularization(rate=1e-3), 115 model_average=paddle.optimizer.ModelAverage(average_window=0.5)) 116 117 # create trainer 118 trainer = paddle.trainer.SGD( 119 cost=cost, 120 extra_layers=paddle.evaluator.auc(input=prob, label=label), 121 parameters=parameters, 122 update_equation=adam_optimizer) 123 124 # begin training network 125 feeding = {"word": 0, "label": 1} 126 127 def _event_handler(event): 128 """ 129 Define end batch and end pass event handler 130 """ 131 if isinstance(event, paddle.event.EndIteration): 132 if event.batch_id % 100 == 0: 133 logger.info("Pass %d, Batch %d, Cost %f, %s\n" % ( 134 event.pass_id, event.batch_id, event.cost, event.metrics)) 135 136 if isinstance(event, paddle.event.EndPass): 137 if test_reader is not None: 138 result = trainer.test(reader=test_reader, feeding=feeding) 139 logger.info("Test at Pass %d, %s \n" % (event.pass_id, 140 result.metrics)) 141 with gzip.open( 142 os.path.join(model_save_dir, "dnn_params_pass_%05d.tar.gz" % 143 event.pass_id), "w") as f: 144 trainer.save_parameter_to_tar(f) 145 146 trainer.train( 147 reader=train_reader, 148 event_handler=_event_handler, 149 feeding=feeding, 150 num_passes=num_passes) 151 152 logger.info("Training has finished.") 153 154 155 def main(args): 156 if args.nn_type == "dnn": 157 topology = fc_net 158 elif args.nn_type == "cnn": 159 topology = convolution_net 160 161 train( 162 topology=topology, 163 train_data_dir=args.train_data_dir, 164 test_data_dir=args.test_data_dir, 165 word_dict_path=args.word_dict, 166 label_dict_path=args.label_dict, 167 batch_size=args.batch_size, 168 num_passes=args.num_passes, 169 model_save_dir=args.model_save_dir) 170 171 172 if __name__ == "__main__": 173 args = parse_train_cmd() 174 if args.train_data_dir is not None: 175 assert args.word_dict and args.label_dict, ( 176 "the parameter train_data_dir, word_dict_path, and label_dict_path " 177 "should be set at the same time.") 178 main(args) 179 [end of text_classification/train.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository 
using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/text_classification/train.py b/text_classification/train.py --- a/text_classification/train.py +++ b/text_classification/train.py @@ -46,10 +46,10 @@ word_dict = paddle.dataset.imdb.word_dict() train_reader = paddle.batch( paddle.reader.shuffle( - lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000), + lambda: paddle.dataset.imdb.train(word_dict)(), buf_size=1000), batch_size=100) test_reader = paddle.batch( - lambda: paddle.dataset.imdb.test(word_dict), batch_size=100) + lambda: paddle.dataset.imdb.test(word_dict)(), batch_size=100) class_num = 2 else:
{"golden_diff": "diff --git a/text_classification/train.py b/text_classification/train.py\n--- a/text_classification/train.py\n+++ b/text_classification/train.py\n@@ -46,10 +46,10 @@\n word_dict = paddle.dataset.imdb.word_dict()\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n- lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),\n+ lambda: paddle.dataset.imdb.train(word_dict)(), buf_size=1000),\n batch_size=100)\n test_reader = paddle.batch(\n- lambda: paddle.dataset.imdb.test(word_dict), batch_size=100)\n+ lambda: paddle.dataset.imdb.test(word_dict)(), batch_size=100)\n \n class_num = 2\n else:\n", "issue": "text_classification reader error\nGet follow error log while running `train.py` of text_classification model:\r\n```\r\n[INFO 2017-11-16 17:19:51,458 train.py:44] please wait to build the word dictionary ...\r\n[INFO 2017-11-16 17:20:07,138 train.py:101] length of word dictionary is : 5147.\r\nI1116 17:20:07.168130 24638 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1\r\nI1116 17:20:07.176143 24638 GradientMachine.cpp:94] Initing parameters..\r\nI1116 17:20:07.186369 24638 GradientMachine.cpp:101] Init parameters done.\r\nTraceback (most recent call last):\r\n File \"train.py\", line 178, in <module>\r\n main(args)\r\n File \"train.py\", line 169, in main\r\n model_save_dir=args.model_save_dir)\r\n File \"train.py\", line 150, in train\r\n num_passes=num_passes)\r\n File \"/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/trainer.py\", line 162, in train\r\n for batch_id, data_batch in enumerate(reader()):\r\n File \"/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/minibatch.py\", line 33, in batch_reader\r\n for instance in r:\r\n File \"/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/reader/decorator.py\", line 67, in data_reader\r\n for e in reader():\r\nTypeError: 'function' object is not iterable\r\n```\n", "before_files": [{"content": "import os\nimport sys\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom utils import logger, parse_train_cmd, build_dict, load_dict\nfrom network_conf import fc_net, convolution_net\n\n\ndef train(topology,\n train_data_dir=None,\n test_data_dir=None,\n word_dict_path=None,\n label_dict_path=None,\n model_save_dir=\"models\",\n batch_size=32,\n num_passes=10):\n \"\"\"\n train dnn model\n\n\n :params train_data_path: path of training data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type train_data_path: str\n :params test_data_path: path of testing data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type test_data_path: str\n :params word_dict_path: path of training data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type word_dict_path: str\n :params num_pass: train pass number\n :type num_pass: int\n \"\"\"\n if not os.path.exists(model_save_dir):\n os.mkdir(model_save_dir)\n\n use_default_data = (train_data_dir is None)\n\n if use_default_data:\n logger.info((\"No training data are porivided, \"\n \"use paddle.dataset.imdb to train the model.\"))\n logger.info(\"please wait to build the word dictionary ...\")\n\n word_dict = paddle.dataset.imdb.word_dict()\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),\n batch_size=100)\n test_reader = paddle.batch(\n lambda: 
paddle.dataset.imdb.test(word_dict), batch_size=100)\n\n class_num = 2\n else:\n if word_dict_path is None or not os.path.exists(word_dict_path):\n logger.info((\"word dictionary is not given, the dictionary \"\n \"is automatically built from the training data.\"))\n\n # build the word dictionary to map the original string-typed\n # words into integer-typed index\n build_dict(\n data_dir=train_data_dir,\n save_path=word_dict_path,\n use_col=1,\n cutoff_fre=5,\n insert_extra_words=[\"<UNK>\"])\n\n if not os.path.exists(label_dict_path):\n logger.info((\"label dictionary is not given, the dictionary \"\n \"is automatically built from the training data.\"))\n # build the label dictionary to map the original string-typed\n # label into integer-typed index\n build_dict(\n data_dir=train_data_dir, save_path=label_dict_path, use_col=0)\n\n word_dict = load_dict(word_dict_path)\n\n lbl_dict = load_dict(label_dict_path)\n class_num = len(lbl_dict)\n logger.info(\"class number is : %d.\" % (len(lbl_dict)))\n\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.train_reader(train_data_dir, word_dict, lbl_dict),\n buf_size=1000),\n batch_size=batch_size)\n\n if test_data_dir is not None:\n # here, because training and testing data share a same format,\n # we still use the reader.train_reader to read the testing data.\n test_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.train_reader(test_data_dir, word_dict, lbl_dict),\n buf_size=1000),\n batch_size=batch_size)\n else:\n test_reader = None\n\n dict_dim = len(word_dict)\n logger.info(\"length of word dictionary is : %d.\" % (dict_dim))\n\n paddle.init(use_gpu=False, trainer_count=1)\n\n # network config\n cost, prob, label = topology(dict_dim, class_num)\n\n # create parameters\n parameters = paddle.parameters.create(cost)\n\n # create optimizer\n adam_optimizer = paddle.optimizer.Adam(\n learning_rate=1e-3,\n regularization=paddle.optimizer.L2Regularization(rate=1e-3),\n model_average=paddle.optimizer.ModelAverage(average_window=0.5))\n\n # create trainer\n trainer = paddle.trainer.SGD(\n cost=cost,\n extra_layers=paddle.evaluator.auc(input=prob, label=label),\n parameters=parameters,\n update_equation=adam_optimizer)\n\n # begin training network\n feeding = {\"word\": 0, \"label\": 1}\n\n def _event_handler(event):\n \"\"\"\n Define end batch and end pass event handler\n \"\"\"\n if isinstance(event, paddle.event.EndIteration):\n if event.batch_id % 100 == 0:\n logger.info(\"Pass %d, Batch %d, Cost %f, %s\\n\" % (\n event.pass_id, event.batch_id, event.cost, event.metrics))\n\n if isinstance(event, paddle.event.EndPass):\n if test_reader is not None:\n result = trainer.test(reader=test_reader, feeding=feeding)\n logger.info(\"Test at Pass %d, %s \\n\" % (event.pass_id,\n result.metrics))\n with gzip.open(\n os.path.join(model_save_dir, \"dnn_params_pass_%05d.tar.gz\" %\n event.pass_id), \"w\") as f:\n trainer.save_parameter_to_tar(f)\n\n trainer.train(\n reader=train_reader,\n event_handler=_event_handler,\n feeding=feeding,\n num_passes=num_passes)\n\n logger.info(\"Training has finished.\")\n\n\ndef main(args):\n if args.nn_type == \"dnn\":\n topology = fc_net\n elif args.nn_type == \"cnn\":\n topology = convolution_net\n\n train(\n topology=topology,\n train_data_dir=args.train_data_dir,\n test_data_dir=args.test_data_dir,\n word_dict_path=args.word_dict,\n label_dict_path=args.label_dict,\n batch_size=args.batch_size,\n num_passes=args.num_passes,\n model_save_dir=args.model_save_dir)\n\n\nif __name__ == \"__main__\":\n args 
= parse_train_cmd()\n if args.train_data_dir is not None:\n assert args.word_dict and args.label_dict, (\n \"the parameter train_data_dir, word_dict_path, and label_dict_path \"\n \"should be set at the same time.\")\n main(args)\n", "path": "text_classification/train.py"}]}
2,805
171
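The `TypeError: 'function' object is not iterable` traceback in the record above comes from a layered reader API: `paddle.dataset.imdb.train(word_dict)` returns a reader *function*, and `paddle.reader.shuffle` expects a creator whose call yields items, so wrapping it in a bare `lambda` hands shuffle a function instead of an iterable. A simplified sketch with stand-in functions (not the real paddle APIs) showing why the trailing `()` in the golden diff matters:

```python
def imdb_train(word_dict):
    """Stand-in for paddle.dataset.imdb.train: returns a reader *function*."""
    def reader():
        for sample in [([1, 2, 3], 1), ([4, 5], 0)]:
            yield sample
    return reader


def shuffle(reader_creator, buf_size):
    """Stand-in for paddle.reader.shuffle: iterates what the creator returns."""
    def shuffled_reader():
        for e in reader_creator():  # the line that raised the TypeError
            yield e                 # (the real version buffers buf_size items)
    return shuffled_reader


word_dict = {}
# Broken: the lambda returns imdb_train's reader function, not its items:
#     shuffle(lambda: imdb_train(word_dict), buf_size=1000)
# Fixed: the trailing () calls that reader, so the creator yields an iterable.
ok = shuffle(lambda: imdb_train(word_dict)(), buf_size=1000)
print(list(ok()))  # [([1, 2, 3], 1), ([4, 5], 0)]
```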
gh_patches_debug_2665
rasdani/github-patches
git_diff
opsdroid__opsdroid-946
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PyPI deployments are failing Looks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out. ``` HTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/ ``` PyPI deployments are failing Looks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out. ``` HTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/ ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 import os 3 from setuptools import setup, find_packages 4 from setuptools.command.build_py import build_py 5 from setuptools.command.sdist import sdist 6 from setuptools.command.develop import develop 7 import versioneer 8 9 PACKAGE_NAME = 'opsdroid' 10 HERE = os.path.abspath(os.path.dirname(__file__)) 11 README = open(os.path.join(HERE, 'README.md'), encoding="utf8").read() 12 13 PACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules', 14 'modules.*', 'docs', 'docs.*']) 15 16 17 # For now we simply define the install_requires based on the contents 18 # of requirements.txt. In the future, install_requires may become much 19 # looser than the (automatically) resolved requirements.txt. 20 with open(os.path.join(HERE, 'requirements.txt'), 'r') as fh: 21 REQUIRES = [line.strip() for line in fh] 22 23 24 class Develop(develop): 25 """Custom `develop` command to always build mo files on install -e.""" 26 27 def run(self): 28 self.run_command('compile_catalog') 29 develop.run(self) # old style class 30 31 32 class BuildPy(build_py): 33 """Custom `build_py` command to always build mo files for wheels.""" 34 35 def run(self): 36 self.run_command('compile_catalog') 37 build_py.run(self) # old style class 38 39 40 class Sdist(sdist): 41 """Custom `sdist` command to ensure that mo files are always created.""" 42 43 def run(self): 44 self.run_command('compile_catalog') 45 sdist.run(self) # old style class 46 47 48 setup( 49 name=PACKAGE_NAME, 50 version=versioneer.get_version(), 51 license='Apache License 2.0', 52 url='https://opsdroid.github.io/', 53 download_url='https://github.com/opsdroid/opsdroid/releases', 54 author='Jacob Tomlinson', 55 author_email='[email protected]', 56 description='An open source ChatOps bot framework.', 57 long_description=README, 58 packages=PACKAGES, 59 include_package_data=True, 60 zip_safe=False, 61 platforms='any', 62 classifiers=[ 63 'Development Status :: 4 - Beta', 64 'Environment :: Console', 65 'Framework :: AsyncIO', 66 'Intended Audience :: Developers', 67 'Intended Audience :: System Administrators', 68 'Intended Audience :: Information Technology', 69 'License :: OSI Approved :: Apache Software License', 70 'Programming Language :: Python', 71 'Programming Language :: Python :: 3', 72 'Programming Language :: Python :: 3 :: Only', 73 'Programming Language :: Python :: 3.5', 74 'Programming Language :: Python :: 3.6', 75 'Programming Language :: Python :: 3.7', 76 'Topic :: Communications :: Chat', 77 'Topic :: Scientific/Engineering :: Artificial Intelligence', 78 'Topic :: Software Development :: Libraries :: Python Modules' 79 ], 80 install_requires=REQUIRES, 81 test_suite='tests', 82 keywords=[ 83 'bot', 84 
'bot-framework', 85 'opsdroid', 86 'botkit', 87 'python3', 88 'asyncio', 89 'chatops', 90 'devops', 91 'nlu' 92 ], 93 setup_requires=['Babel'], 94 cmdclass=versioneer.get_cmdclass({'sdist': Sdist, 95 'build_py': BuildPy, 96 'develop': Develop}), 97 entry_points={ 98 'console_scripts': [ 99 'opsdroid = opsdroid.__main__:main' 100 ] 101 }, 102 ) 103 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -55,6 +55,7 @@ author_email='[email protected]', description='An open source ChatOps bot framework.', long_description=README, + long_description_content_type='text/markdown', packages=PACKAGES, include_package_data=True, zip_safe=False,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,6 +55,7 @@\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n+ long_description_content_type='text/markdown',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n", "issue": "PyPI deployments are failing\nLooks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.\r\n\r\n```\r\nHTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/\r\n```\nPyPI deployments are failing\nLooks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.\r\n\r\n```\r\nHTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: 
Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass=versioneer.get_cmdclass({'sdist': Sdist,\n 'build_py': BuildPy,\n 'develop': Develop}),\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}]}
1,667
89
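The 400 error in the record above is PyPI refusing a Markdown README rendered under its default reStructuredText assumption; the one-line fix is to declare the content type alongside `long_description`. A minimal sketch — the package name and version here are hypothetical:

```python
from setuptools import setup

setup(
    name="example-package",  # hypothetical name and version
    version="0.1.0",
    long_description=open("README.md", encoding="utf8").read(),
    # Without this, PyPI assumes the default text/x-rst and rejects a
    # Markdown README with the 400 error quoted in the issue above.
    long_description_content_type="text/markdown",
)
```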
gh_patches_debug_64778
rasdani/github-patches
git_diff
SCons__scons-3556
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX'] **Describe the bug** From git head, just now. File: src/engine/SCons/Tool/textfile.py Line 165: _text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX'] Line 174: _subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX'] Looks like a cur/paste/edit error. My guess, from the rest of the code, is that 'TEXTFILESUFFIX' should be 'SUBSTFILESUFFIX' on line 174 </issue> <code> [start of src/engine/SCons/Tool/textfile.py] 1 # -*- python -*- 2 # 3 # __COPYRIGHT__ 4 # 5 # Permission is hereby granted, free of charge, to any person obtaining 6 # a copy of this software and associated documentation files (the 7 # "Software"), to deal in the Software without restriction, including 8 # without limitation the rights to use, copy, modify, merge, publish, 9 # distribute, sublicense, and/or sell copies of the Software, and to 10 # permit persons to whom the Software is furnished to do so, subject to 11 # the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be included 14 # in all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 17 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 18 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 # 24 25 __doc__ = """ 26 Textfile/Substfile builder for SCons. 27 28 Create file 'target' which typically is a textfile. The 'source' 29 may be any combination of strings, Nodes, or lists of same. A 30 'linesep' will be put between any part written and defaults to 31 os.linesep. 32 33 The only difference between the Textfile builder and the Substfile 34 builder is that strings are converted to Value() nodes for the 35 former and File() nodes for the latter. To insert files in the 36 former or strings in the latter, wrap them in a File() or Value(), 37 respectively. 38 39 The values of SUBST_DICT first have any construction variables 40 expanded (its keys are not expanded). If a value of SUBST_DICT is 41 a python callable function, it is called and the result is expanded 42 as the value. Values are substituted in a "random" order; if any 43 substitution could be further expanded by another substitution, it 44 is unpredictable whether the expansion will occur. 45 """ 46 47 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" 48 49 import SCons 50 51 import os 52 import re 53 54 from SCons.Node import Node 55 from SCons.Node.Python import Value 56 from SCons.Util import is_String, is_Sequence, is_Dict, to_bytes 57 58 59 TEXTFILE_FILE_WRITE_MODE = 'w' 60 61 LINESEP = '\n' 62 63 def _do_subst(node, subs): 64 """ 65 Fetch the node contents and replace all instances of the keys with 66 their values. For example, if subs is 67 {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'}, 68 then all instances of %VERSION% in the file will be replaced with 69 1.2345 and so forth. 
70 """ 71 contents = node.get_text_contents() 72 if subs: 73 for (k, val) in subs: 74 contents = contents.replace(k, val) 75 76 if 'b' in TEXTFILE_FILE_WRITE_MODE: 77 try: 78 contents = bytearray(contents, 'utf-8') 79 except UnicodeDecodeError: 80 # contents is already utf-8 encoded python 2 str i.e. a byte array 81 contents = bytearray(contents) 82 83 return contents 84 85 86 def _action(target, source, env): 87 88 # prepare the line separator 89 linesep = env['LINESEPARATOR'] 90 if linesep is None: 91 linesep = LINESEP # os.linesep 92 elif is_String(linesep): 93 pass 94 elif isinstance(linesep, Value): 95 linesep = linesep.get_text_contents() 96 else: 97 raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s' 98 % repr(linesep), None) 99 100 if 'b' in TEXTFILE_FILE_WRITE_MODE: 101 linesep = to_bytes(linesep) 102 103 # create a dictionary to use for the substitutions 104 if 'SUBST_DICT' not in env: 105 subs = None # no substitutions 106 else: 107 subst_dict = env['SUBST_DICT'] 108 if is_Dict(subst_dict): 109 subst_dict = list(subst_dict.items()) 110 elif is_Sequence(subst_dict): 111 pass 112 else: 113 raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence') 114 subs = [] 115 for (k, value) in subst_dict: 116 if callable(value): 117 value = value() 118 if is_String(value): 119 value = env.subst(value) 120 else: 121 value = str(value) 122 subs.append((k, value)) 123 124 # write the file 125 try: 126 target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='') 127 except (OSError, IOError): 128 raise SCons.Errors.UserError("Can't write target file %s" % target[0]) 129 130 # separate lines by 'linesep' only if linesep is not empty 131 lsep = None 132 for line in source: 133 if lsep: 134 target_file.write(lsep) 135 136 target_file.write(_do_subst(line, subs)) 137 lsep = linesep 138 target_file.close() 139 140 141 def _strfunc(target, source, env): 142 return "Creating '%s'" % target[0] 143 144 145 def _convert_list_R(newlist, sources): 146 for elem in sources: 147 if is_Sequence(elem): 148 _convert_list_R(newlist, elem) 149 elif isinstance(elem, Node): 150 newlist.append(elem) 151 else: 152 newlist.append(Value(elem)) 153 154 155 def _convert_list(target, source, env): 156 if len(target) != 1: 157 raise SCons.Errors.UserError("Only one target file allowed") 158 newlist = [] 159 _convert_list_R(newlist, source) 160 return target, newlist 161 162 163 _common_varlist = ['SUBST_DICT', 'LINESEPARATOR'] 164 165 _text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX'] 166 _text_builder = SCons.Builder.Builder( 167 action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist), 168 source_factory=Value, 169 emitter=_convert_list, 170 prefix='$TEXTFILEPREFIX', 171 suffix='$TEXTFILESUFFIX', 172 ) 173 174 _subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX'] 175 _subst_builder = SCons.Builder.Builder( 176 action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist), 177 source_factory=SCons.Node.FS.File, 178 emitter=_convert_list, 179 prefix='$SUBSTFILEPREFIX', 180 suffix='$SUBSTFILESUFFIX', 181 src_suffix=['.in'], 182 ) 183 184 185 def generate(env): 186 env['LINESEPARATOR'] = LINESEP # os.linesep 187 env['BUILDERS']['Textfile'] = _text_builder 188 env['TEXTFILEPREFIX'] = '' 189 env['TEXTFILESUFFIX'] = '.txt' 190 env['BUILDERS']['Substfile'] = _subst_builder 191 env['SUBSTFILEPREFIX'] = '' 192 env['SUBSTFILESUFFIX'] = '' 193 194 195 def exists(env): 196 return 1 197 198 # Local Variables: 199 # tab-width:4 
200 # indent-tabs-mode:nil 201 # End: 202 # vim: set expandtab tabstop=4 shiftwidth=4: 203 [end of src/engine/SCons/Tool/textfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/engine/SCons/Tool/textfile.py b/src/engine/SCons/Tool/textfile.py --- a/src/engine/SCons/Tool/textfile.py +++ b/src/engine/SCons/Tool/textfile.py @@ -171,7 +171,7 @@ suffix='$TEXTFILESUFFIX', ) -_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX'] +_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX'] _subst_builder = SCons.Builder.Builder( action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist), source_factory=SCons.Node.FS.File,
{"golden_diff": "diff --git a/src/engine/SCons/Tool/textfile.py b/src/engine/SCons/Tool/textfile.py\n--- a/src/engine/SCons/Tool/textfile.py\n+++ b/src/engine/SCons/Tool/textfile.py\n@@ -171,7 +171,7 @@\n suffix='$TEXTFILESUFFIX',\n )\n \n-_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n+_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']\n _subst_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n", "issue": "['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n\r\n**Describe the bug**\r\nFrom git head, just now. \r\nFile: src/engine/SCons/Tool/textfile.py\r\nLine 165:\r\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\r\n\r\nLine 174:\r\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\r\n\r\nLooks like a cur/paste/edit error. My guess, from the rest of the code, is that\r\n 'TEXTFILESUFFIX' should be 'SUBSTFILESUFFIX' on line 174\r\n\n", "before_files": [{"content": "# -*- python -*-\n#\n# __COPYRIGHT__\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__doc__ = \"\"\"\nTextfile/Substfile builder for SCons.\n\n Create file 'target' which typically is a textfile. The 'source'\n may be any combination of strings, Nodes, or lists of same. A\n 'linesep' will be put between any part written and defaults to\n os.linesep.\n\n The only difference between the Textfile builder and the Substfile\n builder is that strings are converted to Value() nodes for the\n former and File() nodes for the latter. To insert files in the\n former or strings in the latter, wrap them in a File() or Value(),\n respectively.\n\n The values of SUBST_DICT first have any construction variables\n expanded (its keys are not expanded). If a value of SUBST_DICT is\n a python callable function, it is called and the result is expanded\n as the value. 
Values are substituted in a \"random\" order; if any\n substitution could be further expanded by another substitution, it\n is unpredictable whether the expansion will occur.\n\"\"\"\n\n__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n\nimport SCons\n\nimport os\nimport re\n\nfrom SCons.Node import Node\nfrom SCons.Node.Python import Value\nfrom SCons.Util import is_String, is_Sequence, is_Dict, to_bytes\n\n\nTEXTFILE_FILE_WRITE_MODE = 'w'\n\nLINESEP = '\\n'\n\ndef _do_subst(node, subs):\n \"\"\"\n Fetch the node contents and replace all instances of the keys with\n their values. For example, if subs is\n {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},\n then all instances of %VERSION% in the file will be replaced with\n 1.2345 and so forth.\n \"\"\"\n contents = node.get_text_contents()\n if subs:\n for (k, val) in subs:\n contents = contents.replace(k, val)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n try:\n contents = bytearray(contents, 'utf-8')\n except UnicodeDecodeError:\n # contents is already utf-8 encoded python 2 str i.e. a byte array\n contents = bytearray(contents)\n\n return contents\n\n\ndef _action(target, source, env):\n\n # prepare the line separator\n linesep = env['LINESEPARATOR']\n if linesep is None:\n linesep = LINESEP # os.linesep\n elif is_String(linesep):\n pass\n elif isinstance(linesep, Value):\n linesep = linesep.get_text_contents()\n else:\n raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'\n % repr(linesep), None)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n linesep = to_bytes(linesep)\n\n # create a dictionary to use for the substitutions\n if 'SUBST_DICT' not in env:\n subs = None # no substitutions\n else:\n subst_dict = env['SUBST_DICT']\n if is_Dict(subst_dict):\n subst_dict = list(subst_dict.items())\n elif is_Sequence(subst_dict):\n pass\n else:\n raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')\n subs = []\n for (k, value) in subst_dict:\n if callable(value):\n value = value()\n if is_String(value):\n value = env.subst(value)\n else:\n value = str(value)\n subs.append((k, value))\n\n # write the file\n try:\n target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')\n except (OSError, IOError):\n raise SCons.Errors.UserError(\"Can't write target file %s\" % target[0])\n\n # separate lines by 'linesep' only if linesep is not empty\n lsep = None\n for line in source:\n if lsep:\n target_file.write(lsep)\n\n target_file.write(_do_subst(line, subs))\n lsep = linesep\n target_file.close()\n\n\ndef _strfunc(target, source, env):\n return \"Creating '%s'\" % target[0]\n\n\ndef _convert_list_R(newlist, sources):\n for elem in sources:\n if is_Sequence(elem):\n _convert_list_R(newlist, elem)\n elif isinstance(elem, Node):\n newlist.append(elem)\n else:\n newlist.append(Value(elem))\n\n\ndef _convert_list(target, source, env):\n if len(target) != 1:\n raise SCons.Errors.UserError(\"Only one target file allowed\")\n newlist = []\n _convert_list_R(newlist, source)\n return target, newlist\n\n\n_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']\n\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\n_text_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),\n source_factory=Value,\n emitter=_convert_list,\n prefix='$TEXTFILEPREFIX',\n suffix='$TEXTFILESUFFIX',\n)\n\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n_subst_builder = SCons.Builder.Builder(\n 
action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n emitter=_convert_list,\n prefix='$SUBSTFILEPREFIX',\n suffix='$SUBSTFILESUFFIX',\n src_suffix=['.in'],\n)\n\n\ndef generate(env):\n env['LINESEPARATOR'] = LINESEP # os.linesep\n env['BUILDERS']['Textfile'] = _text_builder\n env['TEXTFILEPREFIX'] = ''\n env['TEXTFILESUFFIX'] = '.txt'\n env['BUILDERS']['Substfile'] = _subst_builder\n env['SUBSTFILEPREFIX'] = ''\n env['SUBSTFILESUFFIX'] = ''\n\n\ndef exists(env):\n return 1\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n", "path": "src/engine/SCons/Tool/textfile.py"}]}
2,785
153
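The one-token fix above is easy to undervalue: per the SCons documentation, an Action's `varlist` adds the named construction variables to the action signature used for rebuild decisions, so the pasted-in `TEXTFILESUFFIX` meant changes to `SUBSTFILESUFFIX` would go unnoticed by previously built Substfile targets. The corrected pairing, restated as a self-contained snippet:

```python
_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']

# varlist feeds the named construction variables into the action
# signature SCons uses for rebuild decisions, so each builder must
# list its own prefix/suffix; with TEXTFILESUFFIX pasted into the
# Substfile list, edits to SUBSTFILESUFFIX would not re-trigger builds.
_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']
```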
gh_patches_debug_16005
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-2566
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Added Last x Days not including documents added today ### Description The "Added" filter is not including documents added today when using any of the "Last x" filter options. I have a "Recently Added" view which does not include the most recent documents that were just processed. If I change the filter to a specific date, for example, `After: 1/1/2023`, then the documents are included in the view. If I change to "Last 7 days", "Last Month", "Last 3 Months" or "Last Year", then the most recent document in the view is one I scanned 3 days ago. ### Steps to reproduce 1) Add a new document to Paperless and let it complete processing 2) Open a document view 3) Set the sort to "Added" and descending (z-a) 4) Set the Added filter to "Last 7 days" - The document added does not show in the view 5) Set the Added filter to "After: 1/1/2023" - The document added shows in the view ### Webserver logs ```bash No server messages logged pertaining to the issue ``` ### Browser logs ```bash No browser messages logged pertaining to the issue ``` ### Paperless-ngx version 1.12.2 ### Host OS Debian 11 x86_64 ### Installation method Docker - official image ### Browser Chrome ### Configuration changes PAPERLESS_TIME_ZONE set to "America/Denver", Date order set to "MDY" ### Other _No response_ </issue> <code> [start of src/documents/index.py] 1 import logging 2 import math 3 import os 4 from contextlib import contextmanager 5 6 from dateutil.parser import isoparse 7 from django.conf import settings 8 from documents.models import Comment 9 from documents.models import Document 10 from whoosh import classify 11 from whoosh import highlight 12 from whoosh import query 13 from whoosh.fields import BOOLEAN 14 from whoosh.fields import DATETIME 15 from whoosh.fields import KEYWORD 16 from whoosh.fields import NUMERIC 17 from whoosh.fields import Schema 18 from whoosh.fields import TEXT 19 from whoosh.highlight import HtmlFormatter 20 from whoosh.index import create_in 21 from whoosh.index import exists_in 22 from whoosh.index import open_dir 23 from whoosh.qparser import MultifieldParser 24 from whoosh.qparser.dateparse import DateParserPlugin 25 from whoosh.searching import ResultsPage 26 from whoosh.searching import Searcher 27 from whoosh.writing import AsyncWriter 28 29 logger = logging.getLogger("paperless.index") 30 31 32 def get_schema(): 33 return Schema( 34 id=NUMERIC(stored=True, unique=True), 35 title=TEXT(sortable=True), 36 content=TEXT(), 37 asn=NUMERIC(sortable=True, signed=False), 38 correspondent=TEXT(sortable=True), 39 correspondent_id=NUMERIC(), 40 has_correspondent=BOOLEAN(), 41 tag=KEYWORD(commas=True, scorable=True, lowercase=True), 42 tag_id=KEYWORD(commas=True, scorable=True), 43 has_tag=BOOLEAN(), 44 type=TEXT(sortable=True), 45 type_id=NUMERIC(), 46 has_type=BOOLEAN(), 47 created=DATETIME(sortable=True), 48 modified=DATETIME(sortable=True), 49 added=DATETIME(sortable=True), 50 path=TEXT(sortable=True), 51 path_id=NUMERIC(), 52 has_path=BOOLEAN(), 53 comments=TEXT(), 54 ) 55 56 57 def open_index(recreate=False): 58 try: 59 if exists_in(settings.INDEX_DIR) and not recreate: 60 return open_dir(settings.INDEX_DIR, schema=get_schema()) 61 except Exception: 62 logger.exception("Error while opening the index, recreating.") 63 64 if not os.path.isdir(settings.INDEX_DIR): 65 os.makedirs(settings.INDEX_DIR, exist_ok=True) 66 return create_in(settings.INDEX_DIR, get_schema()) 67 68 69 
@contextmanager 70 def open_index_writer(optimize=False): 71 writer = AsyncWriter(open_index()) 72 73 try: 74 yield writer 75 except Exception as e: 76 logger.exception(str(e)) 77 writer.cancel() 78 finally: 79 writer.commit(optimize=optimize) 80 81 82 @contextmanager 83 def open_index_searcher(): 84 searcher = open_index().searcher() 85 86 try: 87 yield searcher 88 finally: 89 searcher.close() 90 91 92 def update_document(writer, doc): 93 tags = ",".join([t.name for t in doc.tags.all()]) 94 tags_ids = ",".join([str(t.id) for t in doc.tags.all()]) 95 comments = ",".join([str(c.comment) for c in Comment.objects.filter(document=doc)]) 96 writer.update_document( 97 id=doc.pk, 98 title=doc.title, 99 content=doc.content, 100 correspondent=doc.correspondent.name if doc.correspondent else None, 101 correspondent_id=doc.correspondent.id if doc.correspondent else None, 102 has_correspondent=doc.correspondent is not None, 103 tag=tags if tags else None, 104 tag_id=tags_ids if tags_ids else None, 105 has_tag=len(tags) > 0, 106 type=doc.document_type.name if doc.document_type else None, 107 type_id=doc.document_type.id if doc.document_type else None, 108 has_type=doc.document_type is not None, 109 created=doc.created, 110 added=doc.added, 111 asn=doc.archive_serial_number, 112 modified=doc.modified, 113 path=doc.storage_path.name if doc.storage_path else None, 114 path_id=doc.storage_path.id if doc.storage_path else None, 115 has_path=doc.storage_path is not None, 116 comments=comments, 117 ) 118 119 120 def remove_document(writer, doc): 121 remove_document_by_id(writer, doc.pk) 122 123 124 def remove_document_by_id(writer, doc_id): 125 writer.delete_by_term("id", doc_id) 126 127 128 def add_or_update_document(document): 129 with open_index_writer() as writer: 130 update_document(writer, document) 131 132 133 def remove_document_from_index(document): 134 with open_index_writer() as writer: 135 remove_document(writer, document) 136 137 138 class DelayedQuery: 139 def _get_query(self): 140 raise NotImplementedError() 141 142 def _get_query_filter(self): 143 criterias = [] 144 for k, v in self.query_params.items(): 145 if k == "correspondent__id": 146 criterias.append(query.Term("correspondent_id", v)) 147 elif k == "tags__id__all": 148 for tag_id in v.split(","): 149 criterias.append(query.Term("tag_id", tag_id)) 150 elif k == "tags__id__none": 151 for tag_id in v.split(","): 152 criterias.append(query.Not(query.Term("tag_id", tag_id))) 153 elif k == "document_type__id": 154 criterias.append(query.Term("type_id", v)) 155 elif k == "correspondent__isnull": 156 criterias.append(query.Term("has_correspondent", v == "false")) 157 elif k == "is_tagged": 158 criterias.append(query.Term("has_tag", v == "true")) 159 elif k == "document_type__isnull": 160 criterias.append(query.Term("has_type", v == "false")) 161 elif k == "created__date__lt": 162 criterias.append( 163 query.DateRange("created", start=None, end=isoparse(v)), 164 ) 165 elif k == "created__date__gt": 166 criterias.append( 167 query.DateRange("created", start=isoparse(v), end=None), 168 ) 169 elif k == "added__date__gt": 170 criterias.append(query.DateRange("added", start=isoparse(v), end=None)) 171 elif k == "added__date__lt": 172 criterias.append(query.DateRange("added", start=None, end=isoparse(v))) 173 elif k == "storage_path__id": 174 criterias.append(query.Term("path_id", v)) 175 elif k == "storage_path__isnull": 176 criterias.append(query.Term("has_path", v == "false")) 177 178 if len(criterias) > 0: 179 return query.And(criterias) 180 else: 
181 return None 182 183 def _get_query_sortedby(self): 184 if "ordering" not in self.query_params: 185 return None, False 186 187 field: str = self.query_params["ordering"] 188 189 sort_fields_map = { 190 "created": "created", 191 "modified": "modified", 192 "added": "added", 193 "title": "title", 194 "correspondent__name": "correspondent", 195 "document_type__name": "type", 196 "archive_serial_number": "asn", 197 } 198 199 if field.startswith("-"): 200 field = field[1:] 201 reverse = True 202 else: 203 reverse = False 204 205 if field not in sort_fields_map: 206 return None, False 207 else: 208 return sort_fields_map[field], reverse 209 210 def __init__(self, searcher: Searcher, query_params, page_size): 211 self.searcher = searcher 212 self.query_params = query_params 213 self.page_size = page_size 214 self.saved_results = dict() 215 self.first_score = None 216 217 def __len__(self): 218 page = self[0:1] 219 return len(page) 220 221 def __getitem__(self, item): 222 if item.start in self.saved_results: 223 return self.saved_results[item.start] 224 225 q, mask = self._get_query() 226 sortedby, reverse = self._get_query_sortedby() 227 228 page: ResultsPage = self.searcher.search_page( 229 q, 230 mask=mask, 231 filter=self._get_query_filter(), 232 pagenum=math.floor(item.start / self.page_size) + 1, 233 pagelen=self.page_size, 234 sortedby=sortedby, 235 reverse=reverse, 236 ) 237 page.results.fragmenter = highlight.ContextFragmenter(surround=50) 238 page.results.formatter = HtmlFormatter(tagname="span", between=" ... ") 239 240 if not self.first_score and len(page.results) > 0 and sortedby is None: 241 self.first_score = page.results[0].score 242 243 page.results.top_n = list( 244 map( 245 lambda hit: ( 246 (hit[0] / self.first_score) if self.first_score else None, 247 hit[1], 248 ), 249 page.results.top_n, 250 ), 251 ) 252 253 self.saved_results[item.start] = page 254 255 return page 256 257 258 class DelayedFullTextQuery(DelayedQuery): 259 def _get_query(self): 260 q_str = self.query_params["query"] 261 qp = MultifieldParser( 262 ["content", "title", "correspondent", "tag", "type", "comments"], 263 self.searcher.ixreader.schema, 264 ) 265 qp.add_plugin(DateParserPlugin()) 266 q = qp.parse(q_str) 267 268 corrected = self.searcher.correct_query(q, q_str) 269 if corrected.query != q: 270 corrected.query = corrected.string 271 272 return q, None 273 274 275 class DelayedMoreLikeThisQuery(DelayedQuery): 276 def _get_query(self): 277 more_like_doc_id = int(self.query_params["more_like_id"]) 278 content = Document.objects.get(id=more_like_doc_id).content 279 280 docnum = self.searcher.document_number(id=more_like_doc_id) 281 kts = self.searcher.key_terms_from_text( 282 "content", 283 content, 284 numterms=20, 285 model=classify.Bo1Model, 286 normalize=False, 287 ) 288 q = query.Or( 289 [query.Term("content", word, boost=weight) for word, weight in kts], 290 ) 291 mask = {docnum} 292 293 return q, mask 294 295 296 def autocomplete(ix, term, limit=10): 297 with ix.reader() as reader: 298 terms = [] 299 for (score, t) in reader.most_distinctive_terms( 300 "content", 301 number=limit, 302 prefix=term.lower(), 303 ): 304 terms.append(t) 305 return terms 306 [end of src/documents/index.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/documents/index.py b/src/documents/index.py --- a/src/documents/index.py +++ b/src/documents/index.py @@ -5,6 +5,7 @@ from dateutil.parser import isoparse from django.conf import settings +from django.utils import timezone from documents.models import Comment from documents.models import Document from whoosh import classify @@ -262,7 +263,7 @@ ["content", "title", "correspondent", "tag", "type", "comments"], self.searcher.ixreader.schema, ) - qp.add_plugin(DateParserPlugin()) + qp.add_plugin(DateParserPlugin(basedate=timezone.now())) q = qp.parse(q_str) corrected = self.searcher.correct_query(q, q_str)
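For context on the fix above: whoosh's `DateParserPlugin` resolves relative date expressions such as "yesterday" or "last 7 days" against its `basedate`, and when no `basedate` is supplied it falls back to a naive local `datetime.now()`. On an instance with `PAPERLESS_TIME_ZONE` set (here "America/Denver"), that naive anchor can sit hours away from the aware timestamps stored in the index, which is why documents added "today" slip out of the "Last 7 days" window. A minimal sketch of the behaviour, assuming whoosh is installed — the schema, field name, and query string below are illustrative assumptions, not code from the paperless repository:

```python
# Minimal sketch: DateParserPlugin anchors relative dates to basedate.
# The schema and query are illustrative assumptions, not paperless code.
from datetime import datetime, timezone

from whoosh.fields import DATETIME, TEXT, Schema
from whoosh.qparser import QueryParser
from whoosh.qparser.dateparse import DateParserPlugin

schema = Schema(content=TEXT, added=DATETIME)
parser = QueryParser("content", schema)

# Passing an aware "now" (UTC here, mirroring django.utils.timezone.now()
# in the patch) anchors "yesterday" / "last tuesday" to the intended
# instant instead of a naive local datetime.now().
parser.add_plugin(DateParserPlugin(basedate=datetime.now(timezone.utc)))

print(parser.parse("added:yesterday"))  # a DateRange over the prior day
```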
{"golden_diff": "diff --git a/src/documents/index.py b/src/documents/index.py\n--- a/src/documents/index.py\n+++ b/src/documents/index.py\n@@ -5,6 +5,7 @@\n \n from dateutil.parser import isoparse\n from django.conf import settings\n+from django.utils import timezone\n from documents.models import Comment\n from documents.models import Document\n from whoosh import classify\n@@ -262,7 +263,7 @@\n [\"content\", \"title\", \"correspondent\", \"tag\", \"type\", \"comments\"],\n self.searcher.ixreader.schema,\n )\n- qp.add_plugin(DateParserPlugin())\n+ qp.add_plugin(DateParserPlugin(basedate=timezone.now()))\n q = qp.parse(q_str)\n \n corrected = self.searcher.correct_query(q, q_str)\n", "issue": "Added Last x Days not including documents added today\n### Description\n\nThe \"Added\" filter is not including documents added today when using any of the \"Last x\" filter options. I have a \"Recently Added\" view which does not include the most recent documents that were just processed. If I change the filter to a specific date, for example, `After: 1/1/2023`, then the documents are included in the view. If I change to \"Last 7 days\", \"Last Month\", \"Last 3 Months\" or \"Last Year\", then the most recent document in the view is one I scanned 3 days ago. \n\n### Steps to reproduce\n\n1) Add a new document to Paperless and let it complete processing\r\n2) Open a document view\r\n3) Set the sort to \"Added\" and descending (z-a)\r\n4) Set the Added filter to \"Last 7 days\"\r\n - The document added does not show in the view\r\n 5) Set the Added filter to \"After: 1/1/2023\"\r\n - The document added shows in the view\n\n### Webserver logs\n\n```bash\nNo server messages logged pertaining to the issue\n```\n\n\n### Browser logs\n\n```bash\nNo browser messages logged pertaining to the issue\n```\n\n\n### Paperless-ngx version\n\n1.12.2\n\n### Host OS\n\nDebian 11 x86_64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nChrome\n\n### Configuration changes\n\nPAPERLESS_TIME_ZONE set to \"America/Denver\", Date order set to \"MDY\"\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import logging\nimport math\nimport os\nfrom contextlib import contextmanager\n\nfrom dateutil.parser import isoparse\nfrom django.conf import settings\nfrom documents.models import Comment\nfrom documents.models import Document\nfrom whoosh import classify\nfrom whoosh import highlight\nfrom whoosh import query\nfrom whoosh.fields import BOOLEAN\nfrom whoosh.fields import DATETIME\nfrom whoosh.fields import KEYWORD\nfrom whoosh.fields import NUMERIC\nfrom whoosh.fields import Schema\nfrom whoosh.fields import TEXT\nfrom whoosh.highlight import HtmlFormatter\nfrom whoosh.index import create_in\nfrom whoosh.index import exists_in\nfrom whoosh.index import open_dir\nfrom whoosh.qparser import MultifieldParser\nfrom whoosh.qparser.dateparse import DateParserPlugin\nfrom whoosh.searching import ResultsPage\nfrom whoosh.searching import Searcher\nfrom whoosh.writing import AsyncWriter\n\nlogger = logging.getLogger(\"paperless.index\")\n\n\ndef get_schema():\n return Schema(\n id=NUMERIC(stored=True, unique=True),\n title=TEXT(sortable=True),\n content=TEXT(),\n asn=NUMERIC(sortable=True, signed=False),\n correspondent=TEXT(sortable=True),\n correspondent_id=NUMERIC(),\n has_correspondent=BOOLEAN(),\n tag=KEYWORD(commas=True, scorable=True, lowercase=True),\n tag_id=KEYWORD(commas=True, scorable=True),\n has_tag=BOOLEAN(),\n type=TEXT(sortable=True),\n type_id=NUMERIC(),\n has_type=BOOLEAN(),\n 
created=DATETIME(sortable=True),\n modified=DATETIME(sortable=True),\n added=DATETIME(sortable=True),\n path=TEXT(sortable=True),\n path_id=NUMERIC(),\n has_path=BOOLEAN(),\n comments=TEXT(),\n )\n\n\ndef open_index(recreate=False):\n try:\n if exists_in(settings.INDEX_DIR) and not recreate:\n return open_dir(settings.INDEX_DIR, schema=get_schema())\n except Exception:\n logger.exception(\"Error while opening the index, recreating.\")\n\n if not os.path.isdir(settings.INDEX_DIR):\n os.makedirs(settings.INDEX_DIR, exist_ok=True)\n return create_in(settings.INDEX_DIR, get_schema())\n\n\n@contextmanager\ndef open_index_writer(optimize=False):\n writer = AsyncWriter(open_index())\n\n try:\n yield writer\n except Exception as e:\n logger.exception(str(e))\n writer.cancel()\n finally:\n writer.commit(optimize=optimize)\n\n\n@contextmanager\ndef open_index_searcher():\n searcher = open_index().searcher()\n\n try:\n yield searcher\n finally:\n searcher.close()\n\n\ndef update_document(writer, doc):\n tags = \",\".join([t.name for t in doc.tags.all()])\n tags_ids = \",\".join([str(t.id) for t in doc.tags.all()])\n comments = \",\".join([str(c.comment) for c in Comment.objects.filter(document=doc)])\n writer.update_document(\n id=doc.pk,\n title=doc.title,\n content=doc.content,\n correspondent=doc.correspondent.name if doc.correspondent else None,\n correspondent_id=doc.correspondent.id if doc.correspondent else None,\n has_correspondent=doc.correspondent is not None,\n tag=tags if tags else None,\n tag_id=tags_ids if tags_ids else None,\n has_tag=len(tags) > 0,\n type=doc.document_type.name if doc.document_type else None,\n type_id=doc.document_type.id if doc.document_type else None,\n has_type=doc.document_type is not None,\n created=doc.created,\n added=doc.added,\n asn=doc.archive_serial_number,\n modified=doc.modified,\n path=doc.storage_path.name if doc.storage_path else None,\n path_id=doc.storage_path.id if doc.storage_path else None,\n has_path=doc.storage_path is not None,\n comments=comments,\n )\n\n\ndef remove_document(writer, doc):\n remove_document_by_id(writer, doc.pk)\n\n\ndef remove_document_by_id(writer, doc_id):\n writer.delete_by_term(\"id\", doc_id)\n\n\ndef add_or_update_document(document):\n with open_index_writer() as writer:\n update_document(writer, document)\n\n\ndef remove_document_from_index(document):\n with open_index_writer() as writer:\n remove_document(writer, document)\n\n\nclass DelayedQuery:\n def _get_query(self):\n raise NotImplementedError()\n\n def _get_query_filter(self):\n criterias = []\n for k, v in self.query_params.items():\n if k == \"correspondent__id\":\n criterias.append(query.Term(\"correspondent_id\", v))\n elif k == \"tags__id__all\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Term(\"tag_id\", tag_id))\n elif k == \"tags__id__none\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Not(query.Term(\"tag_id\", tag_id)))\n elif k == \"document_type__id\":\n criterias.append(query.Term(\"type_id\", v))\n elif k == \"correspondent__isnull\":\n criterias.append(query.Term(\"has_correspondent\", v == \"false\"))\n elif k == \"is_tagged\":\n criterias.append(query.Term(\"has_tag\", v == \"true\"))\n elif k == \"document_type__isnull\":\n criterias.append(query.Term(\"has_type\", v == \"false\"))\n elif k == \"created__date__lt\":\n criterias.append(\n query.DateRange(\"created\", start=None, end=isoparse(v)),\n )\n elif k == \"created__date__gt\":\n criterias.append(\n query.DateRange(\"created\", start=isoparse(v), end=None),\n 
)\n elif k == \"added__date__gt\":\n criterias.append(query.DateRange(\"added\", start=isoparse(v), end=None))\n elif k == \"added__date__lt\":\n criterias.append(query.DateRange(\"added\", start=None, end=isoparse(v)))\n elif k == \"storage_path__id\":\n criterias.append(query.Term(\"path_id\", v))\n elif k == \"storage_path__isnull\":\n criterias.append(query.Term(\"has_path\", v == \"false\"))\n\n if len(criterias) > 0:\n return query.And(criterias)\n else:\n return None\n\n def _get_query_sortedby(self):\n if \"ordering\" not in self.query_params:\n return None, False\n\n field: str = self.query_params[\"ordering\"]\n\n sort_fields_map = {\n \"created\": \"created\",\n \"modified\": \"modified\",\n \"added\": \"added\",\n \"title\": \"title\",\n \"correspondent__name\": \"correspondent\",\n \"document_type__name\": \"type\",\n \"archive_serial_number\": \"asn\",\n }\n\n if field.startswith(\"-\"):\n field = field[1:]\n reverse = True\n else:\n reverse = False\n\n if field not in sort_fields_map:\n return None, False\n else:\n return sort_fields_map[field], reverse\n\n def __init__(self, searcher: Searcher, query_params, page_size):\n self.searcher = searcher\n self.query_params = query_params\n self.page_size = page_size\n self.saved_results = dict()\n self.first_score = None\n\n def __len__(self):\n page = self[0:1]\n return len(page)\n\n def __getitem__(self, item):\n if item.start in self.saved_results:\n return self.saved_results[item.start]\n\n q, mask = self._get_query()\n sortedby, reverse = self._get_query_sortedby()\n\n page: ResultsPage = self.searcher.search_page(\n q,\n mask=mask,\n filter=self._get_query_filter(),\n pagenum=math.floor(item.start / self.page_size) + 1,\n pagelen=self.page_size,\n sortedby=sortedby,\n reverse=reverse,\n )\n page.results.fragmenter = highlight.ContextFragmenter(surround=50)\n page.results.formatter = HtmlFormatter(tagname=\"span\", between=\" ... \")\n\n if not self.first_score and len(page.results) > 0 and sortedby is None:\n self.first_score = page.results[0].score\n\n page.results.top_n = list(\n map(\n lambda hit: (\n (hit[0] / self.first_score) if self.first_score else None,\n hit[1],\n ),\n page.results.top_n,\n ),\n )\n\n self.saved_results[item.start] = page\n\n return page\n\n\nclass DelayedFullTextQuery(DelayedQuery):\n def _get_query(self):\n q_str = self.query_params[\"query\"]\n qp = MultifieldParser(\n [\"content\", \"title\", \"correspondent\", \"tag\", \"type\", \"comments\"],\n self.searcher.ixreader.schema,\n )\n qp.add_plugin(DateParserPlugin())\n q = qp.parse(q_str)\n\n corrected = self.searcher.correct_query(q, q_str)\n if corrected.query != q:\n corrected.query = corrected.string\n\n return q, None\n\n\nclass DelayedMoreLikeThisQuery(DelayedQuery):\n def _get_query(self):\n more_like_doc_id = int(self.query_params[\"more_like_id\"])\n content = Document.objects.get(id=more_like_doc_id).content\n\n docnum = self.searcher.document_number(id=more_like_doc_id)\n kts = self.searcher.key_terms_from_text(\n \"content\",\n content,\n numterms=20,\n model=classify.Bo1Model,\n normalize=False,\n )\n q = query.Or(\n [query.Term(\"content\", word, boost=weight) for word, weight in kts],\n )\n mask = {docnum}\n\n return q, mask\n\n\ndef autocomplete(ix, term, limit=10):\n with ix.reader() as reader:\n terms = []\n for (score, t) in reader.most_distinctive_terms(\n \"content\",\n number=limit,\n prefix=term.lower(),\n ):\n terms.append(t)\n return terms\n", "path": "src/documents/index.py"}]}
3,903
170
gh_patches_debug_27416
rasdani/github-patches
git_diff
biolab__orange3-text-380
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tweet Profiler source available? <!-- This is an issue template. Please fill in the relevant details in the sections below. --> ##### Text version 0.3.0 <!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` --> ##### Orange version <!-- From menu _Help→About→Version_ or code `Orange.version.full_version` --> 3.13 ##### Additional info (worksheets, data, screenshots, ...) Is the server code for the Tweet Profiler available somewhere? I just started working with the text addon recently and wanted to read how you implemented it and found out that it runs on a server. </issue> <code> [start of doc/conf.py] 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Orange3 Text Mining documentation build configuration file, created by 5 # sphinx-quickstart on Fri May 8 15:18:26 2015. 6 # 7 # This file is execfile()d with the current directory set to its 8 # containing dir. 9 # 10 # Note that not all possible configuration values are present in this 11 # autogenerated file. 12 # 13 # All configuration values have a default; values that are commented out 14 # serve to show the default. 15 16 import sys 17 import os 18 19 # If extensions (or scripting to document with autodoc) are in another directory, 20 # add these directories to sys.path here. If the directory is relative to the 21 # documentation root, use os.path.abspath to make it absolute, like shown here. 22 sys.path.insert(0, os.path.abspath('..')) 23 24 # -- General configuration ------------------------------------------------ 25 26 # If your documentation needs a minimal Sphinx version, state it here. 27 #needs_sphinx = '1.0' 28 29 # Add any Sphinx extension module names here, as strings. They can be 30 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 # ones. 32 extensions = [ 33 'sphinx.ext.autodoc', 34 'sphinx.ext.doctest', 35 'sphinx.ext.todo', 36 'sphinx.ext.napoleon', 37 'sphinx.ext.viewcode', 38 'sphinx.ext.intersphinx', 39 ] 40 41 # Add any paths that contain templates here, relative to this directory. 42 # templates_path = ['_templates'] 43 44 # The suffix of source filenames. 45 source_suffix = '.rst' 46 47 # The encoding of source files. 48 source_encoding = 'utf-8' 49 50 # The master toctree document. 51 master_doc = 'index' 52 53 # General information about the project. 54 project = 'Orange3 Text Mining' 55 copyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana' 56 57 # The version info for the project you're documenting, acts as replacement for 58 # |version| and |release|, also used in various other places throughout the 59 # built documents. 60 # 61 # The short X.Y version. 62 # version = '0.1' 63 # The full version, including alpha/beta/rc tags. 64 # release = '0.1.1' 65 66 # The language for content autogenerated by Sphinx. Refer to documentation 67 # for a list of supported languages. 68 #language = None 69 70 # There are two options for replacing |today|: either, you set today to some 71 # non-false value, then it is used: 72 #today = '' 73 # Else, today_fmt is used as the format for a strftime call. 74 #today_fmt = '%B %d, %Y' 75 76 # List of patterns, relative to source directory, that match files and 77 # directories to ignore when looking for source files. 78 exclude_patterns = ['_build'] 79 80 # The reST default role (used for this markup: `text`) to use for all 81 # documents. 
82 #default_role = None 83 84 # If true, '()' will be appended to :func: etc. cross-reference text. 85 #add_function_parentheses = True 86 87 # If true, the current module name will be prepended to all description 88 # unit titles (such as .. function::). 89 #add_module_names = True 90 91 # If true, sectionauthor and moduleauthor directives will be shown in the 92 # output. They are ignored by default. 93 #show_authors = False 94 95 # The name of the Pygments (syntax highlighting) style to use. 96 pygments_style = 'sphinx' 97 98 # A list of ignored prefixes for module index sorting. 99 #modindex_common_prefix = [] 100 101 # If true, keep warnings as "system message" paragraphs in the built documents. 102 #keep_warnings = False 103 104 105 # -- Options for HTML output ---------------------------------------------- 106 107 # The theme to use for HTML and HTML Help pages. See the documentation for 108 # a list of builtin themes. 109 html_theme = 'nature' 110 111 # Theme options are theme-specific and customize the look and feel of a theme 112 # further. For a list of options available for each theme, see the 113 # documentation. 114 #html_theme_options = {} 115 116 # Add any paths that contain custom themes here, relative to this directory. 117 #html_theme_path = [] 118 119 # The name for this set of Sphinx documents. If None, it defaults to 120 # "<project> v<release> documentation". 121 #html_title = None 122 123 # A shorter title for the navigation bar. Default is the same as html_title. 124 #html_short_title = None 125 126 # The name of an image file (relative to this directory) to place at the top 127 # of the sidebar. 128 #html_logo = None 129 130 # The name of an image file (within the static path) to use as favicon of the 131 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 132 # pixels large. 133 #html_favicon = None 134 135 # Add any paths that contain custom static files (such as style sheets) here, 136 # relative to this directory. They are copied after the builtin static files, 137 # so a file named "default.css" will overwrite the builtin "default.css". 138 # html_static_path = ['_static'] 139 140 # Add any extra paths that contain custom files (such as robots.txt or 141 # .htaccess) here, relative to this directory. These files are copied 142 # directly to the root of the documentation. 143 #html_extra_path = [] 144 145 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 146 # using the given strftime format. 147 #html_last_updated_fmt = '%b %d, %Y' 148 149 # If true, SmartyPants will be used to convert quotes and dashes to 150 # typographically correct entities. 151 #html_use_smartypants = True 152 153 # Custom sidebar templates, maps document names to template names. 154 #html_sidebars = {} 155 156 # Additional templates that should be rendered to pages, maps page names to 157 # template names. 158 #html_additional_pages = {} 159 160 # If false, no module index is generated. 161 #html_domain_indices = True 162 163 # If false, no index is generated. 164 #html_use_index = True 165 166 # If true, the index is split into individual pages for each letter. 167 #html_split_index = False 168 169 # If true, links to the reST sources are added to the pages. 170 #html_show_sourcelink = True 171 172 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 173 #html_show_sphinx = True 174 175 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
176 #html_show_copyright = True 177 178 # If true, an OpenSearch description file will be output, and all pages will 179 # contain a <link> tag referring to it. The value of this option must be the 180 # base URL from which the finished HTML is served. 181 #html_use_opensearch = '' 182 183 # This is the file name suffix for HTML files (e.g. ".xhtml"). 184 #html_file_suffix = None 185 186 # Output file base name for HTML help builder. 187 htmlhelp_basename = 'Orange3TextMiningdoc' 188 189 190 # -- Options for LaTeX output --------------------------------------------- 191 192 latex_elements = { 193 # The paper size ('letterpaper' or 'a4paper'). 194 #'papersize': 'letterpaper', 195 196 # The font size ('10pt', '11pt' or '12pt'). 197 #'pointsize': '10pt', 198 199 # Additional stuff for the LaTeX preamble. 200 #'preamble': '', 201 } 202 203 # Grouping the document tree into LaTeX files. List of tuples 204 # (source start file, target name, title, 205 # author, documentclass [howto, manual, or own class]). 206 latex_documents = [ 207 ('index', 'Orange3TextMining.tex', 'Orange3 Text Mining Documentation', 208 'Biolab', 'manual'), 209 ] 210 211 # The name of an image file (relative to this directory) to place at the top of 212 # the title page. 213 #latex_logo = None 214 215 # For "manual" documents, if this is true, then toplevel headings are parts, 216 # not chapters. 217 #latex_use_parts = False 218 219 # If true, show page references after internal links. 220 #latex_show_pagerefs = False 221 222 # If true, show URL addresses after external links. 223 #latex_show_urls = False 224 225 # Documents to append as an appendix to all manuals. 226 #latex_appendices = [] 227 228 # If false, no module index is generated. 229 #latex_domain_indices = True 230 231 232 # -- Options for manual page output --------------------------------------- 233 234 # One entry per manual page. List of tuples 235 # (source start file, name, description, authors, manual section). 236 man_pages = [ 237 ('index', 'orange3textmining', 'Orange3 Text Mining Documentation', 238 ['Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'], 1) 239 ] 240 241 # If true, show URL addresses after external links. 242 #man_show_urls = False 243 244 245 # -- Options for Texinfo output ------------------------------------------- 246 247 # Grouping the document tree into Texinfo files. List of tuples 248 # (source start file, target name, title, author, 249 # dir menu entry, description, category) 250 texinfo_documents = [ 251 ('index', 'Orange3TextMining', 'Orange3 Text Mining Documentation', 252 'Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana', 'Orange3TextMining', 'One line description of project.', 253 'Miscellaneous'), 254 ] 255 256 # Documents to append as an appendix to all manuals. 257 #texinfo_appendices = [] 258 259 # If false, no module index is generated. 260 #texinfo_domain_indices = True 261 262 # How to display URL addresses: 'footnote', 'no', or 'inline'. 263 #texinfo_show_urls = 'footnote' 264 265 # If true, do not generate a @detailmenu in the "Top" node's menu. 266 #texinfo_no_detailmenu = False 267 268 # Mapping for external documentation in Sphinx. 
269 #intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html', 270 # '../../orange3/doc/visual-programming/build/html/objects.inv')} 271 [end of doc/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/conf.py b/doc/conf.py --- a/doc/conf.py +++ b/doc/conf.py @@ -52,7 +52,7 @@ # General information about the project. project = 'Orange3 Text Mining' -copyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana' +copyright = '2018, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -106,7 +106,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'nature' +html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -268,3 +268,6 @@ # Mapping for external documentation in Sphinx. #intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html', # '../../orange3/doc/visual-programming/build/html/objects.inv')} + +def setup(app): + app.add_stylesheet('style.css')
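A brief note on the `setup()` hook introduced above: `app.add_stylesheet()` is the legacy spelling, and on Sphinx 1.8 and later it is a deprecated alias for `add_css_file()`. The stylesheet itself must also live in a directory listed in `html_static_path`, which this `conf.py` currently leaves commented out. A hedged sketch of the modern equivalent — not part of the golden diff:

```python
# Sketch of the same hook using the Sphinx >= 1.8 API; assumes
# 'style.css' is placed under a directory listed in html_static_path.
def setup(app):
    app.add_css_file('style.css')  # deprecated alias: app.add_stylesheet
```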
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -52,7 +52,7 @@\n \n # General information about the project.\n project = 'Orange3 Text Mining'\n-copyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'\n+copyright = '2018, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n@@ -106,7 +106,7 @@\n \n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n-html_theme = 'nature'\n+html_theme = 'alabaster'\n \n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n@@ -268,3 +268,6 @@\n # Mapping for external documentation in Sphinx.\n #intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html',\n # '../../orange3/doc/visual-programming/build/html/objects.inv')}\n+\n+def setup(app):\n+ app.add_stylesheet('style.css')\n", "issue": "Tweet Profiler source available?\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n\r\n0.3.0\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.13\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\nIs the server code for the Tweet Profiler available somewhere? I just started working with the text addon recently and wanted to read how you implemented it and found out that it runs on a server.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Orange3 Text Mining documentation build configuration file, created by\n# sphinx-quickstart on Fri May 8 15:18:26 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or scripting to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Orange3 Text Mining'\ncopyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# version = '0.1'\n# The full version, including alpha/beta/rc tags.\n# release = '0.1.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'nature'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Orange3TextMiningdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Orange3TextMining.tex', 'Orange3 Text Mining Documentation',\n 'Biolab', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'orange3textmining', 'Orange3 Text Mining Documentation',\n ['Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Orange3TextMining', 'Orange3 Text Mining Documentation',\n 'Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana', 'Orange3TextMining', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# Mapping for external documentation in Sphinx.\n#intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html',\n# '../../orange3/doc/visual-programming/build/html/objects.inv')}\n", "path": "doc/conf.py"}]}
3,586
304
gh_patches_debug_26907
rasdani/github-patches
git_diff
google__turbinia-696
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Generate version data from tags and commits Today we have a hard-coded value in `turbinia/__init__.py`, but it would be nice to generate the version number from the current TAG for releases and from the git commit id when there is no TAG (ie. when running from master or a different branch). </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2017 Google Inc. 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 """This is the setup file for the project.""" 18 19 # yapf: disable 20 21 from __future__ import unicode_literals 22 23 import sys 24 25 from setuptools import find_packages 26 from setuptools import setup 27 28 29 # make sure turbinia is in path 30 sys.path.insert(0, '.') 31 32 import turbinia # pylint: disable=wrong-import-position 33 34 turbinia_description = ( 35 'Turbinia is an open-source framework for deploying, managing, and running' 36 'forensic workloads on cloud platforms. It is intended to automate running ' 37 'of common forensic processing tools (i.e. Plaso, TSK, strings, etc) to ' 38 'help with processing evidence in the Cloud, scaling the processing of ' 39 'large amounts of evidence, and decreasing response time by parallelizing' 40 'processing where possible.') 41 42 requirements = [] 43 with open('requirements.txt','r') as f: 44 requirements = f.read().splitlines() 45 setup( 46 name='turbinia', 47 version=turbinia.__version__, 48 description='Automation and Scaling of Digital Forensics Tools', 49 long_description=turbinia_description, 50 license='Apache License, Version 2.0', 51 url='http://turbinia.plumbing/', 52 maintainer='Turbinia development team', 53 maintainer_email='[email protected]', 54 classifiers=[ 55 'Development Status :: 4 - Beta', 56 'Environment :: Console', 57 'Operating System :: OS Independent', 58 'Programming Language :: Python', 59 ], 60 packages=find_packages(), 61 include_package_data=True, 62 zip_safe=False, 63 entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']}, 64 install_requires=requirements, 65 extras_require={ 66 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'], 67 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'], 68 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607'] 69 } 70 ) 71 [end of setup.py] [start of turbinia/__init__.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2015 Google Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Main Turbinia application.""" 16 17 __version__ = '20190819' 18 19 import logging 20 log = logging.getLogger('turbinia') 21 22 23 def log_and_report(message, trace): 24 """Log an error and if enabled, send to GCP Error Reporting API. 25 26 Args: 27 message(str): The user defined message to log. 28 trace(str): The error traceback message to log. 29 """ 30 from turbinia import config 31 32 log.error(message) 33 log.error(trace) 34 # If GCP Error Reporting is enabled. 35 config.LoadConfig() 36 if config.STACKDRIVER_TRACEBACK: 37 # Only load google_cloud if needed 38 from turbinia.lib import google_cloud 39 client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT) 40 client.report_exception() 41 42 43 class TurbiniaException(Exception): 44 """Turbinia Exception class.""" 45 pass 46 [end of turbinia/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -44,7 +44,6 @@ requirements = f.read().splitlines() setup( name='turbinia', - version=turbinia.__version__, description='Automation and Scaling of Digital Forensics Tools', long_description=turbinia_description, license='Apache License, Version 2.0', @@ -66,5 +65,7 @@ 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'], 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'], 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607'] - } + }, + use_scm_version=True, + setup_requires=['setuptools_scm'] ) diff --git a/turbinia/__init__.py b/turbinia/__init__.py --- a/turbinia/__init__.py +++ b/turbinia/__init__.py @@ -14,11 +14,15 @@ # limitations under the License. """Main Turbinia application.""" -__version__ = '20190819' - import logging log = logging.getLogger('turbinia') +from pkg_resources import get_distribution, DistributionNotFound +try: + __version__ = get_distribution(__name__).version +except DistributionNotFound: + __version__ = "unknown" + def log_and_report(message, trace): """Log an error and if enabled, send to GCP Error Reporting API.
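The moving parts of the fix above: `use_scm_version=True` makes setuptools invoke setuptools_scm at build/install time, which derives the version from git metadata — an exact tag becomes the version string, while commits past a tag produce a dev/local version — and the runtime half then reads the recorded version back via `pkg_resources`. A small sketch of the derivation, assuming setuptools_scm is installed and the script runs inside a git checkout; the sample version strings are illustrative:

```python
# Sketch: ask setuptools_scm which version it would assign to this
# checkout. Assumes a git clone with at least one tag; the outputs in
# the comments are illustrative of its default "guess-next-dev" scheme.
from setuptools_scm import get_version

version = get_version(root=".")
# On the commit tagged "20200601":  "20200601"
# Three commits after that tag:     e.g. "20200602.dev3+g1a2b3c4"
print(version)
```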
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,7 +44,6 @@\n requirements = f.read().splitlines()\n setup(\n name='turbinia',\n- version=turbinia.__version__,\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n@@ -66,5 +65,7 @@\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n- }\n+ },\n+ use_scm_version=True,\n+ setup_requires=['setuptools_scm']\n )\ndiff --git a/turbinia/__init__.py b/turbinia/__init__.py\n--- a/turbinia/__init__.py\n+++ b/turbinia/__init__.py\n@@ -14,11 +14,15 @@\n # limitations under the License.\n \"\"\"Main Turbinia application.\"\"\"\n \n-__version__ = '20190819'\n-\n import logging\n log = logging.getLogger('turbinia')\n \n+from pkg_resources import get_distribution, DistributionNotFound\n+try:\n+ __version__ = get_distribution(__name__).version\n+except DistributionNotFound:\n+ __version__ = \"unknown\"\n+\n \n def log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n", "issue": "Generate version data from tags and commits\nToday we have a hard-coded value in `turbinia/__init__.py`, but it would be nice to generate the version number from the current TAG for releases and from the git commit id when there is no TAG (ie. when running from master or a different branch).\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project.\"\"\"\n\n# yapf: disable\n\nfrom __future__ import unicode_literals\n\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\n# make sure turbinia is in path\nsys.path.insert(0, '.')\n\nimport turbinia # pylint: disable=wrong-import-position\n\nturbinia_description = (\n 'Turbinia is an open-source framework for deploying, managing, and running'\n 'forensic workloads on cloud platforms. It is intended to automate running '\n 'of common forensic processing tools (i.e. 
Plaso, TSK, strings, etc) to '\n 'help with processing evidence in the Cloud, scaling the processing of '\n 'large amounts of evidence, and decreasing response time by parallelizing'\n 'processing where possible.')\n\nrequirements = []\nwith open('requirements.txt','r') as f:\n requirements = f.read().splitlines()\nsetup(\n name='turbinia',\n version=turbinia.__version__,\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n url='http://turbinia.plumbing/',\n maintainer='Turbinia development team',\n maintainer_email='[email protected]',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},\n install_requires=requirements,\n extras_require={\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n }\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main Turbinia application.\"\"\"\n\n__version__ = '20190819'\n\nimport logging\nlog = logging.getLogger('turbinia')\n\n\ndef log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n\n Args:\n message(str): The user defined message to log.\n trace(str): The error traceback message to log.\n \"\"\"\n from turbinia import config\n\n log.error(message)\n log.error(trace)\n # If GCP Error Reporting is enabled.\n config.LoadConfig()\n if config.STACKDRIVER_TRACEBACK:\n # Only load google_cloud if needed\n from turbinia.lib import google_cloud\n client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT)\n client.report_exception()\n\n\nclass TurbiniaException(Exception):\n \"\"\"Turbinia Exception class.\"\"\"\n pass\n", "path": "turbinia/__init__.py"}]}
1,789
400
gh_patches_debug_7116
rasdani/github-patches
git_diff
kivy__kivy-2814
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> codeinput.py example missing Pro.ttf I was trying the various kivy examples, and one of them failed, the `codeinput.py` file, with the following error: ``` sinclair@blastocyst $ kivy /Users/sinclair/kivy/examples/widgets/codeinput.py [INFO ] Kivy v1.8.0 [INFO ] [Logger ] Record log in /Users/sinclair/.kivy/logs/kivy_14-12-27_20.txt [INFO ] [Factory ] 157 symbols loaded [DEBUG ] [Cache ] register <kv.lang> with limit=None, timeout=Nones [DEBUG ] [Cache ] register <kv.image> with limit=None, timeout=60s [DEBUG ] [Cache ] register <kv.atlas> with limit=None, timeout=Nones [INFO ] [Image ] Providers: img_imageio, img_tex, img_dds, img_pygame, img_gif (img_pil ignored) [DEBUG ] [Cache ] register <kv.texture> with limit=1000, timeout=60s [DEBUG ] [Cache ] register <kv.shader> with limit=1000, timeout=3600s [INFO ] [Text ] Provider: pygame [DEBUG ] [Window ] Ignored <egl_rpi> (import error) [INFO ] [Window ] Provider: pygame(['window_egl_rpi'] ignored) [DEBUG ] [Window ] Display driver Quartz [DEBUG ] [Window ] Actual window size: 800x600 [DEBUG ] [Window ] Actual color bits r8 g8 b8 a8 [DEBUG ] [Window ] Actual depth bits: 24 [DEBUG ] [Window ] Actual stencil bits: 8 [DEBUG ] [Window ] Actual multisampling samples: 4 [INFO ] [GL ] OpenGL version <2.1 INTEL-10.0.86> [INFO ] [GL ] OpenGL vendor <Intel Inc.> [INFO ] [GL ] OpenGL renderer <Intel HD Graphics 5000 OpenGL Engine> [INFO ] [GL ] OpenGL parsed version: 2, 1 [INFO ] [GL ] Shading version <1.20> [INFO ] [GL ] Texture max size <16384> [INFO ] [GL ] Texture max units <16> [DEBUG ] [Shader ] Fragment compiled successfully [DEBUG ] [Shader ] Vertex compiled successfully [DEBUG ] [ImageImageIO] Load </Applications/Kivy.app/Contents/Resources/kivy/kivy/data/glsl/default.png> [INFO ] [GL ] BGRA texture support is available [INFO ] [Window ] virtual keyboard not allowed, single mode, not docked [DEBUG ] [Cache ] register <kv.loader> with limit=500, timeout=60s [INFO ] [Loader ] using a thread pool of 2 workers [DEBUG ] [Cache ] register <textinput.label> with limit=None, timeout=60.0s [DEBUG ] [Cache ] register <textinput.width> with limit=None, timeout=60.0s [DEBUG ] [App ] Loading kv </Users/sinclair/kivy/examples/widgets/codeinputtest.kv> Traceback (most recent call last): File "/Users/sinclair/kivy/examples/widgets/codeinput.py", line 179, in <module> CodeInputTest().run() File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/app.py", line 766, in run root = self.build() File "/Users/sinclair/kivy/examples/widgets/codeinput.py", line 111, in build values=sorted(map(str, fonts.get_fonts()))) File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py", line 116, in __init__ self._build_dropdown() File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py", line 130, in _build_dropdown self._update_dropdown() File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py", line 139, in _update_dropdown item = cls(text=value) File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/behaviors.py", line 72, in __init__ super(ButtonBehavior, self).__init__(**kwargs) File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py", line 141, in __init__ self._create_label() File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py", line 161, in _create_label self._label = CoreLabel(**dkw) File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py", line 
136, in __init__ self.resolve_font_name() File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py", line 196, in resolve_font_name raise IOError('Label: File %r not found' % fontname) IOError: Label: File u'/Library/Fonts/ Pro.ttf' not found ``` I am using OS X 10.10.1 I tried adding a font file called "Pro.ttf" at that location to make it happy, but the result was the same. </issue> <code> [start of examples/widgets/codeinput.py] 1 from kivy.app import App 2 from kivy.extras.highlight import KivyLexer 3 from kivy.uix.spinner import Spinner, SpinnerOption 4 from kivy.uix.boxlayout import BoxLayout 5 from kivy.uix.codeinput import CodeInput 6 from kivy.uix.popup import Popup 7 from kivy.properties import ListProperty 8 from kivy.core.window import Window 9 from pygments import lexers 10 from pygame import font as fonts 11 import codecs, os 12 13 example_text = ''' 14 ---------------------Python---------------------------------- 15 import kivy 16 kivy.require('1.0.6') # replace with your current kivy version ! 17 from kivy.app import App 18 from kivy.uix.button import Button 19 20 class MyApp(App): 21 def build(self): 22 return Button(text='Hello World') 23 24 if __name__ == '__main__': 25 MyApp().run() 26 ----------------------Java----------------------------------- 27 28 public static byte toUnsignedByte(int intVal) { 29 byte byteVal; 30 return (byte)(intVal & 0xFF); 31 } 32 ---------------------kv lang--------------------------------- 33 #:kivy 1.0 34 35 <YourWidget>: 36 canvas: 37 Color: 38 rgb: .5, .5, .5 39 Rectangle: 40 pos: self.pos 41 size: self.size 42 ---------------------HTML------------------------------------ 43 <!-- Place this tag where you want the +1 button to render. --> 44 <div class="g-plusone" data-annotation="inline" data-width="300"></div> 45 46 <!-- Place this tag after the last +1 button tag. 
--> 47 <script type="text/javascript"> 48 (function() { 49 var po = document.createElement('script'); 50 po.type = 'text/javascript'; 51 po.async = true; 52 po.src = 'https://apis.google.com/js/plusone.js'; 53 var s = document.getElementsByTagName('script')[0]; 54 s.parentNode.insertBefore(po, s); 55 })(); 56 </script> 57 ''' 58 59 60 class Fnt_SpinnerOption(SpinnerOption): 61 pass 62 63 64 class LoadDialog(Popup): 65 66 def load(self, path, selection): 67 self.choosen_file = [None, ] 68 self.choosen_file = selection 69 Window.title = selection[0][selection[0].rfind(os.sep)+1:] 70 self.dismiss() 71 72 def cancel(self): 73 self.dismiss() 74 75 76 class SaveDialog(Popup): 77 78 def save(self, path, selection): 79 _file = codecs.open(selection, 'w', encoding='utf8') 80 _file.write(self.text) 81 Window.title = selection[selection.rfind(os.sep)+1:] 82 _file.close() 83 self.dismiss() 84 85 def cancel(self): 86 self.dismiss() 87 88 89 class CodeInputTest(App): 90 91 files = ListProperty([None, ]) 92 93 def build(self): 94 b = BoxLayout(orientation='vertical') 95 languages = Spinner( 96 text='language', 97 values=sorted(['KvLexer', ] + list(lexers.LEXERS.keys()))) 98 99 languages.bind(text=self.change_lang) 100 101 menu = BoxLayout( 102 size_hint_y=None, 103 height='30pt') 104 fnt_size = Spinner( 105 text='12', 106 values=list(map(str, list(range(5, 40))))) 107 fnt_size.bind(text=self._update_size) 108 fnt_name = Spinner( 109 text='DroidSansMono', 110 option_cls=Fnt_SpinnerOption, 111 values=sorted(map(str, fonts.get_fonts()))) 112 fnt_name.bind(text=self._update_font) 113 mnu_file = Spinner( 114 text='File', 115 values=('Open', 'SaveAs', 'Save', 'Close')) 116 mnu_file.bind(text=self._file_menu_selected) 117 118 menu.add_widget(mnu_file) 119 menu.add_widget(fnt_size) 120 menu.add_widget(fnt_name) 121 menu.add_widget(languages) 122 b.add_widget(menu) 123 124 self.codeinput = CodeInput( 125 lexer=KivyLexer(), 126 font_name='data/fonts/DroidSansMono.ttf', font_size=12, 127 text=example_text) 128 129 b.add_widget(self.codeinput) 130 131 return b 132 133 def _update_size(self, instance, size): 134 self.codeinput.font_size = float(size) 135 136 def _update_font(self, instance, fnt_name): 137 instance.font_name = self.codeinput.font_name =\ 138 fonts.match_font(fnt_name) 139 140 def _file_menu_selected(self, instance, value): 141 if value == 'File': 142 return 143 instance.text = 'File' 144 if value == 'Open': 145 if not hasattr(self, 'load_dialog'): 146 self.load_dialog = LoadDialog() 147 self.load_dialog.open() 148 self.load_dialog.bind(choosen_file=self.setter('files')) 149 elif value == 'SaveAs': 150 if not hasattr(self, 'saveas_dialog'): 151 self.saveas_dialog = SaveDialog() 152 self.saveas_dialog.text = self.codeinput.text 153 self.saveas_dialog.open() 154 elif value == 'Save': 155 if self.files[0]: 156 _file = codecs.open(self.files[0], 'w', encoding='utf8') 157 _file.write(self.codeinput.text) 158 _file.close() 159 elif value == 'Close': 160 if self.files[0]: 161 self.codeinput.text = '' 162 Window.title = 'untitled' 163 164 def on_files(self, instance, values): 165 if not values[0]: 166 return 167 _file = codecs.open(values[0], 'r', encoding='utf8') 168 self.codeinput.text = _file.read() 169 _file.close() 170 171 def change_lang(self, instance, l): 172 if l == 'KvLexer': 173 lx = KivyLexer() 174 else: 175 lx = lexers.get_lexer_by_name(lexers.LEXERS[l][2][0]) 176 self.codeinput.lexer = lx 177 178 if __name__ == '__main__': 179 CodeInputTest().run() 180 [end of examples/widgets/codeinput.py] 
</code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/widgets/codeinput.py b/examples/widgets/codeinput.py --- a/examples/widgets/codeinput.py +++ b/examples/widgets/codeinput.py @@ -134,8 +134,9 @@ self.codeinput.font_size = float(size) def _update_font(self, instance, fnt_name): - instance.font_name = self.codeinput.font_name =\ - fonts.match_font(fnt_name) + font_name = fonts.match_font(fnt_name) + if os.path.exists(font_name): + instance.font_name = self.codeinput.font_name = font_name def _file_menu_selected(self, instance, value): if value == 'File':
{"golden_diff": "diff --git a/examples/widgets/codeinput.py b/examples/widgets/codeinput.py\n--- a/examples/widgets/codeinput.py\n+++ b/examples/widgets/codeinput.py\n@@ -134,8 +134,9 @@\n self.codeinput.font_size = float(size)\n \n def _update_font(self, instance, fnt_name):\n- instance.font_name = self.codeinput.font_name =\\\n- fonts.match_font(fnt_name)\n+ font_name = fonts.match_font(fnt_name)\n+ if os.path.exists(font_name):\n+ instance.font_name = self.codeinput.font_name = font_name\n \n def _file_menu_selected(self, instance, value):\n if value == 'File':\n", "issue": "codeinput.py example missing Pro.ttf\nI was trying the various kivy examples, and one of them failed, the `codeinput.py` file, with the following error:\n\n```\nsinclair@blastocyst $ kivy /Users/sinclair/kivy/examples/widgets/codeinput.py\n[INFO ] Kivy v1.8.0\n[INFO ] [Logger ] Record log in /Users/sinclair/.kivy/logs/kivy_14-12-27_20.txt\n[INFO ] [Factory ] 157 symbols loaded\n[DEBUG ] [Cache ] register <kv.lang> with limit=None, timeout=Nones\n[DEBUG ] [Cache ] register <kv.image> with limit=None, timeout=60s\n[DEBUG ] [Cache ] register <kv.atlas> with limit=None, timeout=Nones\n[INFO ] [Image ] Providers: img_imageio, img_tex, img_dds, img_pygame, img_gif (img_pil ignored)\n[DEBUG ] [Cache ] register <kv.texture> with limit=1000, timeout=60s\n[DEBUG ] [Cache ] register <kv.shader> with limit=1000, timeout=3600s\n[INFO ] [Text ] Provider: pygame\n[DEBUG ] [Window ] Ignored <egl_rpi> (import error)\n[INFO ] [Window ] Provider: pygame(['window_egl_rpi'] ignored)\n[DEBUG ] [Window ] Display driver Quartz\n[DEBUG ] [Window ] Actual window size: 800x600\n[DEBUG ] [Window ] Actual color bits r8 g8 b8 a8\n[DEBUG ] [Window ] Actual depth bits: 24\n[DEBUG ] [Window ] Actual stencil bits: 8\n[DEBUG ] [Window ] Actual multisampling samples: 4\n[INFO ] [GL ] OpenGL version <2.1 INTEL-10.0.86>\n[INFO ] [GL ] OpenGL vendor <Intel Inc.>\n[INFO ] [GL ] OpenGL renderer <Intel HD Graphics 5000 OpenGL Engine>\n[INFO ] [GL ] OpenGL parsed version: 2, 1\n[INFO ] [GL ] Shading version <1.20>\n[INFO ] [GL ] Texture max size <16384>\n[INFO ] [GL ] Texture max units <16>\n[DEBUG ] [Shader ] Fragment compiled successfully\n[DEBUG ] [Shader ] Vertex compiled successfully\n[DEBUG ] [ImageImageIO] Load </Applications/Kivy.app/Contents/Resources/kivy/kivy/data/glsl/default.png>\n[INFO ] [GL ] BGRA texture support is available\n[INFO ] [Window ] virtual keyboard not allowed, single mode, not docked\n[DEBUG ] [Cache ] register <kv.loader> with limit=500, timeout=60s\n[INFO ] [Loader ] using a thread pool of 2 workers\n[DEBUG ] [Cache ] register <textinput.label> with limit=None, timeout=60.0s\n[DEBUG ] [Cache ] register <textinput.width> with limit=None, timeout=60.0s\n[DEBUG ] [App ] Loading kv </Users/sinclair/kivy/examples/widgets/codeinputtest.kv>\n Traceback (most recent call last):\n File \"/Users/sinclair/kivy/examples/widgets/codeinput.py\", line 179, in <module>\n CodeInputTest().run()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/app.py\", line 766, in run\n root = self.build()\n File \"/Users/sinclair/kivy/examples/widgets/codeinput.py\", line 111, in build\n values=sorted(map(str, fonts.get_fonts())))\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py\", line 116, in __init__\n self._build_dropdown()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py\", line 130, in _build_dropdown\n self._update_dropdown()\n File 
\"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py\", line 139, in _update_dropdown\n item = cls(text=value)\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/behaviors.py\", line 72, in __init__\n super(ButtonBehavior, self).__init__(**kwargs)\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py\", line 141, in __init__\n self._create_label()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py\", line 161, in _create_label\n self._label = CoreLabel(**dkw)\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py\", line 136, in __init__\n self.resolve_font_name()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py\", line 196, in resolve_font_name\n raise IOError('Label: File %r not found' % fontname)\n IOError: Label: File u'/Library/Fonts/ Pro.ttf' not found\n```\n\nI am using OS X 10.10.1\n\nI tried adding a font file called \"Pro.ttf\" at that location to make it happy, but the result was the same.\n\n", "before_files": [{"content": "from kivy.app import App\nfrom kivy.extras.highlight import KivyLexer\nfrom kivy.uix.spinner import Spinner, SpinnerOption\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.codeinput import CodeInput\nfrom kivy.uix.popup import Popup\nfrom kivy.properties import ListProperty\nfrom kivy.core.window import Window\nfrom pygments import lexers\nfrom pygame import font as fonts\nimport codecs, os\n\nexample_text = '''\n---------------------Python----------------------------------\nimport kivy\nkivy.require('1.0.6') # replace with your current kivy version !\nfrom kivy.app import App\nfrom kivy.uix.button import Button\n\nclass MyApp(App):\n def build(self):\n return Button(text='Hello World')\n\nif __name__ == '__main__':\n MyApp().run()\n----------------------Java-----------------------------------\n\npublic static byte toUnsignedByte(int intVal) {\n byte byteVal;\n return (byte)(intVal & 0xFF);\n}\n---------------------kv lang---------------------------------\n#:kivy 1.0\n\n<YourWidget>:\n canvas:\n Color:\n rgb: .5, .5, .5\n Rectangle:\n pos: self.pos\n size: self.size\n---------------------HTML------------------------------------\n<!-- Place this tag where you want the +1 button to render. -->\n<div class=\"g-plusone\" data-annotation=\"inline\" data-width=\"300\"></div>\n\n<!-- Place this tag after the last +1 button tag. 
-->\n<script type=\"text/javascript\">\n (function() {\n var po = document.createElement('script');\n po.type = 'text/javascript';\n po.async = true;\n po.src = 'https://apis.google.com/js/plusone.js';\n var s = document.getElementsByTagName('script')[0];\n s.parentNode.insertBefore(po, s);\n })();\n</script>\n'''\n\n\nclass Fnt_SpinnerOption(SpinnerOption):\n pass\n\n\nclass LoadDialog(Popup):\n\n def load(self, path, selection):\n self.choosen_file = [None, ]\n self.choosen_file = selection\n Window.title = selection[0][selection[0].rfind(os.sep)+1:]\n self.dismiss()\n\n def cancel(self):\n self.dismiss()\n\n\nclass SaveDialog(Popup):\n\n def save(self, path, selection):\n _file = codecs.open(selection, 'w', encoding='utf8')\n _file.write(self.text)\n Window.title = selection[selection.rfind(os.sep)+1:]\n _file.close()\n self.dismiss()\n\n def cancel(self):\n self.dismiss()\n\n\nclass CodeInputTest(App):\n\n files = ListProperty([None, ])\n\n def build(self):\n b = BoxLayout(orientation='vertical')\n languages = Spinner(\n text='language',\n values=sorted(['KvLexer', ] + list(lexers.LEXERS.keys())))\n\n languages.bind(text=self.change_lang)\n\n menu = BoxLayout(\n size_hint_y=None,\n height='30pt')\n fnt_size = Spinner(\n text='12',\n values=list(map(str, list(range(5, 40)))))\n fnt_size.bind(text=self._update_size)\n fnt_name = Spinner(\n text='DroidSansMono',\n option_cls=Fnt_SpinnerOption,\n values=sorted(map(str, fonts.get_fonts())))\n fnt_name.bind(text=self._update_font)\n mnu_file = Spinner(\n text='File',\n values=('Open', 'SaveAs', 'Save', 'Close'))\n mnu_file.bind(text=self._file_menu_selected)\n\n menu.add_widget(mnu_file)\n menu.add_widget(fnt_size)\n menu.add_widget(fnt_name)\n menu.add_widget(languages)\n b.add_widget(menu)\n\n self.codeinput = CodeInput(\n lexer=KivyLexer(),\n font_name='data/fonts/DroidSansMono.ttf', font_size=12,\n text=example_text)\n\n b.add_widget(self.codeinput)\n\n return b\n\n def _update_size(self, instance, size):\n self.codeinput.font_size = float(size)\n\n def _update_font(self, instance, fnt_name):\n instance.font_name = self.codeinput.font_name =\\\n fonts.match_font(fnt_name)\n\n def _file_menu_selected(self, instance, value):\n if value == 'File':\n return\n instance.text = 'File'\n if value == 'Open':\n if not hasattr(self, 'load_dialog'):\n self.load_dialog = LoadDialog()\n self.load_dialog.open()\n self.load_dialog.bind(choosen_file=self.setter('files'))\n elif value == 'SaveAs':\n if not hasattr(self, 'saveas_dialog'):\n self.saveas_dialog = SaveDialog()\n self.saveas_dialog.text = self.codeinput.text\n self.saveas_dialog.open()\n elif value == 'Save':\n if self.files[0]:\n _file = codecs.open(self.files[0], 'w', encoding='utf8')\n _file.write(self.codeinput.text)\n _file.close()\n elif value == 'Close':\n if self.files[0]:\n self.codeinput.text = ''\n Window.title = 'untitled'\n\n def on_files(self, instance, values):\n if not values[0]:\n return\n _file = codecs.open(values[0], 'r', encoding='utf8')\n self.codeinput.text = _file.read()\n _file.close()\n\n def change_lang(self, instance, l):\n if l == 'KvLexer':\n lx = KivyLexer()\n else:\n lx = lexers.get_lexer_by_name(lexers.LEXERS[l][2][0])\n self.codeinput.lexer = lx\n\nif __name__ == '__main__':\n CodeInputTest().run()\n", "path": "examples/widgets/codeinput.py"}]}
3,473
147
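The record above turns on a point worth spelling out: `pygame.font.match_font()` can hand back a path to a font file that is not actually on disk — the traceback shows it resolving to `/Library/Fonts/ Pro.ttf` — so the patched `_update_font` validates the match before assigning it. A minimal standalone sketch of that guard; the `resolve_font` name and the fallback path are illustrative, not part of the kivy example itself:

```python
import os

from pygame import font as fonts


def resolve_font(name, fallback="data/fonts/DroidSansMono.ttf"):
    """Return a font path for ``name`` that exists on disk, else ``fallback``."""
    matched = fonts.match_font(name)  # may return None or a stale path
    if matched and os.path.exists(matched):
        return matched
    return fallback
```

Centralising the existence check in one helper also keeps the spinner callback a one-liner instead of repeating the test at every assignment.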
gh_patches_debug_2116
rasdani/github-patches
git_diff
comic__grand-challenge.org-3383
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Viewer configuration does not show linking options **Describe the bug** The view and edit pages for viewer configurations no longer show options to set the linking configuration. **To Reproduce** Steps to reproduce the behavior: 1. Go to https://grand-challenge.org/viewer-configurations/demo-rse/ 2. Scroll down to 'Linking Configuration' The options displayed are duplicates of the 'Plugin and Tools' section. **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] - Version [e.g. 22] **Additional context** Add any other context about the problem here. </issue> <code> [start of app/grandchallenge/workstation_configs/forms.py] 1 from crispy_forms.helper import FormHelper 2 from crispy_forms.layout import Fieldset, Layout, Submit 3 from django.forms import ModelForm 4 from django_select2.forms import Select2MultipleWidget 5 6 from grandchallenge.core.forms import SaveFormInitMixin 7 from grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget 8 from grandchallenge.workstation_configs.models import ( 9 KEY_BINDINGS_SCHEMA, 10 OVERLAY_SEGMENTS_SCHEMA, 11 WorkstationConfig, 12 ) 13 14 GENERAL_FIELDS = ( 15 "title", 16 "description", 17 "image_context", 18 "window_presets", 19 "default_window_preset", 20 "default_slab_thickness_mm", 21 "default_slab_render_method", 22 "default_orientation", 23 "default_image_interpolation", 24 "default_limit_view_area_to_image_volume", 25 "default_overlay_alpha", 26 "ghosting_slice_depth", 27 "overlay_luts", 28 "default_overlay_lut", 29 "default_overlay_interpolation", 30 "overlay_segments", 31 "key_bindings", 32 "default_zoom_scale", 33 "default_brush_size", 34 "default_annotation_color", 35 "default_annotation_line_width", 36 "auto_jump_center_of_gravity", 37 "point_bounding_box_size_mm", 38 ) 39 PLUGIN_FIELDS = ( 40 "show_image_info_plugin", 41 "show_display_plugin", 42 "show_image_switcher_plugin", 43 "show_algorithm_output_plugin", 44 "show_overlay_plugin", 45 "show_annotation_statistics_plugin", 46 "show_swivel_tool", 47 "show_invert_tool", 48 "show_flip_tool", 49 "show_window_level_tool", 50 "show_reset_tool", 51 "show_overlay_selection_tool", 52 "show_lut_selection_tool", 53 "show_annotation_counter_tool", 54 "enable_contrast_enhancement", 55 ) 56 LINKED_FIELDS = ( 57 "link_images", 58 "link_panning", 59 "link_zooming", 60 "link_slicing", 61 "link_orienting", 62 "link_windowing", 63 "link_inverting", 64 "link_flipping", 65 ) 66 67 68 class WorkstationConfigForm(SaveFormInitMixin, ModelForm): 69 def __init__(self, *args, read_only=False, **kwargs): 70 super().__init__(*args, **kwargs) 71 72 self.helper = FormHelper(self) 73 self.helper.layout = Layout( 74 Fieldset("", *GENERAL_FIELDS), 75 Fieldset( 76 "Plugins and Tools", 77 *PLUGIN_FIELDS, 78 css_class="border rounded px-2 my-4", 79 ), 80 Fieldset( 81 "Linking Configuration", 82 *PLUGIN_FIELDS, 83 css_class="border rounded px-2 my-4", 84 ), 85 ) 86 87 if read_only: 88 for field in self.fields: 89 self.fields[field].disabled = True 90 else: 91 self.helper.layout.append(Submit("save", "Save")) 92 93 class Meta: 94 
model = WorkstationConfig 95 fields = ( 96 *GENERAL_FIELDS, 97 *PLUGIN_FIELDS, 98 *LINKED_FIELDS, 99 ) 100 101 widgets = { 102 "overlay_segments": JSONEditorWidget( 103 schema=OVERLAY_SEGMENTS_SCHEMA 104 ), 105 "key_bindings": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA), 106 "default_annotation_color": ColorEditorWidget(format="hex"), 107 "window_presets": Select2MultipleWidget, 108 "overlay_luts": Select2MultipleWidget, 109 } 110 help_texts = { 111 "overlay_segments": ( 112 model._meta.get_field("overlay_segments").help_text 113 + ". If an categorical overlay is shown, it is possible to show toggles " 114 "to change the visibility of the different overlay categories. To do " 115 "so, configure the categories that should be displayed. Data from the" 116 " algorithm's output.json can be added as an extra label to each " 117 "toggle using jinja templating. " 118 'For example: [{ "voxel_value": 0, "name": "Level 0", "visible": ' 119 'false, "metric_template": "{{metrics.volumes[0]}} mm³"},]' 120 ), 121 "key_bindings": model._meta.get_field("key_bindings").help_text 122 + ". A copy and paste JSON can be obtained from the viewer.", 123 } 124 [end of app/grandchallenge/workstation_configs/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py --- a/app/grandchallenge/workstation_configs/forms.py +++ b/app/grandchallenge/workstation_configs/forms.py @@ -79,7 +79,7 @@ ), Fieldset( "Linking Configuration", - *PLUGIN_FIELDS, + *LINKED_FIELDS, css_class="border rounded px-2 my-4", ), )
{"golden_diff": "diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py\n--- a/app/grandchallenge/workstation_configs/forms.py\n+++ b/app/grandchallenge/workstation_configs/forms.py\n@@ -79,7 +79,7 @@\n ),\n Fieldset(\n \"Linking Configuration\",\n- *PLUGIN_FIELDS,\n+ *LINKED_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n", "issue": "Viewer configuration does not show linking options\n**Describe the bug**\r\nThe view and edit pages for viewer configurations no longer show options to set the linking configuration.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to https://grand-challenge.org/viewer-configurations/demo-rse/\r\n2. Scroll down to 'Linking Configuration'\r\nThe options displayed are duplicates of the 'Plugin and Tools' section.\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**Smartphone (please complete the following information):**\r\n - Device: [e.g. iPhone6]\r\n - OS: [e.g. iOS8.1]\r\n - Browser [e.g. stock browser, safari]\r\n - Version [e.g. 22]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Layout, Submit\nfrom django.forms import ModelForm\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n KEY_BINDINGS_SCHEMA,\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\nGENERAL_FIELDS = (\n \"title\",\n \"description\",\n \"image_context\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_image_interpolation\",\n \"default_limit_view_area_to_image_volume\",\n \"default_overlay_alpha\",\n \"ghosting_slice_depth\",\n \"overlay_luts\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"key_bindings\",\n \"default_zoom_scale\",\n \"default_brush_size\",\n \"default_annotation_color\",\n \"default_annotation_line_width\",\n \"auto_jump_center_of_gravity\",\n \"point_bounding_box_size_mm\",\n)\nPLUGIN_FIELDS = (\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_image_switcher_plugin\",\n \"show_algorithm_output_plugin\",\n \"show_overlay_plugin\",\n \"show_annotation_statistics_plugin\",\n \"show_swivel_tool\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n \"show_overlay_selection_tool\",\n \"show_lut_selection_tool\",\n \"show_annotation_counter_tool\",\n \"enable_contrast_enhancement\",\n)\nLINKED_FIELDS = (\n \"link_images\",\n \"link_panning\",\n \"link_zooming\",\n \"link_slicing\",\n \"link_orienting\",\n \"link_windowing\",\n \"link_inverting\",\n \"link_flipping\",\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n def __init__(self, *args, read_only=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Fieldset(\"\", *GENERAL_FIELDS),\n Fieldset(\n 
\"Plugins and Tools\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n Fieldset(\n \"Linking Configuration\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n\n if read_only:\n for field in self.fields:\n self.fields[field].disabled = True\n else:\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n class Meta:\n model = WorkstationConfig\n fields = (\n *GENERAL_FIELDS,\n *PLUGIN_FIELDS,\n *LINKED_FIELDS,\n )\n\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n \"key_bindings\": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),\n \"default_annotation_color\": ColorEditorWidget(format=\"hex\"),\n \"window_presets\": Select2MultipleWidget,\n \"overlay_luts\": Select2MultipleWidget,\n }\n help_texts = {\n \"overlay_segments\": (\n model._meta.get_field(\"overlay_segments\").help_text\n + \". If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. \"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n ),\n \"key_bindings\": model._meta.get_field(\"key_bindings\").help_text\n + \". A copy and paste JSON can be obtained from the viewer.\",\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}]}
1,901
102
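The fix above is a one-token change — `*PLUGIN_FIELDS` splatted into the "Linking Configuration" fieldset where `*LINKED_FIELDS` belonged — which is the classic copy-paste failure mode of repeated `Fieldset(...)` calls. One hedged way to make that mistake harder to type is to derive the fieldsets from a single title-to-fields mapping; the field tuples below are abbreviated stand-ins for the full constants in `forms.py`:

```python
from crispy_forms.layout import Fieldset, Layout

GENERAL_FIELDS = ("title", "description")
PLUGIN_FIELDS = ("show_image_info_plugin", "show_display_plugin")
LINKED_FIELDS = ("link_images", "link_panning")

FIELD_GROUPS = {
    "Plugins and Tools": PLUGIN_FIELDS,
    "Linking Configuration": LINKED_FIELDS,
}

layout = Layout(
    Fieldset("", *GENERAL_FIELDS),
    *(
        Fieldset(title, *fields, css_class="border rounded px-2 my-4")
        for title, fields in FIELD_GROUPS.items()
    ),
)
```

Each section title is now paired with its field tuple in exactly one place, so an accidental reuse is visible in the mapping instead of being buried in repeated `Fieldset` calls.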
gh_patches_debug_6050
rasdani/github-patches
git_diff
learningequality__kolibri-3563
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Very long, CPU-intensive process after asking to import selected Khan Academy resources ### Observed behavior Need more info on this, but this is the best I can produce right now.. observe the 566m of CPU usage: ``` top - 01:39:58 up 2 days, 11:30, 1 user, load average: 2.10, 2.17, 2.17 Tasks: 287 total, 1 running, 286 sleeping, 0 stopped, 0 zombie %Cpu(s): 45.6 us, 12.9 sy, 0.0 ni, 41.5 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st KiB Mem : 8054696 total, 904492 free, 5504828 used, 1645376 buff/cache KiB Swap: 16381948 total, 14482384 free, 1899564 used. 1499508 avail Mem PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 15123 kolibri 20 0 3474360 1.112g 10440 S 201.0 14.5 566:35.82 kolibri ``` ### Steps to reproduce Not sure, but I have asked to download a select sub-part of the KA resources, and my laptop is stuck for hours. ### Context Kolibri 0.9.0, debian installation source, Ubuntu 16.04 </issue> <code> [start of kolibri/content/management/commands/importcontent.py] 1 import logging as logger 2 import os 3 4 from django.conf import settings 5 from django.core.management.base import CommandError 6 from requests.exceptions import HTTPError 7 8 from ...utils import annotation 9 from ...utils import import_export_content 10 from ...utils import paths 11 from ...utils import transfer 12 from kolibri.tasks.management.commands.base import AsyncCommand 13 14 # constants to specify the transfer method to be used 15 DOWNLOAD_METHOD = "download" 16 COPY_METHOD = "copy" 17 18 logging = logger.getLogger(__name__) 19 20 21 class Command(AsyncCommand): 22 23 def add_arguments(self, parser): 24 # let's save the parser in case we need to print a help statement 25 self._parser = parser 26 27 # we want two groups of arguments. One group is when the 28 # 'importcontent disk' command is given, where we'll expect a file 29 # directory to be given. Another is the 'importcontent network' 30 # command to be given, where we'll expect a channel. 31 32 # However, some optional arguments apply to both groups. Add them here! 33 node_ids_help_text = """ 34 Specify one or more node IDs to import. Only the files associated to those node IDs will be imported. 35 36 e.g. 37 38 kolibri manage importcontent --node_ids <id1>,<id2>, [<ids>,...] {network, disk} <channel id> 39 """ 40 parser.add_argument( 41 "--node_ids", "-n", 42 # Split the comma separated string we get, into a list of strings 43 type=lambda x: x.split(","), 44 default=[], 45 required=False, 46 dest="node_ids", 47 help=node_ids_help_text, 48 ) 49 50 exclude_node_ids_help_text = """ 51 Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported. 52 53 e.g. 54 55 kolibri manage importcontent --exclude_node_ids <id1>,<id2>, [<ids>,...] {network, disk} <channel id> 56 """ 57 parser.add_argument( 58 "--exclude_node_ids", 59 # Split the comma separated string we get, into a list of string 60 type=lambda x: x.split(","), 61 default=[], 62 required=False, 63 dest="exclude_node_ids", 64 help=exclude_node_ids_help_text 65 ) 66 67 parser.add_argument( 68 "--include-unrenderable-content", 69 action='store_false', 70 default=True, 71 dest="renderable_only", 72 help="Import all content, not just that which this Kolibri instance can render" 73 ) 74 75 # to implement these two groups of commands and their corresponding 76 # arguments, we'll need argparse.subparsers. 
77 subparsers = parser.add_subparsers(dest='command', help="The following subcommands are available.") 78 79 # the network command has a channel id required positional argument, 80 # and some optional content_id arguments. 81 82 # TODO: implement a --content-domain parameter, for optionally 83 # specifying the domain for the curation server. 84 85 # Note: cmd should be the management command instance, as though the 86 # interface for adding arguments is argparse, Django overrides the 87 # parser object with its own thing, hence why we need to add cmd. See 88 # http://stackoverflow.com/questions/36706220/is-it-possible-to-create-subparsers-in-a-django-management-command 89 network_subparser = subparsers.add_parser( 90 name='network', 91 cmd=self, 92 help="Download the given channel through the network.", 93 ) 94 network_subparser.add_argument('channel_id', type=str) 95 96 default_studio_url = settings.CENTRAL_CONTENT_DOWNLOAD_BASE_URL 97 network_subparser.add_argument( 98 "--baseurl", 99 type=str, 100 default=default_studio_url, 101 dest="baseurl", 102 ) 103 104 disk_subparser = subparsers.add_parser( 105 name='disk', 106 cmd=self, 107 help='Copy the content from the given folder.' 108 ) 109 disk_subparser.add_argument('channel_id', type=str) 110 disk_subparser.add_argument('directory', type=str) 111 112 def download_content(self, channel_id, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True): 113 self._transfer(DOWNLOAD_METHOD, channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, baseurl=baseurl, renderable_only=renderable_only) 114 115 def copy_content(self, channel_id, path, node_ids=None, exclude_node_ids=None, renderable_only=True): 116 self._transfer(COPY_METHOD, channel_id, path=path, node_ids=node_ids, exclude_node_ids=exclude_node_ids, renderable_only=renderable_only) 117 118 def _transfer(self, method, channel_id, path=None, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True): # noqa: max-complexity=16 119 120 files_to_download, total_bytes_to_transfer = import_export_content.get_files_to_transfer( 121 channel_id, node_ids, exclude_node_ids, False, renderable_only=renderable_only) 122 123 number_of_skipped_files = 0 124 file_checksums_to_annotate = [] 125 126 with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update: 127 128 for f in files_to_download: 129 130 if self.is_cancelled(): 131 break 132 133 filename = f.get_filename() 134 dest = paths.get_content_storage_file_path(filename) 135 136 # if the file already exists, add its size to our overall progress, and skip 137 if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size: 138 overall_progress_update(f.file_size) 139 file_checksums_to_annotate.append(f.id) 140 continue 141 142 # determine where we're downloading/copying from, and create appropriate transfer object 143 if method == DOWNLOAD_METHOD: 144 url = paths.get_content_storage_remote_url(filename, baseurl=baseurl) 145 filetransfer = transfer.FileDownload(url, dest) 146 elif method == COPY_METHOD: 147 srcpath = paths.get_content_storage_file_path(filename, datafolder=path) 148 filetransfer = transfer.FileCopy(srcpath, dest) 149 150 try: 151 152 with filetransfer: 153 154 with self.start_progress(total=filetransfer.total_size) as file_dl_progress_update: 155 156 for chunk in filetransfer: 157 if self.is_cancelled(): 158 filetransfer.cancel() 159 break 160 length = len(chunk) 161 overall_progress_update(length) 162 file_dl_progress_update(length) 163 164 
file_checksums_to_annotate.append(f.id) 165 166 except HTTPError: 167 overall_progress_update(f.file_size) 168 169 except OSError: 170 number_of_skipped_files += 1 171 overall_progress_update(f.file_size) 172 173 annotation.set_availability(channel_id, file_checksums_to_annotate) 174 175 if number_of_skipped_files > 0: 176 logging.warning( 177 "{} files are skipped, because they are not found in the given external drive.".format( 178 number_of_skipped_files)) 179 180 if self.is_cancelled(): 181 self.cancel() 182 183 def handle_async(self, *args, **options): 184 if options['command'] == 'network': 185 self.download_content(options["channel_id"], 186 node_ids=options["node_ids"], 187 exclude_node_ids=options['exclude_node_ids'], 188 baseurl=options["baseurl"], 189 renderable_only=options["renderable_only"]) 190 elif options['command'] == 'disk': 191 self.copy_content(options["channel_id"], 192 options["directory"], 193 node_ids=options["node_ids"], 194 exclude_node_ids=options["exclude_node_ids"], 195 renderable_only=options["renderable_only"]) 196 else: 197 self._parser.print_help() 198 raise CommandError("Please give a valid subcommand. You gave: {}".format(options["command"])) 199 [end of kolibri/content/management/commands/importcontent.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/content/management/commands/importcontent.py b/kolibri/content/management/commands/importcontent.py --- a/kolibri/content/management/commands/importcontent.py +++ b/kolibri/content/management/commands/importcontent.py @@ -67,7 +67,7 @@ parser.add_argument( "--include-unrenderable-content", action='store_false', - default=True, + default=False, dest="renderable_only", help="Import all content, not just that which this Kolibri instance can render" )
{"golden_diff": "diff --git a/kolibri/content/management/commands/importcontent.py b/kolibri/content/management/commands/importcontent.py\n--- a/kolibri/content/management/commands/importcontent.py\n+++ b/kolibri/content/management/commands/importcontent.py\n@@ -67,7 +67,7 @@\n parser.add_argument(\n \"--include-unrenderable-content\",\n action='store_false',\n- default=True,\n+ default=False,\n dest=\"renderable_only\",\n help=\"Import all content, not just that which this Kolibri instance can render\"\n )\n", "issue": "Very long, CPU-intensive process after asking to import selected Khan Academy resources\n### Observed behavior\r\n\r\nNeed more info on this, but this is the best I can produce right now.. observe the 566m of CPU usage:\r\n\r\n```\r\ntop - 01:39:58 up 2 days, 11:30, 1 user, load average: 2.10, 2.17, 2.17\r\nTasks: 287 total, 1 running, 286 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 45.6 us, 12.9 sy, 0.0 ni, 41.5 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st\r\nKiB Mem : 8054696 total, 904492 free, 5504828 used, 1645376 buff/cache\r\nKiB Swap: 16381948 total, 14482384 free, 1899564 used. 1499508 avail Mem \r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND \r\n15123 kolibri 20 0 3474360 1.112g 10440 S 201.0 14.5 566:35.82 kolibri \r\n```\r\n\r\n### Steps to reproduce\r\n\r\nNot sure, but I have asked to download a select sub-part of the KA resources, and my laptop is stuck for hours.\r\n\r\n\r\n### Context\r\n\r\nKolibri 0.9.0, debian installation source, Ubuntu 16.04\r\n\n", "before_files": [{"content": "import logging as logger\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import CommandError\nfrom requests.exceptions import HTTPError\n\nfrom ...utils import annotation\nfrom ...utils import import_export_content\nfrom ...utils import paths\nfrom ...utils import transfer\nfrom kolibri.tasks.management.commands.base import AsyncCommand\n\n# constants to specify the transfer method to be used\nDOWNLOAD_METHOD = \"download\"\nCOPY_METHOD = \"copy\"\n\nlogging = logger.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n\n def add_arguments(self, parser):\n # let's save the parser in case we need to print a help statement\n self._parser = parser\n\n # we want two groups of arguments. One group is when the\n # 'importcontent disk' command is given, where we'll expect a file\n # directory to be given. Another is the 'importcontent network'\n # command to be given, where we'll expect a channel.\n\n # However, some optional arguments apply to both groups. Add them here!\n node_ids_help_text = \"\"\"\n Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.\n\n e.g.\n\n kolibri manage importcontent --node_ids <id1>,<id2>, [<ids>,...] {network, disk} <channel id>\n \"\"\"\n parser.add_argument(\n \"--node_ids\", \"-n\",\n # Split the comma separated string we get, into a list of strings\n type=lambda x: x.split(\",\"),\n default=[],\n required=False,\n dest=\"node_ids\",\n help=node_ids_help_text,\n )\n\n exclude_node_ids_help_text = \"\"\"\n Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.\n\n e.g.\n\n kolibri manage importcontent --exclude_node_ids <id1>,<id2>, [<ids>,...] 
{network, disk} <channel id>\n \"\"\"\n parser.add_argument(\n \"--exclude_node_ids\",\n # Split the comma separated string we get, into a list of string\n type=lambda x: x.split(\",\"),\n default=[],\n required=False,\n dest=\"exclude_node_ids\",\n help=exclude_node_ids_help_text\n )\n\n parser.add_argument(\n \"--include-unrenderable-content\",\n action='store_false',\n default=True,\n dest=\"renderable_only\",\n help=\"Import all content, not just that which this Kolibri instance can render\"\n )\n\n # to implement these two groups of commands and their corresponding\n # arguments, we'll need argparse.subparsers.\n subparsers = parser.add_subparsers(dest='command', help=\"The following subcommands are available.\")\n\n # the network command has a channel id required positional argument,\n # and some optional content_id arguments.\n\n # TODO: implement a --content-domain parameter, for optionally\n # specifying the domain for the curation server.\n\n # Note: cmd should be the management command instance, as though the\n # interface for adding arguments is argparse, Django overrides the\n # parser object with its own thing, hence why we need to add cmd. See\n # http://stackoverflow.com/questions/36706220/is-it-possible-to-create-subparsers-in-a-django-management-command\n network_subparser = subparsers.add_parser(\n name='network',\n cmd=self,\n help=\"Download the given channel through the network.\",\n )\n network_subparser.add_argument('channel_id', type=str)\n\n default_studio_url = settings.CENTRAL_CONTENT_DOWNLOAD_BASE_URL\n network_subparser.add_argument(\n \"--baseurl\",\n type=str,\n default=default_studio_url,\n dest=\"baseurl\",\n )\n\n disk_subparser = subparsers.add_parser(\n name='disk',\n cmd=self,\n help='Copy the content from the given folder.'\n )\n disk_subparser.add_argument('channel_id', type=str)\n disk_subparser.add_argument('directory', type=str)\n\n def download_content(self, channel_id, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True):\n self._transfer(DOWNLOAD_METHOD, channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, baseurl=baseurl, renderable_only=renderable_only)\n\n def copy_content(self, channel_id, path, node_ids=None, exclude_node_ids=None, renderable_only=True):\n self._transfer(COPY_METHOD, channel_id, path=path, node_ids=node_ids, exclude_node_ids=exclude_node_ids, renderable_only=renderable_only)\n\n def _transfer(self, method, channel_id, path=None, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True): # noqa: max-complexity=16\n\n files_to_download, total_bytes_to_transfer = import_export_content.get_files_to_transfer(\n channel_id, node_ids, exclude_node_ids, False, renderable_only=renderable_only)\n\n number_of_skipped_files = 0\n file_checksums_to_annotate = []\n\n with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update:\n\n for f in files_to_download:\n\n if self.is_cancelled():\n break\n\n filename = f.get_filename()\n dest = paths.get_content_storage_file_path(filename)\n\n # if the file already exists, add its size to our overall progress, and skip\n if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size:\n overall_progress_update(f.file_size)\n file_checksums_to_annotate.append(f.id)\n continue\n\n # determine where we're downloading/copying from, and create appropriate transfer object\n if method == DOWNLOAD_METHOD:\n url = paths.get_content_storage_remote_url(filename, baseurl=baseurl)\n filetransfer = transfer.FileDownload(url, dest)\n elif 
method == COPY_METHOD:\n srcpath = paths.get_content_storage_file_path(filename, datafolder=path)\n filetransfer = transfer.FileCopy(srcpath, dest)\n\n try:\n\n with filetransfer:\n\n with self.start_progress(total=filetransfer.total_size) as file_dl_progress_update:\n\n for chunk in filetransfer:\n if self.is_cancelled():\n filetransfer.cancel()\n break\n length = len(chunk)\n overall_progress_update(length)\n file_dl_progress_update(length)\n\n file_checksums_to_annotate.append(f.id)\n\n except HTTPError:\n overall_progress_update(f.file_size)\n\n except OSError:\n number_of_skipped_files += 1\n overall_progress_update(f.file_size)\n\n annotation.set_availability(channel_id, file_checksums_to_annotate)\n\n if number_of_skipped_files > 0:\n logging.warning(\n \"{} files are skipped, because they are not found in the given external drive.\".format(\n number_of_skipped_files))\n\n if self.is_cancelled():\n self.cancel()\n\n def handle_async(self, *args, **options):\n if options['command'] == 'network':\n self.download_content(options[\"channel_id\"],\n node_ids=options[\"node_ids\"],\n exclude_node_ids=options['exclude_node_ids'],\n baseurl=options[\"baseurl\"],\n renderable_only=options[\"renderable_only\"])\n elif options['command'] == 'disk':\n self.copy_content(options[\"channel_id\"],\n options[\"directory\"],\n node_ids=options[\"node_ids\"],\n exclude_node_ids=options[\"exclude_node_ids\"],\n renderable_only=options[\"renderable_only\"])\n else:\n self._parser.print_help()\n raise CommandError(\"Please give a valid subcommand. You gave: {}\".format(options[\"command\"]))\n", "path": "kolibri/content/management/commands/importcontent.py"}]}
3,106
124
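The golden diff flips only the `default` on an `action='store_false'` flag, and the interaction between those two argparse settings is easy to misread: with `store_false`, passing the flag always stores `False`, while `default` decides what the destination holds when the flag is absent. A standalone illustration — this parser is built just for the demo and is not Kolibri code:

```python
import argparse


def build(default):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--include-unrenderable-content",
        action="store_false",
        default=default,
        dest="renderable_only",
    )
    return parser


# default=True: the flag is a real toggle.
print(build(True).parse_args([]).renderable_only)   # True
print(build(True).parse_args(
    ["--include-unrenderable-content"]).renderable_only)  # False

# default=False, as in the patch: renderable_only is False either way,
# i.e. unrenderable content is imported unless something else sets it.
print(build(False).parse_args([]).renderable_only)  # False
```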
gh_patches_debug_36194
rasdani/github-patches
git_diff
rlworkgroup__garage-625
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FireReset is firing warnings in the CI from recent tests runs ```sh UserWarning: WARN: <class 'garage.envs.wrappers.fire_reset.FireReset'> doesn't implement 'step' method, which is required for wrappers derived directly from Wrapper. Deprecated default implementation is used. ``` </issue> <code> [start of garage/envs/wrappers/noop.py] 1 """Noop wrapper for gym.Env.""" 2 import gym 3 import numpy as np 4 5 6 class Noop(gym.Wrapper): 7 """ 8 Noop wrapper for gym.Env. 9 10 It samples initial states by taking random number of no-ops on reset. 11 No-op is assumed to be action 0. 12 13 Args: 14 env: The environment to be wrapped. 15 noop_max: Maximum number no-op to be performed on reset. 16 """ 17 18 def __init__(self, env, noop_max=30): 19 super().__init__(env) 20 self._noop_max = noop_max 21 self._noop_action = 0 22 assert noop_max > 0, "noop_max should be larger than 0!" 23 assert env.unwrapped.get_action_meanings()[0] == 'NOOP', ( 24 "No-op should be the 0-th action but it's not in {}!".format(env)) 25 26 def reset(self, **kwargs): 27 """gym.Env reset function.""" 28 obs = self.env.reset(**kwargs) 29 noops = np.random.randint(1, self._noop_max + 1) 30 for _ in range(noops): 31 obs, _, done, _ = self.env.step(self._noop_action) 32 if done: 33 obs = self.env.reset(**kwargs) 34 return obs 35 [end of garage/envs/wrappers/noop.py] [start of garage/envs/wrappers/fire_reset.py] 1 """Fire reset wrapper for gym.Env.""" 2 import gym 3 4 5 class FireReset(gym.Wrapper): 6 """ 7 Fire reset wrapper for gym.Env. 8 9 Take action "fire" on reset. 10 11 Args: 12 env: The environment to be wrapped. 13 """ 14 15 def __init__(self, env): 16 super().__init__(env) 17 assert env.unwrapped.get_action_meanings()[1] == 'FIRE', ( 18 "Only use fire reset wrapper for suitable environment!") 19 assert len(env.unwrapped.get_action_meanings()) >= 3, ( 20 "Only use fire reset wrapper for suitable environment!") 21 22 def reset(self, **kwargs): 23 """gym.Env reset function.""" 24 self.env.reset(**kwargs) 25 obs, _, done, _ = self.env.step(1) 26 if done: 27 obs = self.env.reset(**kwargs) 28 return obs 29 [end of garage/envs/wrappers/fire_reset.py] [start of garage/envs/wrappers/clip_reward.py] 1 """Clip reward for gym.Env.""" 2 import gym 3 import numpy as np 4 5 6 class ClipReward(gym.Wrapper): 7 """Clip the reward by its sign.""" 8 9 def step(self, ac): 10 """gym.Env step function.""" 11 obs, reward, done, info = self.env.step(ac) 12 return obs, np.sign(reward), done, info 13 [end of garage/envs/wrappers/clip_reward.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/garage/envs/wrappers/clip_reward.py b/garage/envs/wrappers/clip_reward.py --- a/garage/envs/wrappers/clip_reward.py +++ b/garage/envs/wrappers/clip_reward.py @@ -10,3 +10,7 @@ """gym.Env step function.""" obs, reward, done, info = self.env.step(ac) return obs, np.sign(reward), done, info + + def reset(self): + """gym.Env reset.""" + return self.env.reset() diff --git a/garage/envs/wrappers/fire_reset.py b/garage/envs/wrappers/fire_reset.py --- a/garage/envs/wrappers/fire_reset.py +++ b/garage/envs/wrappers/fire_reset.py @@ -15,9 +15,9 @@ def __init__(self, env): super().__init__(env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE', ( - "Only use fire reset wrapper for suitable environment!") + 'Only use fire reset wrapper for suitable environment!') assert len(env.unwrapped.get_action_meanings()) >= 3, ( - "Only use fire reset wrapper for suitable environment!") + 'Only use fire reset wrapper for suitable environment!') def reset(self, **kwargs): """gym.Env reset function.""" @@ -26,3 +26,7 @@ if done: obs = self.env.reset(**kwargs) return obs + + def step(self, action): + """gym.Env step function.""" + return self.env.step(action) diff --git a/garage/envs/wrappers/noop.py b/garage/envs/wrappers/noop.py --- a/garage/envs/wrappers/noop.py +++ b/garage/envs/wrappers/noop.py @@ -19,7 +19,7 @@ super().__init__(env) self._noop_max = noop_max self._noop_action = 0 - assert noop_max > 0, "noop_max should be larger than 0!" + assert noop_max > 0, 'noop_max should be larger than 0!' assert env.unwrapped.get_action_meanings()[0] == 'NOOP', ( "No-op should be the 0-th action but it's not in {}!".format(env)) @@ -28,7 +28,11 @@ obs = self.env.reset(**kwargs) noops = np.random.randint(1, self._noop_max + 1) for _ in range(noops): - obs, _, done, _ = self.env.step(self._noop_action) + obs, _, done, _ = self.step(self._noop_action) if done: obs = self.env.reset(**kwargs) return obs + + def step(self, action): + """gym.Env step function.""" + return self.env.step(action)
{"golden_diff": "diff --git a/garage/envs/wrappers/clip_reward.py b/garage/envs/wrappers/clip_reward.py\n--- a/garage/envs/wrappers/clip_reward.py\n+++ b/garage/envs/wrappers/clip_reward.py\n@@ -10,3 +10,7 @@\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n+\n+ def reset(self):\n+ \"\"\"gym.Env reset.\"\"\"\n+ return self.env.reset()\ndiff --git a/garage/envs/wrappers/fire_reset.py b/garage/envs/wrappers/fire_reset.py\n--- a/garage/envs/wrappers/fire_reset.py\n+++ b/garage/envs/wrappers/fire_reset.py\n@@ -15,9 +15,9 @@\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n- \"Only use fire reset wrapper for suitable environment!\")\n+ 'Only use fire reset wrapper for suitable environment!')\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n- \"Only use fire reset wrapper for suitable environment!\")\n+ 'Only use fire reset wrapper for suitable environment!')\n \n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n@@ -26,3 +26,7 @@\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n+\n+ def step(self, action):\n+ \"\"\"gym.Env step function.\"\"\"\n+ return self.env.step(action)\ndiff --git a/garage/envs/wrappers/noop.py b/garage/envs/wrappers/noop.py\n--- a/garage/envs/wrappers/noop.py\n+++ b/garage/envs/wrappers/noop.py\n@@ -19,7 +19,7 @@\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n- assert noop_max > 0, \"noop_max should be larger than 0!\"\n+ assert noop_max > 0, 'noop_max should be larger than 0!'\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n \n@@ -28,7 +28,11 @@\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n- obs, _, done, _ = self.env.step(self._noop_action)\n+ obs, _, done, _ = self.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n+\n+ def step(self, action):\n+ \"\"\"gym.Env step function.\"\"\"\n+ return self.env.step(action)\n", "issue": "FireReset is firing warnings in the CI\nfrom recent tests runs\r\n```sh\r\nUserWarning: WARN: <class 'garage.envs.wrappers.fire_reset.FireReset'> doesn't implement 'step' method, which is required for wrappers derived directly from Wrapper. 
Deprecated default implementation is used.\r\n```\n", "before_files": [{"content": "\"\"\"Noop wrapper for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass Noop(gym.Wrapper):\n \"\"\"\n Noop wrapper for gym.Env.\n\n It samples initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n\n Args:\n env: The environment to be wrapped.\n noop_max: Maximum number no-op to be performed on reset.\n \"\"\"\n\n def __init__(self, env, noop_max=30):\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n assert noop_max > 0, \"noop_max should be larger than 0!\"\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n", "path": "garage/envs/wrappers/noop.py"}, {"content": "\"\"\"Fire reset wrapper for gym.Env.\"\"\"\nimport gym\n\n\nclass FireReset(gym.Wrapper):\n \"\"\"\n Fire reset wrapper for gym.Env.\n\n Take action \"fire\" on reset.\n\n Args:\n env: The environment to be wrapped.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n \"Only use fire reset wrapper for suitable environment!\")\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n \"Only use fire reset wrapper for suitable environment!\")\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n", "path": "garage/envs/wrappers/fire_reset.py"}, {"content": "\"\"\"Clip reward for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass ClipReward(gym.Wrapper):\n \"\"\"Clip the reward by its sign.\"\"\"\n\n def step(self, ac):\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n", "path": "garage/envs/wrappers/clip_reward.py"}]}
1,321
662
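The warning quoted in the issue comes from gym's `Wrapper` base class, which in the classic gym API these records target emits a deprecation notice when a direct subclass relies on the default delegation for `step`. The patch therefore forwards the method explicitly. A minimal sketch of the same pattern — the class name is illustrative, and action `1` is assumed to be `FIRE` as in the record:

```python
import gym


class FireOnReset(gym.Wrapper):
    """Press FIRE once on reset; forward step() explicitly to avoid
    gym's deprecated default delegation for direct Wrapper subclasses."""

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)  # action 1 assumed to be FIRE
        if done:
            obs = self.env.reset(**kwargs)
        return obs

    def step(self, action):
        return self.env.step(action)


# Usage (requires gym's Atari extras to be installed):
# env = FireOnReset(gym.make("BreakoutNoFrameskip-v4"))
```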
gh_patches_debug_660
rasdani/github-patches
git_diff
pex-tool__pex-2153
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.137 On the docket: + [x] A locked requirement with mixed artifact types fails to lock. #2150 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.136" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.136" +__version__ = "2.1.137"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.136\"\n+__version__ = \"2.1.137\"\n", "issue": "Release 2.1.137\nOn the docket:\r\n+ [x] A locked requirement with mixed artifact types fails to lock. #2150\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.136\"\n", "path": "pex/version.py"}]}
619
98
gh_patches_debug_19672
rasdani/github-patches
git_diff
NVIDIA__apex-620
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> _amp_state determines whether running in distributed at import It looks like the `_amp_state.py` module determines whether pytorch is running in distributed mode at the import level. The `distributed` only seems to be used in `maybe_print`. See code snippet: https://github.com/NVIDIA/apex/blob/37cdaf4ad57ab4e7dd9ef13dbed7b29aa939d061/apex/amp/_amp_state.py#L38-L52 This causes a couple issues: 1. It will only support the `env://` initialization of torch distributed 2. It will fail if amp is imported before launching the distributed training Neither of these is an issue for most, since most people launch via `torch.distributed.launch`. However, it can be an issue if you define your own distributed launch function or use `torch.multiprocessing.spawn`. I can't see a good reason to do it this way anyway, as it appears this variable is only used in the `maybe_print` function. I'll submit a pull request to fix this. Let me know if I'm missing something though. </issue> <code> [start of apex/amp/_amp_state.py] 1 # This is a "header object" that allows different amp modules to communicate. 2 # I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. 3 # But apparently it's ok: 4 # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm 5 import os 6 import torch 7 8 TORCH_MAJOR = int(torch.__version__.split('.')[0]) 9 TORCH_MINOR = int(torch.__version__.split('.')[1]) 10 11 if TORCH_MAJOR == 0: 12 import collections.abc as container_abcs 13 else: 14 from torch._six import container_abcs 15 16 17 class AmpState(object): 18 def __init__(self): 19 self.hard_override=False 20 self.allow_incoming_model_not_fp32 = False 21 self.verbosity=1 22 23 24 # Attribute stash. Could also just stash things as global module attributes. 25 _amp_state = AmpState() 26 27 28 def warn_or_err(msg): 29 if _amp_state.hard_override: 30 print("Warning: " + msg) 31 else: 32 raise RuntimeError(msg) 33 # I'm not sure if allowing hard_override is a good idea. 34 # + " If you're sure you know what you're doing, supply " + 35 # "hard_override=True to amp.initialize.") 36 37 38 distributed = False 39 if 'WORLD_SIZE' in os.environ: 40 distributed = int(os.environ['WORLD_SIZE']) > 1 41 42 43 def maybe_print(msg, rank0=False): 44 if _amp_state.verbosity > 0: 45 if rank0: 46 if distributed: 47 if torch.distributed.get_rank() == 0: 48 print(msg) 49 else: 50 print(msg) 51 else: 52 print(msg) 53 54 55 # def iter_params(param_groups): 56 # for group in param_groups: 57 # for p in group['params']: 58 # yield p 59 60 61 def master_params(optimizer): 62 """ 63 Generator expression that iterates over the params owned by ``optimizer``. 64 65 Args: 66 optimizer: An optimizer previously returned from ``amp.initialize``. 67 """ 68 for group in optimizer.param_groups: 69 for p in group['params']: 70 yield p 71 [end of apex/amp/_amp_state.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apex/amp/_amp_state.py b/apex/amp/_amp_state.py --- a/apex/amp/_amp_state.py +++ b/apex/amp/_amp_state.py @@ -1,5 +1,5 @@ # This is a "header object" that allows different amp modules to communicate. -# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. +# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. # But apparently it's ok: # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm import os @@ -35,12 +35,9 @@ # "hard_override=True to amp.initialize.") -distributed = False -if 'WORLD_SIZE' in os.environ: - distributed = int(os.environ['WORLD_SIZE']) > 1 - - def maybe_print(msg, rank0=False): + distributed = torch.distributed.is_initialized() and \ + torch.distributed.get_world_size() > 1 if _amp_state.verbosity > 0: if rank0: if distributed:
{"golden_diff": "diff --git a/apex/amp/_amp_state.py b/apex/amp/_amp_state.py\n--- a/apex/amp/_amp_state.py\n+++ b/apex/amp/_amp_state.py\n@@ -1,5 +1,5 @@\n # This is a \"header object\" that allows different amp modules to communicate.\n-# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. \n+# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.\n # But apparently it's ok:\n # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\n import os\n@@ -35,12 +35,9 @@\n # \"hard_override=True to amp.initialize.\")\n \n \n-distributed = False\n-if 'WORLD_SIZE' in os.environ:\n- distributed = int(os.environ['WORLD_SIZE']) > 1\n-\n-\n def maybe_print(msg, rank0=False):\n+ distributed = torch.distributed.is_initialized() and \\\n+ torch.distributed.get_world_size() > 1\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n", "issue": "_amp_state determines whether running in distributed at import\nIt looks like the `_amp_state.py` module determines whether pytorch is running in distributed mode at the import level. The `distributed` only seems to be used in `maybe_print`. See code snippet:\r\n\r\nhttps://github.com/NVIDIA/apex/blob/37cdaf4ad57ab4e7dd9ef13dbed7b29aa939d061/apex/amp/_amp_state.py#L38-L52\r\n\r\nThis causes a couple issues:\r\n\r\n1. It will only support the `env://` initialization of torch distributed\r\n2. It will fail if amp is imported before launching the distributed training\r\n\r\nNeither of these is an issue for most, since most people launch via `torch.distributed.launch`. However, it can be an issue if you define your own distributed launch function or use `torch.multiprocessing.spawn`. I can't see a good reason to do it this way anyway, as it appears this variable is only used in the `maybe_print` function. I'll submit a pull request to fix this. Let me know if I'm missing something though.\n", "before_files": [{"content": "# This is a \"header object\" that allows different amp modules to communicate.\n# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. \n# But apparently it's ok:\n# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\nimport os\nimport torch\n\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0:\n import collections.abc as container_abcs\nelse:\n from torch._six import container_abcs\n\n\nclass AmpState(object):\n def __init__(self):\n self.hard_override=False\n self.allow_incoming_model_not_fp32 = False\n self.verbosity=1\n\n\n# Attribute stash. 
Could also just stash things as global module attributes.\n_amp_state = AmpState()\n\n\ndef warn_or_err(msg):\n if _amp_state.hard_override:\n print(\"Warning: \" + msg)\n else:\n raise RuntimeError(msg)\n # I'm not sure if allowing hard_override is a good idea.\n # + \" If you're sure you know what you're doing, supply \" +\n # \"hard_override=True to amp.initialize.\")\n\n\ndistributed = False\nif 'WORLD_SIZE' in os.environ:\n distributed = int(os.environ['WORLD_SIZE']) > 1\n\n\ndef maybe_print(msg, rank0=False):\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n if torch.distributed.get_rank() == 0:\n print(msg)\n else:\n print(msg)\n else:\n print(msg)\n\n\n# def iter_params(param_groups):\n# for group in param_groups:\n# for p in group['params']:\n# yield p\n\n\ndef master_params(optimizer):\n \"\"\"\n Generator expression that iterates over the params owned by ``optimizer``.\n\n Args:\n optimizer: An optimizer previously returned from ``amp.initialize``.\n \"\"\"\n for group in optimizer.param_groups:\n for p in group['params']:\n yield p\n", "path": "apex/amp/_amp_state.py"}]}
1378
268
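For reference, a minimal standalone sketch of the fix direction in the record above: the distributed check moves from import time into `maybe_print` itself, so custom launchers and `torch.multiprocessing.spawn` work even when amp is imported before the process group exists. The `is_available()` guard is an extra safety assumption beyond the golden diff, and the `_amp_state.verbosity` gate is omitted to keep the sketch self-contained.

```python
import torch


def maybe_print(msg: str, rank0: bool = False) -> None:
    # Evaluate distributed state at call time, not import time, so this
    # works regardless of how (or when) torch.distributed was initialized.
    distributed = (
        torch.distributed.is_available()
        and torch.distributed.is_initialized()
        and torch.distributed.get_world_size() > 1
    )
    # Print unless rank0 was requested in a multi-process run and this
    # process is not rank 0; single-process runs always print.
    if not rank0 or not distributed or torch.distributed.get_rank() == 0:
        print(msg)
```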
gh_patches_debug_9775
rasdani/github-patches
git_diff
tensorflow__tfx-635
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `transform_output` not truely optional in 0.14.0 release `transform_output` not truely optional for Trainer in 0.14.0 release. Culprit code is [here](https://github.com/tensorflow/tfx/blob/r0.14/tfx/components/trainer/executor.py#L148). </issue> <code> [start of tfx/components/trainer/executor.py] 1 # Copyright 2019 Google LLC. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """TFX local trainer executor.""" 15 16 from __future__ import absolute_import 17 from __future__ import division 18 from __future__ import print_function 19 20 import os 21 import tensorflow as tf 22 import tensorflow_model_analysis as tfma 23 from typing import Any, Dict, List, Text 24 25 from tensorflow_metadata.proto.v0 import schema_pb2 26 from tfx import types 27 from tfx.components.base import base_executor 28 from tfx.extensions.google_cloud_ai_platform import runner 29 from tfx.proto import trainer_pb2 30 from tfx.types import artifact_utils 31 from tfx.utils import import_utils 32 from tfx.utils import io_utils 33 from tfx.utils import path_utils 34 from google.protobuf import json_format 35 36 37 def _all_files_pattern(file_pattern: Text) -> Text: 38 return '{}*'.format(file_pattern) 39 40 41 class Executor(base_executor.BaseExecutor): 42 """Local trainer used by the TFX Trainer component. 43 44 The Trainer executor supplements TensorFlow training with a component to 45 enable warm-start training of any user-specified tf.estimator. The Trainer is 46 a library built on top of TensorFlow that is expected to be integrated into a 47 custom user-specified binary. 48 49 To include Trainer in a TFX pipeline, configure your pipeline similar to 50 https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104. 51 52 For more details on the Trainer component itself, please refer to 53 https://tensorflow.org/tfx/guide/trainer. For a tutorial on TF Estimator, 54 please refer to https://www.tensorflow.org/extend/estimators. 55 56 How to create a trainer callback function to be used by this Trainer executor: 57 An estimator can be executed by TFX by first creating a trainer_fn callback 58 method that returns an estimator and some additional parameters, similar to 59 https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285. 60 This becomes the basis of the new Executor for Trainer. This Executor will 61 then train and evaluate this estimator using the 62 tf.estimator.train_and_evaluate API to train locally. 
63 """ 64 65 # Name of subdirectory which contains checkpoints from prior runs 66 _CHECKPOINT_FILE_NAME = 'checkpoint' 67 68 def _GetTrainerFn(self, exec_properties: Dict[Text, Any]) -> Any: 69 """Loads and returns user-defined trainer_fn.""" 70 71 has_module_file = bool(exec_properties.get('module_file')) 72 has_trainer_fn = bool(exec_properties.get('trainer_fn')) 73 74 if has_module_file == has_trainer_fn: 75 raise ValueError( 76 "Neither or both of 'module_file' 'trainer_fn' have been supplied in " 77 "'exec_properties'.") 78 79 if has_module_file: 80 return import_utils.import_func_from_source( 81 exec_properties['module_file'], 'trainer_fn') 82 83 trainer_fn_path_split = exec_properties['trainer_fn'].split('.') 84 return import_utils.import_func_from_module( 85 '.'.join(trainer_fn_path_split[0:-1]), trainer_fn_path_split[-1]) 86 87 def Do(self, input_dict: Dict[Text, List[types.Artifact]], 88 output_dict: Dict[Text, List[types.Artifact]], 89 exec_properties: Dict[Text, Any]) -> None: 90 """Uses a user-supplied tf.estimator to train a TensorFlow model locally. 91 92 The Trainer Executor invokes a training_fn callback function provided by 93 the user via the module_file parameter. With the tf.estimator returned by 94 this function, the Trainer Executor then builds a TensorFlow model using the 95 user-provided tf.estimator. 96 97 Args: 98 input_dict: Input dict from input key to a list of ML-Metadata Artifacts. 99 - examples: Examples used for training, must include 'train' and 'eval' 100 splits. 101 - transform_output: Optional input transform graph. 102 - schema: Schema of the data. 103 output_dict: Output dict from output key to a list of Artifacts. 104 - output: Exported model. 105 exec_properties: A dict of execution properties. 106 - train_args: JSON string of trainer_pb2.TrainArgs instance, providing 107 args for training. 108 - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing 109 args for eval. 110 - module_file: Python module file containing UDF model definition. 111 - warm_starting: Whether or not we need to do warm starting. 112 - warm_start_from: Optional. If warm_starting is True, this is the 113 directory to find previous model to warm start on. 114 115 Returns: 116 None 117 118 Raises: 119 ValueError: When neither or both of 'module_file' and 'trainer_fn' 120 are present in 'exec_properties'. 121 """ 122 self._log_startup(input_dict, output_dict, exec_properties) 123 124 # TODO(zhitaoli): Deprecate this in a future version. 
125 if exec_properties.get('custom_config', None): 126 cmle_args = exec_properties.get('custom_config', 127 {}).get('cmle_training_args') 128 if cmle_args: 129 executor_class_path = '.'.join([Executor.__module__, Executor.__name__]) 130 tf.logging.warn( 131 'Passing \'cmle_training_args\' to trainer directly is deprecated, ' 132 'please use extension executor at ' 133 'tfx.extensions.google_cloud_ai_platform.trainer.executor instead') 134 135 return runner.start_cmle_training(input_dict, output_dict, 136 exec_properties, executor_class_path, 137 cmle_args) 138 139 trainer_fn = self._GetTrainerFn(exec_properties) 140 141 # Set up training parameters 142 train_files = [ 143 _all_files_pattern( 144 artifact_utils.get_split_uri(input_dict['examples'], 'train')) 145 ] 146 transform_output = artifact_utils.get_single_uri( 147 input_dict['transform_output'] 148 ) if input_dict['transform_output'] else None 149 eval_files = [ 150 _all_files_pattern( 151 artifact_utils.get_split_uri(input_dict['examples'], 'eval')) 152 ] 153 schema_file = io_utils.get_only_uri_in_dir( 154 artifact_utils.get_single_uri(input_dict['schema'])) 155 156 train_args = trainer_pb2.TrainArgs() 157 eval_args = trainer_pb2.EvalArgs() 158 json_format.Parse(exec_properties['train_args'], train_args) 159 json_format.Parse(exec_properties['eval_args'], eval_args) 160 161 # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with 162 # num_steps=None. Conversion of the proto to python will set the default 163 # value of an int as 0 so modify the value here. Tensorflow will raise an 164 # error if num_steps <= 0. 165 train_steps = train_args.num_steps or None 166 eval_steps = eval_args.num_steps or None 167 168 output_path = artifact_utils.get_single_uri(output_dict['output']) 169 serving_model_dir = path_utils.serving_model_dir(output_path) 170 eval_model_dir = path_utils.eval_model_dir(output_path) 171 172 # Assemble warm start path if needed. 173 warm_start_from = None 174 if exec_properties.get('warm_starting') and exec_properties.get( 175 'warm_start_from'): 176 previous_model_dir = os.path.join(exec_properties['warm_start_from'], 177 path_utils.SERVING_MODEL_DIR) 178 if previous_model_dir and tf.gfile.Exists( 179 os.path.join(previous_model_dir, self._CHECKPOINT_FILE_NAME)): 180 warm_start_from = previous_model_dir 181 182 # TODO(b/126242806) Use PipelineInputs when it is available in third_party. 183 hparams = tf.contrib.training.HParams( 184 # A list of uris for train files. 185 train_files=train_files, 186 # An optional single uri for transform graph produced by TFT. Will be 187 # None if not specified. 188 transform_output=transform_output, 189 # A single uri for the output directory of the serving model. 190 serving_model_dir=serving_model_dir, 191 # A list of uris for eval files. 192 eval_files=eval_files, 193 # A single uri for schema file. 194 schema_file=schema_file, 195 # Number of train steps. 196 train_steps=train_steps, 197 # Number of eval steps. 198 eval_steps=eval_steps, 199 # A single uri for the model directory to warm start from. 200 warm_start_from=warm_start_from) 201 202 schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema()) 203 204 training_spec = trainer_fn(hparams, schema) 205 206 # Train the model 207 tf.logging.info('Training model.') 208 tf.estimator.train_and_evaluate(training_spec['estimator'], 209 training_spec['train_spec'], 210 training_spec['eval_spec']) 211 tf.logging.info('Training complete. 
Model written to %s', 212 serving_model_dir) 213 214 # Export an eval savedmodel for TFMA 215 tf.logging.info('Exporting eval_savedmodel for TFMA.') 216 tfma.export.export_eval_savedmodel( 217 estimator=training_spec['estimator'], 218 export_dir_base=eval_model_dir, 219 eval_input_receiver_fn=training_spec['eval_input_receiver_fn']) 220 221 tf.logging.info('Exported eval_savedmodel to %s.', eval_model_dir) 222 [end of tfx/components/trainer/executor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tfx/components/trainer/executor.py b/tfx/components/trainer/executor.py --- a/tfx/components/trainer/executor.py +++ b/tfx/components/trainer/executor.py @@ -144,8 +144,8 @@ artifact_utils.get_split_uri(input_dict['examples'], 'train')) ] transform_output = artifact_utils.get_single_uri( - input_dict['transform_output'] - ) if input_dict['transform_output'] else None + input_dict['transform_output']) if input_dict.get( + 'transform_output', None) else None eval_files = [ _all_files_pattern( artifact_utils.get_split_uri(input_dict['examples'], 'eval'))
{"golden_diff": "diff --git a/tfx/components/trainer/executor.py b/tfx/components/trainer/executor.py\n--- a/tfx/components/trainer/executor.py\n+++ b/tfx/components/trainer/executor.py\n@@ -144,8 +144,8 @@\n artifact_utils.get_split_uri(input_dict['examples'], 'train'))\n ]\n transform_output = artifact_utils.get_single_uri(\n- input_dict['transform_output']\n- ) if input_dict['transform_output'] else None\n+ input_dict['transform_output']) if input_dict.get(\n+ 'transform_output', None) else None\n eval_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'eval'))\n", "issue": "`transform_output` not truely optional in 0.14.0 release\n`transform_output` not truely optional for Trainer in 0.14.0 release. Culprit code is [here](https://github.com/tensorflow/tfx/blob/r0.14/tfx/components/trainer/executor.py#L148).\n", "before_files": [{"content": "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TFX local trainer executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\nfrom typing import Any, Dict, List, Text\n\nfrom tensorflow_metadata.proto.v0 import schema_pb2\nfrom tfx import types\nfrom tfx.components.base import base_executor\nfrom tfx.extensions.google_cloud_ai_platform import runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.types import artifact_utils\nfrom tfx.utils import import_utils\nfrom tfx.utils import io_utils\nfrom tfx.utils import path_utils\nfrom google.protobuf import json_format\n\n\ndef _all_files_pattern(file_pattern: Text) -> Text:\n return '{}*'.format(file_pattern)\n\n\nclass Executor(base_executor.BaseExecutor):\n \"\"\"Local trainer used by the TFX Trainer component.\n\n The Trainer executor supplements TensorFlow training with a component to\n enable warm-start training of any user-specified tf.estimator. The Trainer is\n a library built on top of TensorFlow that is expected to be integrated into a\n custom user-specified binary.\n\n To include Trainer in a TFX pipeline, configure your pipeline similar to\n https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104.\n\n For more details on the Trainer component itself, please refer to\n https://tensorflow.org/tfx/guide/trainer. For a tutorial on TF Estimator,\n please refer to https://www.tensorflow.org/extend/estimators.\n\n How to create a trainer callback function to be used by this Trainer executor:\n An estimator can be executed by TFX by first creating a trainer_fn callback\n method that returns an estimator and some additional parameters, similar to\n https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.\n This becomes the basis of the new Executor for Trainer. 
This Executor will\n then train and evaluate this estimator using the\n tf.estimator.train_and_evaluate API to train locally.\n \"\"\"\n\n # Name of subdirectory which contains checkpoints from prior runs\n _CHECKPOINT_FILE_NAME = 'checkpoint'\n\n def _GetTrainerFn(self, exec_properties: Dict[Text, Any]) -> Any:\n \"\"\"Loads and returns user-defined trainer_fn.\"\"\"\n\n has_module_file = bool(exec_properties.get('module_file'))\n has_trainer_fn = bool(exec_properties.get('trainer_fn'))\n\n if has_module_file == has_trainer_fn:\n raise ValueError(\n \"Neither or both of 'module_file' 'trainer_fn' have been supplied in \"\n \"'exec_properties'.\")\n\n if has_module_file:\n return import_utils.import_func_from_source(\n exec_properties['module_file'], 'trainer_fn')\n\n trainer_fn_path_split = exec_properties['trainer_fn'].split('.')\n return import_utils.import_func_from_module(\n '.'.join(trainer_fn_path_split[0:-1]), trainer_fn_path_split[-1])\n\n def Do(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any]) -> None:\n \"\"\"Uses a user-supplied tf.estimator to train a TensorFlow model locally.\n\n The Trainer Executor invokes a training_fn callback function provided by\n the user via the module_file parameter. With the tf.estimator returned by\n this function, the Trainer Executor then builds a TensorFlow model using the\n user-provided tf.estimator.\n\n Args:\n input_dict: Input dict from input key to a list of ML-Metadata Artifacts.\n - examples: Examples used for training, must include 'train' and 'eval'\n splits.\n - transform_output: Optional input transform graph.\n - schema: Schema of the data.\n output_dict: Output dict from output key to a list of Artifacts.\n - output: Exported model.\n exec_properties: A dict of execution properties.\n - train_args: JSON string of trainer_pb2.TrainArgs instance, providing\n args for training.\n - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing\n args for eval.\n - module_file: Python module file containing UDF model definition.\n - warm_starting: Whether or not we need to do warm starting.\n - warm_start_from: Optional. 
If warm_starting is True, this is the\n directory to find previous model to warm start on.\n\n Returns:\n None\n\n Raises:\n ValueError: When neither or both of 'module_file' and 'trainer_fn'\n are present in 'exec_properties'.\n \"\"\"\n self._log_startup(input_dict, output_dict, exec_properties)\n\n # TODO(zhitaoli): Deprecate this in a future version.\n if exec_properties.get('custom_config', None):\n cmle_args = exec_properties.get('custom_config',\n {}).get('cmle_training_args')\n if cmle_args:\n executor_class_path = '.'.join([Executor.__module__, Executor.__name__])\n tf.logging.warn(\n 'Passing \\'cmle_training_args\\' to trainer directly is deprecated, '\n 'please use extension executor at '\n 'tfx.extensions.google_cloud_ai_platform.trainer.executor instead')\n\n return runner.start_cmle_training(input_dict, output_dict,\n exec_properties, executor_class_path,\n cmle_args)\n\n trainer_fn = self._GetTrainerFn(exec_properties)\n\n # Set up training parameters\n train_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'train'))\n ]\n transform_output = artifact_utils.get_single_uri(\n input_dict['transform_output']\n ) if input_dict['transform_output'] else None\n eval_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'eval'))\n ]\n schema_file = io_utils.get_only_uri_in_dir(\n artifact_utils.get_single_uri(input_dict['schema']))\n\n train_args = trainer_pb2.TrainArgs()\n eval_args = trainer_pb2.EvalArgs()\n json_format.Parse(exec_properties['train_args'], train_args)\n json_format.Parse(exec_properties['eval_args'], eval_args)\n\n # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with\n # num_steps=None. Conversion of the proto to python will set the default\n # value of an int as 0 so modify the value here. Tensorflow will raise an\n # error if num_steps <= 0.\n train_steps = train_args.num_steps or None\n eval_steps = eval_args.num_steps or None\n\n output_path = artifact_utils.get_single_uri(output_dict['output'])\n serving_model_dir = path_utils.serving_model_dir(output_path)\n eval_model_dir = path_utils.eval_model_dir(output_path)\n\n # Assemble warm start path if needed.\n warm_start_from = None\n if exec_properties.get('warm_starting') and exec_properties.get(\n 'warm_start_from'):\n previous_model_dir = os.path.join(exec_properties['warm_start_from'],\n path_utils.SERVING_MODEL_DIR)\n if previous_model_dir and tf.gfile.Exists(\n os.path.join(previous_model_dir, self._CHECKPOINT_FILE_NAME)):\n warm_start_from = previous_model_dir\n\n # TODO(b/126242806) Use PipelineInputs when it is available in third_party.\n hparams = tf.contrib.training.HParams(\n # A list of uris for train files.\n train_files=train_files,\n # An optional single uri for transform graph produced by TFT. 
Will be\n # None if not specified.\n transform_output=transform_output,\n # A single uri for the output directory of the serving model.\n serving_model_dir=serving_model_dir,\n # A list of uris for eval files.\n eval_files=eval_files,\n # A single uri for schema file.\n schema_file=schema_file,\n # Number of train steps.\n train_steps=train_steps,\n # Number of eval steps.\n eval_steps=eval_steps,\n # A single uri for the model directory to warm start from.\n warm_start_from=warm_start_from)\n\n schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())\n\n training_spec = trainer_fn(hparams, schema)\n\n # Train the model\n tf.logging.info('Training model.')\n tf.estimator.train_and_evaluate(training_spec['estimator'],\n training_spec['train_spec'],\n training_spec['eval_spec'])\n tf.logging.info('Training complete. Model written to %s',\n serving_model_dir)\n\n # Export an eval savedmodel for TFMA\n tf.logging.info('Exporting eval_savedmodel for TFMA.')\n tfma.export.export_eval_savedmodel(\n estimator=training_spec['estimator'],\n export_dir_base=eval_model_dir,\n eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])\n\n tf.logging.info('Exported eval_savedmodel to %s.', eval_model_dir)\n", "path": "tfx/components/trainer/executor.py"}]}
3271
154
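The essence of the record above, as a standalone sketch: `input_dict['transform_output']` raises `KeyError` when the optional channel was never wired up, while `dict.get` returns `None` and lets the caller treat the input as truly optional. The helper name and the `.uri` attribute access are illustrative assumptions; TFX's actual code goes through `artifact_utils.get_single_uri`, as the golden diff shows.

```python
from typing import Any, Dict, List, Optional


def optional_single_uri(input_dict: Dict[str, List[Any]], key: str) -> Optional[str]:
    # dict.get(key) never raises, so an optional input such as
    # 'transform_output' may simply be absent from input_dict.
    artifacts = input_dict.get(key)
    if not artifacts:
        return None
    return artifacts[0].uri  # assumes artifacts expose a .uri attribute
```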
gh_patches_debug_14679
rasdani/github-patches
git_diff
deepset-ai__haystack-7972
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Distribution-based rank fusion in JoinDocuments **Is your feature request related to a problem? Please describe.** Add [Distribution-based rank fusion](https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18) in JoinDocuments **Describe the solution you'd like** ``` def _distribution_based_rank_fusion(self, document_lists): """ Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion. (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18) If a Document is in more than one retriever, the sone with the highest score is used. """ for documents in document_lists: scores_list = [] for doc in documents: scores_list.append(doc.score) mean_score = sum(scores_list) / len(scores_list) std_dev = ( sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list) ) ** 0.5 min_score = mean_score - 3 * std_dev max_score = mean_score + 3 * std_dev for doc in documents: doc.score = (doc.score - min_score) / (max_score - min_score) output = self._concatenate(document_lists=document_lists) return output ``` </issue> <code> [start of haystack/components/joiners/document_joiner.py] 1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]> 2 # 3 # SPDX-License-Identifier: Apache-2.0 4 5 import itertools 6 from collections import defaultdict 7 from math import inf 8 from typing import List, Optional 9 10 from haystack import Document, component, logging 11 from haystack.core.component.types import Variadic 12 13 logger = logging.getLogger(__name__) 14 15 16 @component 17 class DocumentJoiner: 18 """ 19 A component that joins multiple list of Documents into a single list. 20 21 It supports different joins modes: 22 - concatenate: Keeps the highest scored Document in case of duplicates. 23 - merge: Merge a calculate a weighted sum of the scores of duplicate Documents. 24 - reciprocal_rank_fusion: Merge and assign scores based on reciprocal rank fusion. 25 - distribution_based_rank_fusion: Merge and assign scores based on scores distribution in each retriever 26 27 Usage example: 28 ```python 29 document_store = InMemoryDocumentStore() 30 p = Pipeline() 31 p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name="bm25_retriever") 32 p.add_component( 33 instance=SentenceTransformersTextEmbedder(model="sentence-transformers/all-MiniLM-L6-v2"), 34 name="text_embedder", 35 ) 36 p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name="embedding_retriever") 37 p.add_component(instance=DocumentJoiner(), name="joiner") 38 p.connect("bm25_retriever", "joiner") 39 p.connect("embedding_retriever", "joiner") 40 p.connect("text_embedder", "embedding_retriever") 41 query = "What is the capital of France?" 42 p.run(data={"query": query}) 43 ``` 44 """ 45 46 def __init__( 47 self, 48 join_mode: str = "concatenate", 49 weights: Optional[List[float]] = None, 50 top_k: Optional[int] = None, 51 sort_by_score: bool = True, 52 ): 53 """ 54 Create an DocumentJoiner component. 55 56 :param join_mode: 57 Specifies the join mode to use. Available modes: 58 - `concatenate` 59 - `merge` 60 - `reciprocal_rank_fusion` 61 - `distribution_based_rank_fusion` 62 :param weights: 63 Weight for each list of Documents received, must have the same length as the number of inputs. 
64 If `join_mode` is `concatenate` or `distribution_based_rank_fusion` this parameter is ignored. 65 :param top_k: 66 The maximum number of Documents to return. 67 :param sort_by_score: 68 If True sorts the Documents by score in descending order. 69 If a Document has no score, it is handled as if its score is -infinity. 70 """ 71 if join_mode not in ["concatenate", "merge", "reciprocal_rank_fusion", "distribution_based_rank_fusion"]: 72 raise ValueError(f"DocumentJoiner component does not support '{join_mode}' join_mode.") 73 self.join_mode = join_mode 74 self.weights = [float(i) / sum(weights) for i in weights] if weights else None 75 self.top_k = top_k 76 self.sort_by_score = sort_by_score 77 78 @component.output_types(documents=List[Document]) 79 def run(self, documents: Variadic[List[Document]], top_k: Optional[int] = None): 80 """ 81 Joins multiple lists of Documents into a single list depending on the `join_mode` parameter. 82 83 :param documents: 84 List of list of Documents to be merged. 85 :param top_k: 86 The maximum number of Documents to return. Overrides the instance's `top_k` if provided. 87 88 :returns: 89 A dictionary with the following keys: 90 - `documents`: Merged list of Documents 91 """ 92 output_documents = [] 93 94 documents = list(documents) 95 if self.join_mode == "concatenate": 96 output_documents = self._concatenate(documents) 97 elif self.join_mode == "merge": 98 output_documents = self._merge(documents) 99 elif self.join_mode == "reciprocal_rank_fusion": 100 output_documents = self._reciprocal_rank_fusion(documents) 101 elif self.join_mode == "distribution_based_rank_fusion": 102 output_documents = self._distribution_based_rank_fusion(documents) 103 104 if self.sort_by_score: 105 output_documents = sorted( 106 output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True 107 ) 108 if any(doc.score is None for doc in output_documents): 109 logger.info( 110 "Some of the Documents DocumentJoiner got have score=None. It was configured to sort Documents by " 111 "score, so those with score=None were sorted as if they had a score of -infinity." 112 ) 113 114 if top_k: 115 output_documents = output_documents[:top_k] 116 elif self.top_k: 117 output_documents = output_documents[: self.top_k] 118 119 return {"documents": output_documents} 120 121 def _concatenate(self, document_lists: List[List[Document]]) -> List[Document]: 122 """ 123 Concatenate multiple lists of Documents and return only the Document with the highest score for duplicates. 124 """ 125 output = [] 126 docs_per_id = defaultdict(list) 127 for doc in itertools.chain.from_iterable(document_lists): 128 docs_per_id[doc.id].append(doc) 129 for docs in docs_per_id.values(): 130 doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf) 131 output.append(doc_with_best_score) 132 return output 133 134 def _merge(self, document_lists: List[List[Document]]) -> List[Document]: 135 """ 136 Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents. 
137 """ 138 scores_map: dict = defaultdict(int) 139 documents_map = {} 140 weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists) 141 142 for documents, weight in zip(document_lists, weights): 143 for doc in documents: 144 scores_map[doc.id] += (doc.score if doc.score else 0) * weight 145 documents_map[doc.id] = doc 146 147 for doc in documents_map.values(): 148 doc.score = scores_map[doc.id] 149 150 return list(documents_map.values()) 151 152 def _reciprocal_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]: 153 """ 154 Merge multiple lists of Documents and assign scores based on reciprocal rank fusion. 155 156 The constant k is set to 61 (60 was suggested by the original paper, 157 plus 1 as python lists are 0-based and the paper used 1-based ranking). 158 """ 159 k = 61 160 161 scores_map: dict = defaultdict(int) 162 documents_map = {} 163 weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists) 164 165 # Calculate weighted reciprocal rank fusion score 166 for documents, weight in zip(document_lists, weights): 167 for rank, doc in enumerate(documents): 168 scores_map[doc.id] += (weight * len(document_lists)) / (k + rank) 169 documents_map[doc.id] = doc 170 171 # Normalize scores. Note: len(results) / k is the maximum possible score, 172 # achieved by being ranked first in all doc lists with non-zero weight. 173 for _id in scores_map: 174 scores_map[_id] /= len(document_lists) / k 175 176 for doc in documents_map.values(): 177 doc.score = scores_map[doc.id] 178 179 return list(documents_map.values()) 180 181 def _distribution_based_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]: 182 """ 183 Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion. 184 185 (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18) 186 If a Document is in more than one retriever, the one with the highest score is used. 187 """ 188 for documents in document_lists: 189 scores_list = [] 190 191 for doc in documents: 192 scores_list.append(doc.score if doc.score is not None else 0) 193 194 mean_score = sum(scores_list) / len(scores_list) 195 std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5 196 min_score = mean_score - 3 * std_dev 197 max_score = mean_score + 3 * std_dev 198 199 for doc in documents: 200 doc.score = (doc.score - min_score) / (max_score - min_score) 201 202 output = self._concatenate(document_lists=document_lists) 203 204 return output 205 [end of haystack/components/joiners/document_joiner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/components/joiners/document_joiner.py b/haystack/components/joiners/document_joiner.py --- a/haystack/components/joiners/document_joiner.py +++ b/haystack/components/joiners/document_joiner.py @@ -195,9 +195,11 @@ std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5 min_score = mean_score - 3 * std_dev max_score = mean_score + 3 * std_dev + delta_score = max_score - min_score for doc in documents: - doc.score = (doc.score - min_score) / (max_score - min_score) + doc.score = (doc.score - min_score) / delta_score if delta_score != 0.0 else 0.0 + # if all docs have the same score delta_score is 0, the docs are uninformative for the query output = self._concatenate(document_lists=document_lists)
{"golden_diff": "diff --git a/haystack/components/joiners/document_joiner.py b/haystack/components/joiners/document_joiner.py\n--- a/haystack/components/joiners/document_joiner.py\n+++ b/haystack/components/joiners/document_joiner.py\n@@ -195,9 +195,11 @@\n std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5\n min_score = mean_score - 3 * std_dev\n max_score = mean_score + 3 * std_dev\n+ delta_score = max_score - min_score\n \n for doc in documents:\n- doc.score = (doc.score - min_score) / (max_score - min_score)\n+ doc.score = (doc.score - min_score) / delta_score if delta_score != 0.0 else 0.0\n+ # if all docs have the same score delta_score is 0, the docs are uninformative for the query\n \n output = self._concatenate(document_lists=document_lists)\n", "issue": "Add Distribution-based rank fusion in JoinDocuments\n**Is your feature request related to a problem? Please describe.**\r\nAdd [Distribution-based rank fusion](https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18) in JoinDocuments\r\n\r\n**Describe the solution you'd like**\r\n```\r\ndef _distribution_based_rank_fusion(self, document_lists):\r\n \"\"\"\r\n Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.\r\n (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)\r\n\r\n If a Document is in more than one retriever, the sone with the highest score is used.\r\n \"\"\"\r\n for documents in document_lists:\r\n scores_list = []\r\n\r\n for doc in documents:\r\n scores_list.append(doc.score)\r\n\r\n mean_score = sum(scores_list) / len(scores_list)\r\n std_dev = (\r\n sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)\r\n ) ** 0.5\r\n min_score = mean_score - 3 * std_dev\r\n max_score = mean_score + 3 * std_dev\r\n\r\n for doc in documents:\r\n doc.score = (doc.score - min_score) / (max_score - min_score)\r\n\r\n output = self._concatenate(document_lists=document_lists)\r\n\r\n return output\r\n```\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport itertools\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\n\nfrom haystack import Document, component, logging\nfrom haystack.core.component.types import Variadic\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass DocumentJoiner:\n \"\"\"\n A component that joins multiple list of Documents into a single list.\n\n It supports different joins modes:\n - concatenate: Keeps the highest scored Document in case of duplicates.\n - merge: Merge a calculate a weighted sum of the scores of duplicate Documents.\n - reciprocal_rank_fusion: Merge and assign scores based on reciprocal rank fusion.\n - distribution_based_rank_fusion: Merge and assign scores based on scores distribution in each retriever\n\n Usage example:\n ```python\n document_store = InMemoryDocumentStore()\n p = Pipeline()\n p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name=\"bm25_retriever\")\n p.add_component(\n instance=SentenceTransformersTextEmbedder(model=\"sentence-transformers/all-MiniLM-L6-v2\"),\n name=\"text_embedder\",\n )\n p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name=\"embedding_retriever\")\n p.add_component(instance=DocumentJoiner(), 
name=\"joiner\")\n p.connect(\"bm25_retriever\", \"joiner\")\n p.connect(\"embedding_retriever\", \"joiner\")\n p.connect(\"text_embedder\", \"embedding_retriever\")\n query = \"What is the capital of France?\"\n p.run(data={\"query\": query})\n ```\n \"\"\"\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n Create an DocumentJoiner component.\n\n :param join_mode:\n Specifies the join mode to use. Available modes:\n - `concatenate`\n - `merge`\n - `reciprocal_rank_fusion`\n - `distribution_based_rank_fusion`\n :param weights:\n Weight for each list of Documents received, must have the same length as the number of inputs.\n If `join_mode` is `concatenate` or `distribution_based_rank_fusion` this parameter is ignored.\n :param top_k:\n The maximum number of Documents to return.\n :param sort_by_score:\n If True sorts the Documents by score in descending order.\n If a Document has no score, it is handled as if its score is -infinity.\n \"\"\"\n if join_mode not in [\"concatenate\", \"merge\", \"reciprocal_rank_fusion\", \"distribution_based_rank_fusion\"]:\n raise ValueError(f\"DocumentJoiner component does not support '{join_mode}' join_mode.\")\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k = top_k\n self.sort_by_score = sort_by_score\n\n @component.output_types(documents=List[Document])\n def run(self, documents: Variadic[List[Document]], top_k: Optional[int] = None):\n \"\"\"\n Joins multiple lists of Documents into a single list depending on the `join_mode` parameter.\n\n :param documents:\n List of list of Documents to be merged.\n :param top_k:\n The maximum number of Documents to return. Overrides the instance's `top_k` if provided.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: Merged list of Documents\n \"\"\"\n output_documents = []\n\n documents = list(documents)\n if self.join_mode == \"concatenate\":\n output_documents = self._concatenate(documents)\n elif self.join_mode == \"merge\":\n output_documents = self._merge(documents)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n output_documents = self._reciprocal_rank_fusion(documents)\n elif self.join_mode == \"distribution_based_rank_fusion\":\n output_documents = self._distribution_based_rank_fusion(documents)\n\n if self.sort_by_score:\n output_documents = sorted(\n output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True\n )\n if any(doc.score is None for doc in output_documents):\n logger.info(\n \"Some of the Documents DocumentJoiner got have score=None. 
It was configured to sort Documents by \"\n \"score, so those with score=None were sorted as if they had a score of -infinity.\"\n )\n\n if top_k:\n output_documents = output_documents[:top_k]\n elif self.top_k:\n output_documents = output_documents[: self.top_k]\n\n return {\"documents\": output_documents}\n\n def _concatenate(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Concatenate multiple lists of Documents and return only the Document with the highest score for duplicates.\n \"\"\"\n output = []\n docs_per_id = defaultdict(list)\n for doc in itertools.chain.from_iterable(document_lists):\n docs_per_id[doc.id].append(doc)\n for docs in docs_per_id.values():\n doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf)\n output.append(doc_with_best_score)\n return output\n\n def _merge(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.\n \"\"\"\n scores_map: dict = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n for documents, weight in zip(document_lists, weights):\n for doc in documents:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n documents_map[doc.id] = doc\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return list(documents_map.values())\n\n def _reciprocal_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.\n\n The constant k is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n k = 61\n\n scores_map: dict = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n # Calculate weighted reciprocal rank fusion score\n for documents, weight in zip(document_lists, weights):\n for rank, doc in enumerate(documents):\n scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)\n documents_map[doc.id] = doc\n\n # Normalize scores. 
Note: len(results) / k is the maximum possible score,\n # achieved by being ranked first in all doc lists with non-zero weight.\n for _id in scores_map:\n scores_map[_id] /= len(document_lists) / k\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return list(documents_map.values())\n\n def _distribution_based_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.\n\n (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)\n If a Document is in more than one retriever, the one with the highest score is used.\n \"\"\"\n for documents in document_lists:\n scores_list = []\n\n for doc in documents:\n scores_list.append(doc.score if doc.score is not None else 0)\n\n mean_score = sum(scores_list) / len(scores_list)\n std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5\n min_score = mean_score - 3 * std_dev\n max_score = mean_score + 3 * std_dev\n\n for doc in documents:\n doc.score = (doc.score - min_score) / (max_score - min_score)\n\n output = self._concatenate(document_lists=document_lists)\n\n return output\n", "path": "haystack/components/joiners/document_joiner.py"}]}
3245
233
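A standalone sketch of the normalization step this record's golden diff hardens. When every score in a list is identical, the standard deviation is zero, `max_score - min_score` collapses to zero, and the unguarded division raises `ZeroDivisionError`; the guard maps that uninformative case to 0.0, mirroring the merged fix.

```python
from typing import List


def dbsf_normalize(scores: List[float]) -> List[float]:
    """Min-max normalize scores using a mean +/- 3 standard deviation window."""
    mean = sum(scores) / len(scores)
    std_dev = (sum((s - mean) ** 2 for s in scores) / len(scores)) ** 0.5
    min_score = mean - 3 * std_dev
    max_score = mean + 3 * std_dev
    delta = max_score - min_score
    # delta == 0.0 means all scores are equal: the retriever is
    # uninformative for this query, so every document maps to 0.0.
    return [(s - min_score) / delta if delta != 0.0 else 0.0 for s in scores]
```

For example, `dbsf_normalize([0.7, 0.7, 0.7])` returns `[0.0, 0.0, 0.0]` instead of raising, while `dbsf_normalize([0.2, 0.8])` spreads the scores to roughly `[0.33, 0.67]` inside the 3-sigma window.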
gh_patches_debug_24424
rasdani/github-patches
git_diff
cisagov__manage.get.gov-2112
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Domain request table: Capture the "CISA region" a `domain request` is in ### Issue description _As an analyst I want to know which [CISA region](https://www.cisa.gov/about/regions) a request is in so that if my research doesn't determine a request is authentic/eligible, I can know which region I need to reach out to_ Each US state/territory is in a "CISA region". When we need additional assistance in verification, we can reach out to regional contacts. Having regional designations in-registrar means we don't have to look it up, saving analyst time. ### Acceptance criteria - [ ] For **non-federal requests**, the domain request detail page shows the CISA Region number based on the state abbreviation code of the organization address in the request. (See markup in "Additional Context") - [ ] For **federal requests**, the domain request detail page shows "N/A" for the CISA region number, and no table lookup is necessary. - [ ] The CISA region is not displayed on any user-facing views. ### Additional context Please make this reusable by domain information as well. consider domain helper or other utility that can then take in the org address state/territory abbreviation and returns the number for the region. Regions and state abbreviations haven't changed in a long time and aren't expected to change in the future, they do not need to be in a table and a simple dictionary lookup (while ugly) is probably the simplest solution. Based on the official two-letter state/territory abbreviation from the organization address, use the data in the following table to determine the region number: | Region | Locales | | ------- | ----- | | 1 |Connecticut, Maine, Massachusetts, New Hampshire, Rhode Island, Vermont| | 2 |New Jersey, New York, Puerto Rico, U.S. 
Virgin Islands| |3| Delaware, District of Columbia, Maryland, Pennsylvania, Virginia, West Virginia| |4| Alabama, Florida, Georgia, Kentucky, Mississippi, North Carolina, South Carolina, Tennessee| |5| Illinois, Indiana, Michigan, Minnesota, Ohio, Wisconsin| |6| Arkansas, Louisiana, New Mexico, Oklahoma, Texas| |7|Iowa, Kansas, Missouri, Nebraska| |8|Colorado, Montana, North Dakota, South Dakota, Utah, Wyoming| |9|Arizona, California, Hawaii, Nevada, Guam, American Samoa, Commonwealth of the Northern Mariana Islands| |10|Alaska, Idaho, Oregon, Washington| Click Image to open Miro: [![Image](https://github.com/cisagov/manage.get.gov/assets/62573986/bca81201-ce20-409b-bb31-10bca552ed23)](https://miro.com/app/board/uXjVKNvtde0=/?moveToWidget=3458764587423408611&cot=14) ### Links to other issues Blocks #2095 </issue> <code> [start of src/registrar/templatetags/custom_filters.py] 1 import logging 2 from django import template 3 import re 4 from registrar.models.domain_request import DomainRequest 5 6 register = template.Library() 7 logger = logging.getLogger(__name__) 8 9 10 @register.filter(name="extract_value") 11 def extract_value(html_input): 12 match = re.search(r'value="([^"]*)"', html_input) 13 if match: 14 return match.group(1) 15 return "" 16 17 18 @register.filter 19 def extract_a_text(value): 20 # Use regex to extract the text within the <a> tag 21 pattern = r"<a\b[^>]*>(.*?)</a>" 22 match = re.search(pattern, value) 23 if match: 24 extracted_text = match.group(1) 25 else: 26 extracted_text = "" 27 28 return extracted_text 29 30 31 @register.filter 32 def find_index(haystack, needle): 33 try: 34 return haystack.index(needle) 35 except ValueError: 36 return -1 37 38 39 @register.filter 40 def slice_after(value, substring): 41 index = value.find(substring) 42 if index != -1: 43 result = value[index + len(substring) :] 44 return result 45 return value 46 47 48 @register.filter 49 def contains_checkbox(html_list): 50 for html_string in html_list: 51 if re.search(r'<input[^>]*type="checkbox"', html_string): 52 return True 53 return False 54 55 56 @register.filter 57 def get_organization_long_name(generic_org_type): 58 organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices) 59 long_form_type = organization_choices_dict[generic_org_type] 60 if long_form_type is None: 61 logger.error("Organization type error, triggered by a template's custom filter") 62 return "Error" 63 64 return long_form_type 65 66 67 @register.filter(name="has_permission") 68 def has_permission(user, permission): 69 return user.has_perm(permission) 70 [end of src/registrar/templatetags/custom_filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py --- a/src/registrar/templatetags/custom_filters.py +++ b/src/registrar/templatetags/custom_filters.py @@ -67,3 +67,69 @@ @register.filter(name="has_permission") def has_permission(user, permission): return user.has_perm(permission) + + [email protected] +def get_region(state): + if state and isinstance(state, str): + regions = { + "CT": 1, + "ME": 1, + "MA": 1, + "NH": 1, + "RI": 1, + "VT": 1, + "NJ": 2, + "NY": 2, + "PR": 2, + "VI": 2, + "DE": 3, + "DC": 3, + "MD": 3, + "PA": 3, + "VA": 3, + "WV": 3, + "AL": 4, + "FL": 4, + "GA": 4, + "KY": 4, + "MS": 4, + "NC": 4, + "SC": 4, + "TN": 4, + "IL": 5, + "IN": 5, + "MI": 5, + "MN": 5, + "OH": 5, + "WI": 5, + "AR": 6, + "LA": 6, + "NM": 6, + "OK": 6, + "TX": 6, + "IA": 7, + "KS": 7, + "MO": 7, + "NE": 7, + "CO": 8, + "MT": 8, + "ND": 8, + "SD": 8, + "UT": 8, + "WY": 8, + "AZ": 9, + "CA": 9, + "HI": 9, + "NV": 9, + "GU": 9, + "AS": 9, + "MP": 9, + "AK": 10, + "ID": 10, + "OR": 10, + "WA": 10, + } + return regions.get(state.upper(), "N/A") + else: + return None
{"golden_diff": "diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py\n--- a/src/registrar/templatetags/custom_filters.py\n+++ b/src/registrar/templatetags/custom_filters.py\n@@ -67,3 +67,69 @@\n @register.filter(name=\"has_permission\")\n def has_permission(user, permission):\n return user.has_perm(permission)\n+\n+\[email protected]\n+def get_region(state):\n+ if state and isinstance(state, str):\n+ regions = {\n+ \"CT\": 1,\n+ \"ME\": 1,\n+ \"MA\": 1,\n+ \"NH\": 1,\n+ \"RI\": 1,\n+ \"VT\": 1,\n+ \"NJ\": 2,\n+ \"NY\": 2,\n+ \"PR\": 2,\n+ \"VI\": 2,\n+ \"DE\": 3,\n+ \"DC\": 3,\n+ \"MD\": 3,\n+ \"PA\": 3,\n+ \"VA\": 3,\n+ \"WV\": 3,\n+ \"AL\": 4,\n+ \"FL\": 4,\n+ \"GA\": 4,\n+ \"KY\": 4,\n+ \"MS\": 4,\n+ \"NC\": 4,\n+ \"SC\": 4,\n+ \"TN\": 4,\n+ \"IL\": 5,\n+ \"IN\": 5,\n+ \"MI\": 5,\n+ \"MN\": 5,\n+ \"OH\": 5,\n+ \"WI\": 5,\n+ \"AR\": 6,\n+ \"LA\": 6,\n+ \"NM\": 6,\n+ \"OK\": 6,\n+ \"TX\": 6,\n+ \"IA\": 7,\n+ \"KS\": 7,\n+ \"MO\": 7,\n+ \"NE\": 7,\n+ \"CO\": 8,\n+ \"MT\": 8,\n+ \"ND\": 8,\n+ \"SD\": 8,\n+ \"UT\": 8,\n+ \"WY\": 8,\n+ \"AZ\": 9,\n+ \"CA\": 9,\n+ \"HI\": 9,\n+ \"NV\": 9,\n+ \"GU\": 9,\n+ \"AS\": 9,\n+ \"MP\": 9,\n+ \"AK\": 10,\n+ \"ID\": 10,\n+ \"OR\": 10,\n+ \"WA\": 10,\n+ }\n+ return regions.get(state.upper(), \"N/A\")\n+ else:\n+ return None\n", "issue": "Domain request table: Capture the \"CISA region\" a `domain request` is in\n### Issue description\n\n_As an analyst\nI want to know which [CISA region](https://www.cisa.gov/about/regions) a request is in \nso that if my research doesn't determine a request is authentic/eligible, I can know which region I need to reach out to_\n\nEach US state/territory is in a \"CISA region\". When we need additional assistance in verification, we can reach out to regional contacts. Having regional designations in-registrar means we don't have to look it up, saving analyst time.\n\n### Acceptance criteria\n\n- [ ] For **non-federal requests**, the domain request detail page shows the CISA Region number based on the state abbreviation code of the organization address in the request. (See markup in \"Additional Context\")\n- [ ] For **federal requests**, the domain request detail page shows \"N/A\" for the CISA region number, and no table lookup is necessary.\n- [ ] The CISA region is not displayed on any user-facing views.\n\n### Additional context\nPlease make this reusable by domain information as well. consider domain helper or other utility that can then take in the org address state/territory abbreviation and returns the number for the region. Regions and state abbreviations haven't changed in a long time and aren't expected to change in the future, they do not need to be in a table and a simple dictionary lookup (while ugly) is probably the simplest solution.\n\nBased on the official two-letter state/territory abbreviation from the organization address, use the data in the following table to determine the region number:\n\n| Region | Locales |\n| ------- | ----- |\n| 1 |Connecticut, Maine, Massachusetts, New Hampshire, Rhode Island, Vermont|\n| 2 |New Jersey, New York, Puerto Rico, U.S. 
Virgin Islands|\n|3| Delaware, District of Columbia, Maryland, Pennsylvania, Virginia, West Virginia|\n|4| Alabama, Florida, Georgia, Kentucky, Mississippi, North Carolina, South Carolina, Tennessee|\n|5| Illinois, Indiana, Michigan, Minnesota, Ohio, Wisconsin|\n|6| Arkansas, Louisiana, New Mexico, Oklahoma, Texas|\n|7|Iowa, Kansas, Missouri, Nebraska|\n|8|Colorado, Montana, North Dakota, South Dakota, Utah, Wyoming|\n|9|Arizona, California, Hawaii, Nevada, Guam, American Samoa, Commonwealth of the Northern Mariana Islands|\n|10|Alaska, Idaho, Oregon, Washington|\n\nClick Image to open Miro:\n[![Image](https://github.com/cisagov/manage.get.gov/assets/62573986/bca81201-ce20-409b-bb31-10bca552ed23)](https://miro.com/app/board/uXjVKNvtde0=/?moveToWidget=3458764587423408611&cot=14)\n\n### Links to other issues\n\nBlocks #2095 \n", "before_files": [{"content": "import logging\nfrom django import template\nimport re\nfrom registrar.models.domain_request import DomainRequest\n\nregister = template.Library()\nlogger = logging.getLogger(__name__)\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n\n\[email protected]\ndef contains_checkbox(html_list):\n for html_string in html_list:\n if re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n return True\n return False\n\n\[email protected]\ndef get_organization_long_name(generic_org_type):\n organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices)\n long_form_type = organization_choices_dict[generic_org_type]\n if long_form_type is None:\n logger.error(\"Organization type error, triggered by a template's custom filter\")\n return \"Error\"\n\n return long_form_type\n\n\[email protected](name=\"has_permission\")\ndef has_permission(user, permission):\n return user.has_perm(permission)\n", "path": "src/registrar/templatetags/custom_filters.py"}]}
1,712
597
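A note on the registrar record above: the golden diff implements the CISA region lookup as a flat state-to-region dict inside a Django template filter, returning `"N/A"` for unrecognized abbreviations and `None` for non-string input. The sketch below re-derives the same table from an inverted region-to-states mapping, which is easier to eyeball against the table in the issue; the `_REGIONS` constant and the module-level asserts are illustrative additions, not part of the patch.

```python
# Re-derivation of the lookup the golden diff adds as the get_region
# template filter. The inverted region -> states table below is an
# illustrative rearrangement (the patch itself uses a flat state -> region
# dict); the asserts are also additions for quick self-checking.
_REGIONS = {
    1: ["CT", "ME", "MA", "NH", "RI", "VT"],
    2: ["NJ", "NY", "PR", "VI"],
    3: ["DE", "DC", "MD", "PA", "VA", "WV"],
    4: ["AL", "FL", "GA", "KY", "MS", "NC", "SC", "TN"],
    5: ["IL", "IN", "MI", "MN", "OH", "WI"],
    6: ["AR", "LA", "NM", "OK", "TX"],
    7: ["IA", "KS", "MO", "NE"],
    8: ["CO", "MT", "ND", "SD", "UT", "WY"],
    9: ["AZ", "CA", "HI", "NV", "GU", "AS", "MP"],
    10: ["AK", "ID", "OR", "WA"],
}
_STATE_TO_REGION = {s: r for r, states in _REGIONS.items() for s in states}


def get_region(state):
    # Same contract as the patched filter: None for falsy or non-string
    # input, "N/A" for abbreviations outside the table (federal requests).
    if state and isinstance(state, str):
        return _STATE_TO_REGION.get(state.upper(), "N/A")
    return None


assert get_region("va") == 3
assert get_region("XX") == "N/A"
assert get_region(None) is None
```

The inverted layout also makes the federal-request behaviour visible: anything outside the listed abbreviations falls through to `"N/A"`, exactly as the acceptance criteria ask.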
gh_patches_debug_2096
rasdani/github-patches
git_diff
liqd__a4-product-1097
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> archived projects accessible via activity feed At https://www.beteiligung.in/liqd/ all projects are private but I can see the content of the projects if I click on the activity feed. Even if not signed in. </issue> <code> [start of apps/organisations/views.py] 1 from django.contrib.messages.views import SuccessMessageMixin 2 from django.utils.translation import ugettext_lazy as _ 3 from django.views import generic 4 from django.views.generic import DetailView 5 6 from adhocracy4.actions.models import Action 7 from adhocracy4.projects.models import Project 8 from adhocracy4.rules import mixins as rules_mixins 9 from apps.projects import query 10 11 from . import forms 12 from .models import Organisation 13 14 15 class OrganisationView(DetailView): 16 template_name = 'organisation_landing_page.html' 17 model = Organisation 18 slug_url_kwarg = 'organisation_slug' 19 20 def get_context_data(self, **kwargs): 21 context = super().get_context_data(**kwargs) 22 23 project_list = Project.objects\ 24 .filter(organisation=self.object, 25 is_archived=False, 26 is_draft=False) 27 project_list = query.filter_viewable( 28 project_list, self.request.user 29 ) 30 context['project_list'] = project_list 31 32 context['action_list'] = Action.objects\ 33 .filter(project__organisation=self.object)\ 34 .filter_public()\ 35 .exclude_updates()[:4] 36 37 context['stats'] = { 38 'users': 1204, 39 'items': 3425, 40 'comments': 23234, 41 'ratings': 134234, 42 } 43 44 return context 45 46 47 class InformationView(DetailView): 48 template_name = 'organisation_information.html' 49 model = Organisation 50 slug_url_kwarg = 'organisation_slug' 51 52 53 class ImprintView(DetailView): 54 template_name = 'organisation_imprint.html' 55 model = Organisation 56 slug_url_kwarg = 'organisation_slug' 57 58 59 class OrganisationUpdateView(rules_mixins.PermissionRequiredMixin, 60 SuccessMessageMixin, 61 generic.UpdateView): 62 model = Organisation 63 form_class = forms.OrganisationForm 64 slug_url_kwarg = 'organisation_slug' 65 template_name = 'organisation_form.html' 66 success_message = _('Organisation successfully updated.') 67 permission_required = 'a4_candy_organisations.change_organisation' 68 menu_item = 'organisation' 69 70 def get_success_url(self): 71 return self.request.path 72 [end of apps/organisations/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/organisations/views.py b/apps/organisations/views.py --- a/apps/organisations/views.py +++ b/apps/organisations/views.py @@ -31,6 +31,7 @@ context['action_list'] = Action.objects\ .filter(project__organisation=self.object)\ + .filter(project__is_archived=False) \ .filter_public()\ .exclude_updates()[:4]
{"golden_diff": "diff --git a/apps/organisations/views.py b/apps/organisations/views.py\n--- a/apps/organisations/views.py\n+++ b/apps/organisations/views.py\n@@ -31,6 +31,7 @@\n \n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n+ .filter(project__is_archived=False) \\\n .filter_public()\\\n .exclude_updates()[:4]\n", "issue": "archived projects accessible via activity feed\n At https://www.beteiligung.in/liqd/ all projects are private but I can see the content of the projects if I click on the activity feed. Even if not signed in.\n", "before_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom apps.projects import query\n\nfrom . import forms\nfrom .models import Organisation\n\n\nclass OrganisationView(DetailView):\n template_name = 'organisation_landing_page.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project_list = Project.objects\\\n .filter(organisation=self.object,\n is_archived=False,\n is_draft=False)\n project_list = query.filter_viewable(\n project_list, self.request.user\n )\n context['project_list'] = project_list\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'organisation_information.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'organisation_imprint.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'a4_candy_organisations.change_organisation'\n menu_item = 'organisation'\n\n def get_success_url(self):\n return self.request.path\n", "path": "apps/organisations/views.py"}]}
1,181
94
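The one-line fix in the `liqd__a4-product-1097` record is small enough to miss why it matters: `OrganisationView` already filtered `is_archived=False` on its project list, but the action feed query only filtered on public visibility, so actions from archived projects still surfaced on the landing page. A minimal stand-in, not the real adhocracy4 models, makes the before/after concrete:

```python
# Tiny stand-in for the Django models involved; not the real adhocracy4
# classes, just enough structure to show what the added queryset filter
# changes on the landing page feed.
from dataclasses import dataclass


@dataclass
class Project:
    is_archived: bool


@dataclass
class Action:
    project: Project


actions = [Action(Project(is_archived=True)), Action(Project(is_archived=False))]

feed_before = list(actions)                                     # archived action leaks through
feed_after = [a for a in actions if not a.project.is_archived]  # mirrors .filter(project__is_archived=False)

assert len(feed_before) == 2
assert len(feed_after) == 1
```

A real regression test would instead create an archived and a non-archived project through the app's factories and assert that only the latter's actions appear in `context['action_list']`.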
gh_patches_debug_30641
rasdani/github-patches
git_diff
localstack__localstack-9677
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bug: StateMachine yaml Definition is not supported (DefinitionString works) ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Behavior when describing using cloudformation YAML with Definition, LocalStack fails to create StateMachine. Switching to YAML with DefinitionString works as expected. Examples taken from here https://docs.aws.amazon.com/step-functions/latest/dg/development-options.html#development-options-format Attaching working example [localstack_stepfunctions_and_serverless.zip](https://github.com/localstack/localstack/files/7791134/localstack_stepfunctions_and_serverless.z ### Expected Behavior YAML with Definition is much more preferable in a complex state machine description with bunch of substitutions, references and parameters ### How are you starting LocalStack? With the `localstack` script ### Steps To Reproduce #### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`) localstack --debug start -d #### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands) awslocal s3 mb s3://s4-echo-serverless-deployment-share-local sls deploy --stage local ### Environment ```markdown - OS: OSX Monterey - LocalStack: latest ``` ### Anything else? Error when using YAML with Definition [error.txt](https://github.com/localstack/localstack/files/7791149/error.txt) Success when using YAML with DefinitionString [success.txt](https://github.com/localstack/localstack/files/7791151/success.txt) </issue> <code> [start of localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py] 1 # LocalStack Resource Provider Scaffolding v2 2 from __future__ import annotations 3 4 import re 5 from pathlib import Path 6 from typing import Optional, TypedDict 7 8 import localstack.services.cloudformation.provider_utils as util 9 from localstack.services.cloudformation.resource_provider import ( 10 LOG, 11 OperationStatus, 12 ProgressEvent, 13 ResourceProvider, 14 ResourceRequest, 15 ) 16 from localstack.utils.strings import to_str 17 18 19 class StepFunctionsStateMachineProperties(TypedDict): 20 RoleArn: Optional[str] 21 Arn: Optional[str] 22 Definition: Optional[dict] 23 DefinitionS3Location: Optional[S3Location] 24 DefinitionString: Optional[str] 25 DefinitionSubstitutions: Optional[dict] 26 LoggingConfiguration: Optional[LoggingConfiguration] 27 Name: Optional[str] 28 StateMachineName: Optional[str] 29 StateMachineRevisionId: Optional[str] 30 StateMachineType: Optional[str] 31 Tags: Optional[list[TagsEntry]] 32 TracingConfiguration: Optional[TracingConfiguration] 33 34 35 class CloudWatchLogsLogGroup(TypedDict): 36 LogGroupArn: Optional[str] 37 38 39 class LogDestination(TypedDict): 40 CloudWatchLogsLogGroup: Optional[CloudWatchLogsLogGroup] 41 42 43 class LoggingConfiguration(TypedDict): 44 Destinations: Optional[list[LogDestination]] 45 IncludeExecutionData: Optional[bool] 46 Level: Optional[str] 47 48 49 class TracingConfiguration(TypedDict): 50 Enabled: Optional[bool] 51 52 53 class S3Location(TypedDict): 54 Bucket: Optional[str] 55 Key: Optional[str] 56 Version: Optional[str] 57 58 59 class TagsEntry(TypedDict): 60 Key: Optional[str] 61 Value: Optional[str] 62 63 64 REPEATED_INVOCATION = "repeated_invocation" 65 66 67 class StepFunctionsStateMachineProvider(ResourceProvider[StepFunctionsStateMachineProperties]): 68 TYPE = 
"AWS::StepFunctions::StateMachine" # Autogenerated. Don't change 69 SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change 70 71 def create( 72 self, 73 request: ResourceRequest[StepFunctionsStateMachineProperties], 74 ) -> ProgressEvent[StepFunctionsStateMachineProperties]: 75 """ 76 Create a new resource. 77 78 Primary identifier fields: 79 - /properties/Arn 80 81 Required properties: 82 - RoleArn 83 84 Create-only properties: 85 - /properties/StateMachineName 86 - /properties/StateMachineType 87 88 Read-only properties: 89 - /properties/Arn 90 - /properties/Name 91 - /properties/StateMachineRevisionId 92 93 IAM permissions required: 94 - states:CreateStateMachine 95 - iam:PassRole 96 - s3:GetObject 97 98 """ 99 model = request.desired_state 100 step_function = request.aws_client_factory.stepfunctions 101 102 if not model.get("StateMachineName"): 103 model["StateMachineName"] = util.generate_default_name( 104 stack_name=request.stack_name, logical_resource_id=request.logical_resource_id 105 ) 106 107 params = { 108 "name": model.get("StateMachineName"), 109 "roleArn": model.get("RoleArn"), 110 "type": model.get("StateMachineType", "STANDARD"), 111 } 112 113 # get definition 114 s3_client = request.aws_client_factory.s3 115 116 definition_str = self._get_definition(model, s3_client) 117 118 params["definition"] = definition_str 119 120 response = step_function.create_state_machine(**params) 121 122 model["Arn"] = response["stateMachineArn"] 123 model["Name"] = model["StateMachineName"] 124 125 return ProgressEvent( 126 status=OperationStatus.SUCCESS, 127 resource_model=model, 128 custom_context=request.custom_context, 129 ) 130 131 def _get_definition(self, model, s3_client): 132 definition_str = model.get("DefinitionString") 133 s3_location = model.get("DefinitionS3Location") 134 if not definition_str and s3_location: 135 # TODO: currently not covered by tests - add a test to mimick the behavior of "sam deploy ..." 
136 137 LOG.debug("Fetching state machine definition from S3: %s", s3_location) 138 result = s3_client.get_object(Bucket=s3_location["Bucket"], Key=s3_location["Key"]) 139 definition_str = to_str(result["Body"].read()) 140 substitutions = model.get("DefinitionSubstitutions") 141 if substitutions is not None: 142 definition_str = _apply_substitutions(definition_str, substitutions) 143 return definition_str 144 145 def read( 146 self, 147 request: ResourceRequest[StepFunctionsStateMachineProperties], 148 ) -> ProgressEvent[StepFunctionsStateMachineProperties]: 149 """ 150 Fetch resource information 151 152 IAM permissions required: 153 - states:DescribeStateMachine 154 - states:ListTagsForResource 155 """ 156 raise NotImplementedError 157 158 def delete( 159 self, 160 request: ResourceRequest[StepFunctionsStateMachineProperties], 161 ) -> ProgressEvent[StepFunctionsStateMachineProperties]: 162 """ 163 Delete a resource 164 165 IAM permissions required: 166 - states:DeleteStateMachine 167 - states:DescribeStateMachine 168 """ 169 model = request.desired_state 170 step_function = request.aws_client_factory.stepfunctions 171 172 step_function.delete_state_machine(stateMachineArn=model["Arn"]) 173 174 return ProgressEvent( 175 status=OperationStatus.SUCCESS, 176 resource_model=model, 177 custom_context=request.custom_context, 178 ) 179 180 def update( 181 self, 182 request: ResourceRequest[StepFunctionsStateMachineProperties], 183 ) -> ProgressEvent[StepFunctionsStateMachineProperties]: 184 """ 185 Update a resource 186 187 IAM permissions required: 188 - states:UpdateStateMachine 189 - states:TagResource 190 - states:UntagResource 191 - states:ListTagsForResource 192 - iam:PassRole 193 """ 194 model = request.desired_state 195 step_function = request.aws_client_factory.stepfunctions 196 197 if not model.get("Arn"): 198 model["Arn"] = request.previous_state["Arn"] 199 200 params = { 201 "stateMachineArn": model["Arn"], 202 "definition": model["DefinitionString"], 203 } 204 205 step_function.update_state_machine(**params) 206 207 return ProgressEvent( 208 status=OperationStatus.SUCCESS, 209 resource_model=model, 210 custom_context=request.custom_context, 211 ) 212 213 214 def _apply_substitutions(definition: str, substitutions: dict[str, str]) -> str: 215 substitution_regex = re.compile("\\${[a-zA-Z0-9_]+}") # might be a bit too strict in some cases 216 tokens = substitution_regex.findall(definition) 217 result = definition 218 for token in tokens: 219 raw_token = token[2:-1] # strip ${ and } 220 if raw_token not in substitutions.keys(): 221 raise 222 result = result.replace(token, substitutions[raw_token]) 223 224 return result 225 [end of localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py --- a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py +++ b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py @@ -1,6 +1,7 @@ # LocalStack Resource Provider Scaffolding v2 from __future__ import annotations +import json import re from pathlib import Path from typing import Optional, TypedDict @@ -129,14 +130,20 @@ ) def _get_definition(self, model, s3_client): - definition_str = model.get("DefinitionString") - s3_location = model.get("DefinitionS3Location") - if not definition_str and s3_location: + if "DefinitionString" in model: + definition_str = model.get("DefinitionString") + elif "DefinitionS3Location" in model: # TODO: currently not covered by tests - add a test to mimick the behavior of "sam deploy ..." - + s3_location = model.get("DefinitionS3Location") LOG.debug("Fetching state machine definition from S3: %s", s3_location) result = s3_client.get_object(Bucket=s3_location["Bucket"], Key=s3_location["Key"]) definition_str = to_str(result["Body"].read()) + elif "Definition" in model: + definition = model.get("Definition") + definition_str = json.dumps(definition) + else: + definition_str = None + substitutions = model.get("DefinitionSubstitutions") if substitutions is not None: definition_str = _apply_substitutions(definition_str, substitutions)
{"golden_diff": "diff --git a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py\n--- a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py\n+++ b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py\n@@ -1,6 +1,7 @@\n # LocalStack Resource Provider Scaffolding v2\n from __future__ import annotations\n \n+import json\n import re\n from pathlib import Path\n from typing import Optional, TypedDict\n@@ -129,14 +130,20 @@\n )\n \n def _get_definition(self, model, s3_client):\n- definition_str = model.get(\"DefinitionString\")\n- s3_location = model.get(\"DefinitionS3Location\")\n- if not definition_str and s3_location:\n+ if \"DefinitionString\" in model:\n+ definition_str = model.get(\"DefinitionString\")\n+ elif \"DefinitionS3Location\" in model:\n # TODO: currently not covered by tests - add a test to mimick the behavior of \"sam deploy ...\"\n-\n+ s3_location = model.get(\"DefinitionS3Location\")\n LOG.debug(\"Fetching state machine definition from S3: %s\", s3_location)\n result = s3_client.get_object(Bucket=s3_location[\"Bucket\"], Key=s3_location[\"Key\"])\n definition_str = to_str(result[\"Body\"].read())\n+ elif \"Definition\" in model:\n+ definition = model.get(\"Definition\")\n+ definition_str = json.dumps(definition)\n+ else:\n+ definition_str = None\n+\n substitutions = model.get(\"DefinitionSubstitutions\")\n if substitutions is not None:\n definition_str = _apply_substitutions(definition_str, substitutions)\n", "issue": "bug: StateMachine yaml Definition is not supported (DefinitionString works)\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nwhen describing using cloudformation YAML with Definition, LocalStack fails to create StateMachine. Switching to YAML with DefinitionString works as expected. 
\r\n\r\nExamples taken from here\r\nhttps://docs.aws.amazon.com/step-functions/latest/dg/development-options.html#development-options-format\r\n\r\nAttaching working example \r\n[localstack_stepfunctions_and_serverless.zip](https://github.com/localstack/localstack/files/7791134/localstack_stepfunctions_and_serverless.z\r\n\r\n\r\n\n\n### Expected Behavior\n\nYAML with Definition is much more preferable in a complex state machine description with bunch of substitutions, references and parameters\n\n### How are you starting LocalStack?\n\nWith the `localstack` script\n\n### Steps To Reproduce\n\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\n localstack --debug start -d\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n awslocal s3 mb s3://s4-echo-serverless-deployment-share-local\r\nsls deploy --stage local\r\n\n\n### Environment\n\n```markdown\n- OS: OSX Monterey\r\n- LocalStack: latest\n```\n\n\n### Anything else?\n\nError when using YAML with Definition\r\n[error.txt](https://github.com/localstack/localstack/files/7791149/error.txt)\r\n\r\nSuccess when using YAML with DefinitionString\r\n[success.txt](https://github.com/localstack/localstack/files/7791151/success.txt)\r\n\r\n\n", "before_files": [{"content": "# LocalStack Resource Provider Scaffolding v2\nfrom __future__ import annotations\n\nimport re\nfrom pathlib import Path\nfrom typing import Optional, TypedDict\n\nimport localstack.services.cloudformation.provider_utils as util\nfrom localstack.services.cloudformation.resource_provider import (\n LOG,\n OperationStatus,\n ProgressEvent,\n ResourceProvider,\n ResourceRequest,\n)\nfrom localstack.utils.strings import to_str\n\n\nclass StepFunctionsStateMachineProperties(TypedDict):\n RoleArn: Optional[str]\n Arn: Optional[str]\n Definition: Optional[dict]\n DefinitionS3Location: Optional[S3Location]\n DefinitionString: Optional[str]\n DefinitionSubstitutions: Optional[dict]\n LoggingConfiguration: Optional[LoggingConfiguration]\n Name: Optional[str]\n StateMachineName: Optional[str]\n StateMachineRevisionId: Optional[str]\n StateMachineType: Optional[str]\n Tags: Optional[list[TagsEntry]]\n TracingConfiguration: Optional[TracingConfiguration]\n\n\nclass CloudWatchLogsLogGroup(TypedDict):\n LogGroupArn: Optional[str]\n\n\nclass LogDestination(TypedDict):\n CloudWatchLogsLogGroup: Optional[CloudWatchLogsLogGroup]\n\n\nclass LoggingConfiguration(TypedDict):\n Destinations: Optional[list[LogDestination]]\n IncludeExecutionData: Optional[bool]\n Level: Optional[str]\n\n\nclass TracingConfiguration(TypedDict):\n Enabled: Optional[bool]\n\n\nclass S3Location(TypedDict):\n Bucket: Optional[str]\n Key: Optional[str]\n Version: Optional[str]\n\n\nclass TagsEntry(TypedDict):\n Key: Optional[str]\n Value: Optional[str]\n\n\nREPEATED_INVOCATION = \"repeated_invocation\"\n\n\nclass StepFunctionsStateMachineProvider(ResourceProvider[StepFunctionsStateMachineProperties]):\n TYPE = \"AWS::StepFunctions::StateMachine\" # Autogenerated. Don't change\n SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change\n\n def create(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Create a new resource.\n\n Primary identifier fields:\n - /properties/Arn\n\n Required properties:\n - RoleArn\n\n Create-only properties:\n - /properties/StateMachineName\n - /properties/StateMachineType\n\n Read-only properties:\n - /properties/Arn\n - /properties/Name\n - /properties/StateMachineRevisionId\n\n IAM permissions required:\n - states:CreateStateMachine\n - iam:PassRole\n - s3:GetObject\n\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n if not model.get(\"StateMachineName\"):\n model[\"StateMachineName\"] = util.generate_default_name(\n stack_name=request.stack_name, logical_resource_id=request.logical_resource_id\n )\n\n params = {\n \"name\": model.get(\"StateMachineName\"),\n \"roleArn\": model.get(\"RoleArn\"),\n \"type\": model.get(\"StateMachineType\", \"STANDARD\"),\n }\n\n # get definition\n s3_client = request.aws_client_factory.s3\n\n definition_str = self._get_definition(model, s3_client)\n\n params[\"definition\"] = definition_str\n\n response = step_function.create_state_machine(**params)\n\n model[\"Arn\"] = response[\"stateMachineArn\"]\n model[\"Name\"] = model[\"StateMachineName\"]\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def _get_definition(self, model, s3_client):\n definition_str = model.get(\"DefinitionString\")\n s3_location = model.get(\"DefinitionS3Location\")\n if not definition_str and s3_location:\n # TODO: currently not covered by tests - add a test to mimick the behavior of \"sam deploy ...\"\n\n LOG.debug(\"Fetching state machine definition from S3: %s\", s3_location)\n result = s3_client.get_object(Bucket=s3_location[\"Bucket\"], Key=s3_location[\"Key\"])\n definition_str = to_str(result[\"Body\"].read())\n substitutions = model.get(\"DefinitionSubstitutions\")\n if substitutions is not None:\n definition_str = _apply_substitutions(definition_str, substitutions)\n return definition_str\n\n def read(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Fetch resource information\n\n IAM permissions required:\n - states:DescribeStateMachine\n - states:ListTagsForResource\n \"\"\"\n raise NotImplementedError\n\n def delete(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Delete a resource\n\n IAM permissions required:\n - states:DeleteStateMachine\n - states:DescribeStateMachine\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n step_function.delete_state_machine(stateMachineArn=model[\"Arn\"])\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def update(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Update a resource\n\n IAM permissions required:\n - states:UpdateStateMachine\n - states:TagResource\n - states:UntagResource\n - states:ListTagsForResource\n - iam:PassRole\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n if not model.get(\"Arn\"):\n model[\"Arn\"] = 
request.previous_state[\"Arn\"]\n\n params = {\n \"stateMachineArn\": model[\"Arn\"],\n \"definition\": model[\"DefinitionString\"],\n }\n\n step_function.update_state_machine(**params)\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n\ndef _apply_substitutions(definition: str, substitutions: dict[str, str]) -> str:\n substitution_regex = re.compile(\"\\\\${[a-zA-Z0-9_]+}\") # might be a bit too strict in some cases\n tokens = substitution_regex.findall(definition)\n result = definition\n for token in tokens:\n raw_token = token[2:-1] # strip ${ and }\n if raw_token not in substitutions.keys():\n raise\n result = result.replace(token, substitutions[raw_token])\n\n return result\n", "path": "localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py"}]}
2,895
384
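The localstack record's patch is mostly about precedence: `AWS::StepFunctions::StateMachine` accepts the definition as a string (`DefinitionString`), an S3 pointer (`DefinitionS3Location`), or an inline object (`Definition`), and the original `_get_definition` only handled the first two, so YAML templates using `Definition` produced no definition at all. Below is a distilled version of the patched branching; `fetch_s3` stands in for the boto-style client call and is an assumed callable, not part of the patch.

```python
# Distilled form of the patched _get_definition precedence order.
import json


def resolve_definition(model, fetch_s3=None):
    if "DefinitionString" in model:
        return model["DefinitionString"]
    if "DefinitionS3Location" in model:
        loc = model["DefinitionS3Location"]
        return fetch_s3(loc["Bucket"], loc["Key"])  # assumed callable stub
    if "Definition" in model:
        # The new branch: inline YAML/JSON objects are serialized to a
        # string so the later substitution pass has something to work on.
        return json.dumps(model["Definition"])
    return None


assert resolve_definition({"Definition": {"StartAt": "A", "States": {}}}) == (
    '{"StartAt": "A", "States": {}}'
)
assert resolve_definition({"DefinitionString": "{}"}) == "{}"
```

Serializing the `Definition` dict with `json.dumps` before `_apply_substitutions` runs matters, since the substitution pass operates on the string form.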
gh_patches_debug_28588
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-920
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Errors when using Fn::Transform *cfn-lint version: (`0.20.2`)* Given the following valid template, I see 2 errors: ```yaml Parameters: ImageId: Type: AWS::EC2::Image::Id InstanceType: Type: String Resources: LaunchConfiguration: Type: AWS::AutoScaling::LaunchConfiguration Properties: ImageId: !Ref ImageId InstanceType: !Ref InstanceType UserData: 'Fn::Base64': 'Fn::Sub': 'Fn::Transform': # Returns a string that contains Fn::Sub tokens like ${AWS::Region} Name: DynamicUserData ``` * `E1019 Sub should be a string or array of 2 items for Resources/LaunchConfiguration/Properties/UserData/Fn::Base64/Fn::Sub` - `Fn::Transform` can return a string or a template, so `Fn::Sub` should be forgiving of it. * `E0002 Unknown exception while processing rule W2001: expected string or bytes-like object` Same root cause, but fails in a different way due to assumption that it complies with E1019 in a specific way. </issue> <code> [start of src/cfnlint/rules/functions/Sub.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
16 """ 17 import six 18 from cfnlint import CloudFormationLintRule 19 from cfnlint import RuleMatch 20 21 22 class Sub(CloudFormationLintRule): 23 """Check if Sub values are correct""" 24 id = 'E1019' 25 shortdesc = 'Sub validation of parameters' 26 description = 'Making sure the sub function is properly configured' 27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html' 28 tags = ['functions', 'sub'] 29 30 def _test_string(self, cfn, sub_string, parameters, tree): 31 """Test if a string has appropriate parameters""" 32 33 matches = [] 34 string_params = cfn.get_sub_parameters(sub_string) 35 36 for string_param in string_params: 37 if isinstance(string_param, (six.string_types)): 38 matches.extend(self._test_parameter(string_param, cfn, parameters, tree)) 39 40 return matches 41 42 def _get_parameters(self, cfn): 43 """Get all Parameter Names""" 44 results = {} 45 parameters = cfn.template.get('Parameters', {}) 46 if isinstance(parameters, dict): 47 for param_name, param_values in parameters.items(): 48 # This rule isn't here to check the Types but we need 49 # something valid if it doesn't exist 50 results[param_name] = param_values.get('Type', 'String') 51 52 return results 53 54 def _test_parameters(self, parameters, cfn, tree): 55 """Check parameters for appropriate configuration""" 56 57 supported_functions = [ 58 'Fn::Base64', 59 'Fn::FindInMap', 60 'Fn::GetAtt', 61 'Fn::GetAZs', 62 'Fn::ImportValue', 63 'Fn::If', 64 'Fn::Join', 65 'Fn::Select', 66 'Fn::Sub', 67 'Ref' 68 ] 69 70 matches = [] 71 for parameter_name, parameter_value_obj in parameters.items(): 72 param_tree = tree[:] + [parameter_name] 73 if isinstance(parameter_value_obj, dict): 74 if len(parameter_value_obj) == 1: 75 for key, value in parameter_value_obj.items(): 76 if key not in supported_functions: 77 message = 'Sub parameter should use a valid function for {0}' 78 matches.append(RuleMatch( 79 param_tree, message.format('/'.join(map(str, tree))))) 80 elif key in ['Ref']: 81 matches.extend(self._test_parameter(value, cfn, {}, tree)) 82 elif key in ['Fn::GetAtt']: 83 if isinstance(value, list): 84 matches.extend(self._test_parameter('.'.join(value), cfn, {}, tree)) 85 elif isinstance(value, six.string_types): 86 matches.extend(self._test_parameter(value, cfn, {}, tree)) 87 else: 88 message = 'Sub parameter should be an object of 1 for {0}' 89 matches.append(RuleMatch( 90 param_tree, message.format('/'.join(map(str, tree))))) 91 elif not isinstance(parameter_value_obj, six.string_types): 92 message = 'Sub parameter should be an object of 1 or string for {0}' 93 matches.append(RuleMatch( 94 param_tree, message.format('/'.join(map(str, tree))))) 95 96 return matches 97 98 def _test_parameter(self, parameter, cfn, parameters, tree): 99 """ Test a parameter """ 100 101 matches = [] 102 get_atts = cfn.get_valid_getatts() 103 104 valid_pseudo_params = [ 105 'AWS::Region', 106 'AWS::StackName', 107 'AWS::URLSuffix', 108 'AWS::StackId', 109 'AWS::Region', 110 'AWS::Partition', 111 'AWS::NotificationARNs', 112 'AWS::AccountId' 113 ] 114 115 odd_list_params = [ 116 'CommaDelimitedList', 117 'AWS::SSM::Parameter::Value<CommaDelimitedList>', 118 ] 119 120 valid_params = valid_pseudo_params 121 valid_params.extend(cfn.get_resource_names()) 122 template_parameters = self._get_parameters(cfn) 123 124 for key, _ in parameters.items(): 125 valid_params.append(key) 126 127 if parameter not in valid_params: 128 found = False 129 if parameter in template_parameters: 130 found = 
True 131 if ( 132 template_parameters.get(parameter) in odd_list_params or 133 template_parameters.get(parameter).startswith('AWS::SSM::Parameter::Value<List') or 134 template_parameters.get(parameter).startswith('List')): 135 message = 'Fn::Sub cannot use list {0} at {1}' 136 matches.append(RuleMatch( 137 tree, message.format(parameter, '/'.join(map(str, tree))))) 138 for resource, attributes in get_atts.items(): 139 for attribute_name, attribute_values in attributes.items(): 140 if resource == parameter.split('.')[0] and attribute_name == '*': 141 if attribute_values.get('Type') == 'List': 142 message = 'Fn::Sub cannot use list {0} at {1}' 143 matches.append(RuleMatch( 144 tree, message.format(parameter, '/'.join(map(str, tree))))) 145 found = True 146 elif (resource == parameter.split('.')[0] and 147 attribute_name == '.'.join(parameter.split('.')[1:])): 148 if attribute_values.get('Type') == 'List': 149 message = 'Fn::Sub cannot use list {0} at {1}' 150 matches.append(RuleMatch( 151 tree, message.format(parameter, '/'.join(map(str, tree))))) 152 found = True 153 if not found: 154 message = 'Parameter {0} for Fn::Sub not found at {1}' 155 matches.append(RuleMatch( 156 tree, message.format(parameter, '/'.join(map(str, tree))))) 157 158 return matches 159 160 def match(self, cfn): 161 """Check CloudFormation Join""" 162 163 matches = [] 164 165 sub_objs = cfn.search_deep_keys('Fn::Sub') 166 167 for sub_obj in sub_objs: 168 sub_value_obj = sub_obj[-1] 169 tree = sub_obj[:-1] 170 if isinstance(sub_value_obj, six.string_types): 171 matches.extend(self._test_string(cfn, sub_value_obj, {}, tree)) 172 elif isinstance(sub_value_obj, list): 173 if len(sub_value_obj) == 2: 174 sub_string = sub_value_obj[0] 175 parameters = sub_value_obj[1] 176 if not isinstance(sub_string, six.string_types): 177 message = 'Subs first element should be of type string for {0}' 178 matches.append(RuleMatch( 179 tree + [0], message.format('/'.join(map(str, tree))))) 180 if not isinstance(parameters, dict): 181 message = 'Subs second element should be an object for {0}' 182 matches.append(RuleMatch( 183 tree + [1], message.format('/'.join(map(str, tree))))) 184 else: 185 matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0])) 186 matches.extend(self._test_parameters(parameters, cfn, tree)) 187 else: 188 message = 'Sub should be an array of 2 for {0}' 189 matches.append(RuleMatch( 190 tree, message.format('/'.join(map(str, tree))))) 191 else: 192 message = 'Sub should be a string or array of 2 items for {0}' 193 matches.append(RuleMatch( 194 tree, message.format('/'.join(map(str, tree))))) 195 196 return matches 197 [end of src/cfnlint/rules/functions/Sub.py] [start of src/cfnlint/rules/parameters/Used.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 from __future__ import unicode_literals 18 import re 19 import six 20 from cfnlint import CloudFormationLintRule 21 from cfnlint import RuleMatch 22 23 24 class Used(CloudFormationLintRule): 25 """Check if Parameters are used""" 26 id = 'W2001' 27 shortdesc = 'Check if Parameters are Used' 28 description = 'Making sure the parameters defined are used' 29 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint' 30 tags = ['parameters'] 31 32 def searchstring(self, string, parameter): 33 """Search string for tokenized fields""" 34 regex = re.compile(r'\${(%s)}' % parameter) 35 return regex.findall(string) 36 37 def isparaminref(self, subs, parameter): 38 """Search sub strings for parameters""" 39 for sub in subs: 40 if isinstance(sub, (six.text_type, six.string_types)): 41 if self.searchstring(sub, parameter): 42 return True 43 44 return False 45 46 def match(self, cfn): 47 """Check CloudFormation Parameters""" 48 49 matches = [] 50 51 reftrees = cfn.transform_pre.get('Ref') 52 subtrees = cfn.transform_pre.get('Fn::Sub') 53 refs = [] 54 for reftree in reftrees: 55 refs.append(reftree[-1]) 56 subs = [] 57 for subtree in subtrees: 58 if isinstance(subtree[-1], list): 59 subs.extend(cfn.get_sub_parameters(subtree[-1][0])) 60 else: 61 subs.extend(cfn.get_sub_parameters(subtree[-1])) 62 63 for paramname, _ in cfn.get_parameters().items(): 64 if paramname not in refs: 65 if paramname not in subs: 66 message = 'Parameter {0} not used.' 67 matches.append(RuleMatch( 68 ['Parameters', paramname], 69 message.format(paramname) 70 )) 71 72 return matches 73 [end of src/cfnlint/rules/parameters/Used.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/functions/Sub.py b/src/cfnlint/rules/functions/Sub.py --- a/src/cfnlint/rules/functions/Sub.py +++ b/src/cfnlint/rules/functions/Sub.py @@ -188,6 +188,17 @@ message = 'Sub should be an array of 2 for {0}' matches.append(RuleMatch( tree, message.format('/'.join(map(str, tree))))) + elif isinstance(sub_value_obj, dict): + if len(sub_value_obj) == 1: + for key, _ in sub_value_obj.items(): + if not key == 'Fn::Transform': + message = 'Sub should be a string or array of 2 items for {0}' + matches.append(RuleMatch( + tree, message.format('/'.join(map(str, tree))))) + else: + message = 'Sub should be a string or array of 2 items for {0}' + matches.append(RuleMatch( + tree, message.format('/'.join(map(str, tree))))) else: message = 'Sub should be a string or array of 2 items for {0}' matches.append(RuleMatch( diff --git a/src/cfnlint/rules/parameters/Used.py b/src/cfnlint/rules/parameters/Used.py --- a/src/cfnlint/rules/parameters/Used.py +++ b/src/cfnlint/rules/parameters/Used.py @@ -57,7 +57,7 @@ for subtree in subtrees: if isinstance(subtree[-1], list): subs.extend(cfn.get_sub_parameters(subtree[-1][0])) - else: + elif isinstance(subtree[-1], six.string_types): subs.extend(cfn.get_sub_parameters(subtree[-1])) for paramname, _ in cfn.get_parameters().items():
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/Sub.py b/src/cfnlint/rules/functions/Sub.py\n--- a/src/cfnlint/rules/functions/Sub.py\n+++ b/src/cfnlint/rules/functions/Sub.py\n@@ -188,6 +188,17 @@\n message = 'Sub should be an array of 2 for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n+ elif isinstance(sub_value_obj, dict):\n+ if len(sub_value_obj) == 1:\n+ for key, _ in sub_value_obj.items():\n+ if not key == 'Fn::Transform':\n+ message = 'Sub should be a string or array of 2 items for {0}'\n+ matches.append(RuleMatch(\n+ tree, message.format('/'.join(map(str, tree)))))\n+ else:\n+ message = 'Sub should be a string or array of 2 items for {0}'\n+ matches.append(RuleMatch(\n+ tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\ndiff --git a/src/cfnlint/rules/parameters/Used.py b/src/cfnlint/rules/parameters/Used.py\n--- a/src/cfnlint/rules/parameters/Used.py\n+++ b/src/cfnlint/rules/parameters/Used.py\n@@ -57,7 +57,7 @@\n for subtree in subtrees:\n if isinstance(subtree[-1], list):\n subs.extend(cfn.get_sub_parameters(subtree[-1][0]))\n- else:\n+ elif isinstance(subtree[-1], six.string_types):\n subs.extend(cfn.get_sub_parameters(subtree[-1]))\n \n for paramname, _ in cfn.get_parameters().items():\n", "issue": "Errors when using Fn::Transform\n*cfn-lint version: (`0.20.2`)*\r\n\r\nGiven the following valid template, I see 2 errors:\r\n\r\n```yaml\r\nParameters:\r\n ImageId:\r\n Type: AWS::EC2::Image::Id\r\n InstanceType:\r\n Type: String\r\n\r\nResources:\r\n LaunchConfiguration:\r\n Type: AWS::AutoScaling::LaunchConfiguration\r\n Properties:\r\n ImageId: !Ref ImageId\r\n InstanceType: !Ref InstanceType\r\n UserData:\r\n 'Fn::Base64':\r\n 'Fn::Sub':\r\n 'Fn::Transform': # Returns a string that contains Fn::Sub tokens like ${AWS::Region}\r\n Name: DynamicUserData\r\n```\r\n* `E1019 Sub should be a string or array of 2 items for Resources/LaunchConfiguration/Properties/UserData/Fn::Base64/Fn::Sub` - `Fn::Transform` can return a string or a template, so `Fn::Sub` should be forgiving of it.\r\n* `E0002 Unknown exception while processing rule W2001: expected string or bytes-like object` Same root cause, but fails in a different way due to assumption that it complies with E1019 in a specific way.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Sub(CloudFormationLintRule):\n \"\"\"Check if Sub values are correct\"\"\"\n id = 'E1019'\n shortdesc = 'Sub validation of parameters'\n description = 'Making sure the sub function is properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n def _test_string(self, cfn, sub_string, parameters, tree):\n \"\"\"Test if a string has appropriate parameters\"\"\"\n\n matches = []\n string_params = cfn.get_sub_parameters(sub_string)\n\n for string_param in string_params:\n if isinstance(string_param, (six.string_types)):\n matches.extend(self._test_parameter(string_param, cfn, parameters, tree))\n\n return matches\n\n def _get_parameters(self, cfn):\n \"\"\"Get all Parameter Names\"\"\"\n results = {}\n parameters = cfn.template.get('Parameters', {})\n if isinstance(parameters, dict):\n for param_name, param_values in parameters.items():\n # This rule isn't here to check the Types but we need\n # something valid if it doesn't exist\n results[param_name] = param_values.get('Type', 'String')\n\n return results\n\n def _test_parameters(self, parameters, cfn, tree):\n \"\"\"Check parameters for appropriate configuration\"\"\"\n\n supported_functions = [\n 'Fn::Base64',\n 'Fn::FindInMap',\n 'Fn::GetAtt',\n 'Fn::GetAZs',\n 'Fn::ImportValue',\n 'Fn::If',\n 'Fn::Join',\n 'Fn::Select',\n 'Fn::Sub',\n 'Ref'\n ]\n\n matches = []\n for parameter_name, parameter_value_obj in parameters.items():\n param_tree = tree[:] + [parameter_name]\n if isinstance(parameter_value_obj, dict):\n if len(parameter_value_obj) == 1:\n for key, value in parameter_value_obj.items():\n if key not in supported_functions:\n message = 'Sub parameter should use a valid function for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif key in ['Ref']:\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n elif key in ['Fn::GetAtt']:\n if isinstance(value, list):\n matches.extend(self._test_parameter('.'.join(value), cfn, {}, tree))\n elif isinstance(value, six.string_types):\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n else:\n message = 'Sub parameter should be an object of 1 for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(parameter_value_obj, six.string_types):\n message = 'Sub parameter should be an object of 1 or string for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n\n def _test_parameter(self, parameter, cfn, parameters, tree):\n \"\"\" Test a parameter \"\"\"\n\n matches = []\n get_atts = cfn.get_valid_getatts()\n\n valid_pseudo_params = [\n 'AWS::Region',\n 'AWS::StackName',\n 'AWS::URLSuffix',\n 'AWS::StackId',\n 'AWS::Region',\n 'AWS::Partition',\n 'AWS::NotificationARNs',\n 'AWS::AccountId'\n ]\n\n odd_list_params = [\n 'CommaDelimitedList',\n 'AWS::SSM::Parameter::Value<CommaDelimitedList>',\n ]\n\n valid_params = valid_pseudo_params\n valid_params.extend(cfn.get_resource_names())\n template_parameters = self._get_parameters(cfn)\n\n for key, _ in 
parameters.items():\n valid_params.append(key)\n\n if parameter not in valid_params:\n found = False\n if parameter in template_parameters:\n found = True\n if (\n template_parameters.get(parameter) in odd_list_params or\n template_parameters.get(parameter).startswith('AWS::SSM::Parameter::Value<List') or\n template_parameters.get(parameter).startswith('List')):\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n for resource, attributes in get_atts.items():\n for attribute_name, attribute_values in attributes.items():\n if resource == parameter.split('.')[0] and attribute_name == '*':\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n elif (resource == parameter.split('.')[0] and\n attribute_name == '.'.join(parameter.split('.')[1:])):\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n if not found:\n message = 'Parameter {0} for Fn::Sub not found at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Join\"\"\"\n\n matches = []\n\n sub_objs = cfn.search_deep_keys('Fn::Sub')\n\n for sub_obj in sub_objs:\n sub_value_obj = sub_obj[-1]\n tree = sub_obj[:-1]\n if isinstance(sub_value_obj, six.string_types):\n matches.extend(self._test_string(cfn, sub_value_obj, {}, tree))\n elif isinstance(sub_value_obj, list):\n if len(sub_value_obj) == 2:\n sub_string = sub_value_obj[0]\n parameters = sub_value_obj[1]\n if not isinstance(sub_string, six.string_types):\n message = 'Subs first element should be of type string for {0}'\n matches.append(RuleMatch(\n tree + [0], message.format('/'.join(map(str, tree)))))\n if not isinstance(parameters, dict):\n message = 'Subs second element should be an object for {0}'\n matches.append(RuleMatch(\n tree + [1], message.format('/'.join(map(str, tree)))))\n else:\n matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0]))\n matches.extend(self._test_parameters(parameters, cfn, tree))\n else:\n message = 'Sub should be an array of 2 for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Sub.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import unicode_literals\nimport re\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Used(CloudFormationLintRule):\n \"\"\"Check if Parameters are used\"\"\"\n id = 'W2001'\n shortdesc = 'Check if Parameters are Used'\n description = 'Making sure the parameters defined are used'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['parameters']\n\n def searchstring(self, string, parameter):\n \"\"\"Search string for tokenized fields\"\"\"\n regex = re.compile(r'\\${(%s)}' % parameter)\n return regex.findall(string)\n\n def isparaminref(self, subs, parameter):\n \"\"\"Search sub strings for parameters\"\"\"\n for sub in subs:\n if isinstance(sub, (six.text_type, six.string_types)):\n if self.searchstring(sub, parameter):\n return True\n\n return False\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n reftrees = cfn.transform_pre.get('Ref')\n subtrees = cfn.transform_pre.get('Fn::Sub')\n refs = []\n for reftree in reftrees:\n refs.append(reftree[-1])\n subs = []\n for subtree in subtrees:\n if isinstance(subtree[-1], list):\n subs.extend(cfn.get_sub_parameters(subtree[-1][0]))\n else:\n subs.extend(cfn.get_sub_parameters(subtree[-1]))\n\n for paramname, _ in cfn.get_parameters().items():\n if paramname not in refs:\n if paramname not in subs:\n message = 'Parameter {0} not used.'\n matches.append(RuleMatch(\n ['Parameters', paramname],\n message.format(paramname)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/Used.py"}]}
3,807
398
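For the cfn-lint record, the E1019 half of the patch widens the accepted shapes for an `Fn::Sub` value to include a single-key mapping whose only key is `Fn::Transform`, since a macro can legitimately return the sub template at deploy time; the W2001 half guards `Used.py` so it only tokenizes string sub values instead of crashing on the transform dict. The sketch below restates the new E1019 rule in isolation, checking outer shape only (the real rule also validates that a two-item list is `[str, dict]` and recurses into the parameters); plain `str` is used here for brevity where the project itself still went through `six`.

```python
# Shape-only restatement of the widened E1019 acceptance rule.
def sub_value_is_acceptable(value):
    if isinstance(value, str):
        return True
    if isinstance(value, list):
        return len(value) == 2
    if isinstance(value, dict):
        return len(value) == 1 and "Fn::Transform" in value
    return False


assert sub_value_is_acceptable("${AWS::Region}")
assert sub_value_is_acceptable(["${Bucket}", {"Bucket": {"Ref": "MyBucket"}}])
assert sub_value_is_acceptable({"Fn::Transform": {"Name": "DynamicUserData"}})
assert not sub_value_is_acceptable({"Ref": "SomeParam"})
```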
gh_patches_debug_33794
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider motel6 is broken During the global build at 2021-08-18-14-42-26, spider **motel6** failed with **0 features** and **2 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/motel6.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson)) </issue> <code> [start of locations/spiders/motel6.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import json 4 from locations.items import GeojsonPointItem 5 6 brand_lookup = { 7 "MS": "Motel 6", 8 "SS": "Studio 6", 9 "HS": "Hotel 6" 10 } 11 12 13 class Motel6Spider(scrapy.Spider): 14 name = "motel6" 15 allowed_domains = ["motel6.com"] 16 start_urls = ( 17 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json', 18 ) 19 20 def parse(self, response): 21 idata = json.loads(response.body_as_unicode()) 22 storeids = idata.keys() 23 URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json' 24 for storeid in storeids: 25 try: 26 int(storeid) 27 except ValueError: 28 continue 29 try: 30 yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel) 31 except ValueError: 32 continue 33 34 def parse_hotel(self, response): 35 mdata = json.loads(response.body_as_unicode()) 36 37 properties = { 38 'ref': mdata["property_id"], 39 'name': mdata["name"], 40 'addr_full': mdata["address"], 41 'city': mdata["city"], 42 'postcode': mdata["zip"], 43 'lat': mdata["latitude"], 44 'lon': mdata["longitude"], 45 'phone': mdata["phone"], 46 'state': mdata["state"], 47 'website': mdata["microsite_url"], 48 'brand': brand_lookup[mdata["brand_id"]], 49 } 50 51 yield GeojsonPointItem(**properties) 52 [end of locations/spiders/motel6.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/motel6.py b/locations/spiders/motel6.py --- a/locations/spiders/motel6.py +++ b/locations/spiders/motel6.py @@ -14,20 +14,21 @@ name = "motel6" allowed_domains = ["motel6.com"] start_urls = ( - 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json', + 'https://www.motel6.com/content/g6-cache/property-summary.1.json', ) def parse(self, response): idata = json.loads(response.body_as_unicode()) - storeids = idata.keys() - URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json' - for storeid in storeids: + url = 'https://www.motel6.com/bin/g6/propertydata.{}.json' + + for storeid in idata.keys(): try: int(storeid) except ValueError: continue + try: - yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel) + yield scrapy.Request(url.format(storeid), callback=self.parse_hotel) except ValueError: continue @@ -35,17 +36,17 @@ mdata = json.loads(response.body_as_unicode()) properties = { - 'ref': mdata["property_id"], - 'name': mdata["name"], - 'addr_full': mdata["address"], - 'city': mdata["city"], - 'postcode': mdata["zip"], - 'lat': mdata["latitude"], - 'lon': mdata["longitude"], - 'phone': mdata["phone"], - 'state': mdata["state"], - 'website': mdata["microsite_url"], - 'brand': brand_lookup[mdata["brand_id"]], + 'ref': mdata["property_id"], + 'name': mdata["name"], + 'addr_full': mdata["address"], + 'city': mdata["city"], + 'postcode': mdata["zip"], + 'lat': mdata["latitude"], + 'lon': mdata["longitude"], + 'phone': mdata["phone"], + 'state': mdata["state"], + 'website': mdata["microsite_url"], + 'brand': brand_lookup[mdata["brand_id"]], } yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/motel6.py b/locations/spiders/motel6.py\n--- a/locations/spiders/motel6.py\n+++ b/locations/spiders/motel6.py\n@@ -14,20 +14,21 @@\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n- 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',\n+ 'https://www.motel6.com/content/g6-cache/property-summary.1.json',\n )\n \n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n- storeids = idata.keys()\n- URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'\n- for storeid in storeids:\n+ url = 'https://www.motel6.com/bin/g6/propertydata.{}.json'\n+\n+ for storeid in idata.keys():\n try:\n int(storeid)\n except ValueError:\n continue\n+\n try:\n- yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)\n+ yield scrapy.Request(url.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n \n@@ -35,17 +36,17 @@\n mdata = json.loads(response.body_as_unicode())\n \n properties = {\n- 'ref': mdata[\"property_id\"],\n- 'name': mdata[\"name\"],\n- 'addr_full': mdata[\"address\"],\n- 'city': mdata[\"city\"],\n- 'postcode': mdata[\"zip\"],\n- 'lat': mdata[\"latitude\"],\n- 'lon': mdata[\"longitude\"],\n- 'phone': mdata[\"phone\"],\n- 'state': mdata[\"state\"],\n- 'website': mdata[\"microsite_url\"],\n- 'brand': brand_lookup[mdata[\"brand_id\"]],\n+ 'ref': mdata[\"property_id\"],\n+ 'name': mdata[\"name\"],\n+ 'addr_full': mdata[\"address\"],\n+ 'city': mdata[\"city\"],\n+ 'postcode': mdata[\"zip\"],\n+ 'lat': mdata[\"latitude\"],\n+ 'lon': mdata[\"longitude\"],\n+ 'phone': mdata[\"phone\"],\n+ 'state': mdata[\"state\"],\n+ 'website': mdata[\"microsite_url\"],\n+ 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider motel6 is broken\nDuring the global build at 2021-08-18-14-42-26, spider **motel6** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/motel6.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\nbrand_lookup = {\n \"MS\": \"Motel 6\",\n \"SS\": \"Studio 6\",\n \"HS\": \"Hotel 6\"\n}\n\n\nclass Motel6Spider(scrapy.Spider):\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',\n )\n\n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n storeids = idata.keys()\n URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'\n for storeid in storeids:\n try:\n int(storeid)\n except ValueError:\n continue\n try:\n yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n\n def parse_hotel(self, response):\n mdata = json.loads(response.body_as_unicode())\n\n properties = {\n 'ref': mdata[\"property_id\"],\n 'name': mdata[\"name\"],\n 'addr_full': mdata[\"address\"],\n 'city': mdata[\"city\"],\n 'postcode': mdata[\"zip\"],\n 'lat': mdata[\"latitude\"],\n 'lon': mdata[\"longitude\"],\n 'phone': mdata[\"phone\"],\n 'state': mdata[\"state\"],\n 'website': mdata[\"microsite_url\"],\n 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n\n yield 
GeojsonPointItem(**properties)\n", "path": "locations/spiders/motel6.py"}]}
1,174
553
gh_patches_debug_42319
rasdani/github-patches
git_diff
AlexsLemonade__refinebio-3385
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Weekly stats shows 0 downloads for some users ### Context A lot of changes has gone into prod recently. One of them is the EngagementBot weekly stats updates. ### Problem or idea The most recent summary contains lines like "0 downloads from " indicating some (potential) stats inaccuracy. ### Solution or next step Figure out why the value is empty and fix the issue. If everything is right then just hide those 0 downloads items. </issue> <code> [start of api/data_refinery_api/management/commands/post_downloads_summary.py] 1 import datetime 2 from collections import Counter 3 4 from django.conf import settings 5 from django.core.management.base import BaseCommand 6 from django.template.defaultfilters import pluralize 7 from django.utils import timezone 8 9 import requests 10 11 from data_refinery_common.models import Dataset, DatasetAnnotation 12 13 14 class Command(BaseCommand): 15 help = "Post downloads summary to Slack" 16 17 def add_arguments(self, parser): 18 parser.add_argument( 19 "--channel", 20 type=str, 21 default="ccdl-general", 22 help=("Optional parameter to choose the channel where the message will be posted."), 23 ) 24 parser.add_argument( 25 "--days", 26 type=int, 27 default=7, # Default to a week. 28 help=("Number of days in the past for which to build the stats."), 29 ) 30 parser.add_argument( 31 "--top-countries", 32 type=int, 33 default=5, 34 help=("Number of countries to show in the per country downloads summary."), 35 ) 36 37 def handle(self, *args, **options): 38 post_downloads_summary(options["days"], options["channel"], options["top_countries"]) 39 40 41 def format_user_data(header, data): 42 """ 43 Formats user email, downloads count, location information sorted 44 by downloads count. 45 """ 46 # Allowed overhead for 2 column sorting: downloads count, email. 47 lines = sorted(data, key=lambda u: u[0].lower()) 48 lines = [ 49 f"{email.lower()} | {downloads} download{pluralize(downloads)} from {location}" 50 for email, downloads, location in sorted(lines, key=lambda u: u[1], reverse=True) 51 ] 52 lines.insert(0, header) 53 54 return "\n".join(lines) 55 56 57 def get_user_location(ip_address): 58 """Gets user location information based on their IP address.""" 59 try: 60 data = requests.get(f"https://ipapi.co/{ip_address}/json/", timeout=10).json() 61 # The list of available fields https://ipapi.co/api/#complete-location 62 return ", ".join((data["city"], data["country_name"])) 63 except (requests.exceptions.RequestException, KeyError, ValueError): 64 return ip_address 65 66 67 def post_downloads_summary(days, channel, top_countries=5): 68 """Posts downloads summary to Slack channel.""" 69 70 start_time = timezone.now() - datetime.timedelta(days=days) 71 datasets = Dataset.processed_filtered_objects.filter( 72 created_at__gt=start_time 73 ).prefetch_related("datasetannotation_set") 74 annotations = DatasetAnnotation.objects.filter(dataset__in=datasets) 75 users_emails = set(dataset.email_address for dataset in datasets) 76 77 locations = set() 78 locations_cache = dict() 79 for annotation in annotations: 80 if "location" not in annotation.data: 81 ip_address = annotation.data["ip"] 82 if ip_address not in locations_cache: 83 locations_cache[ip_address] = get_user_location(ip_address) 84 85 # Save the locations permanently, since IP addresses can cycle over time. 
86 annotation.data["location"] = locations_cache[ip_address] 87 annotation.save() 88 locations.add(annotation.data["location"]) 89 90 downloads_per_country = Counter() 91 downloads_total = 0 92 new_users = [] 93 returning_users = [] 94 for user_email in users_emails: 95 user_annotations = annotations.filter(dataset__email_address=user_email) 96 user_downloads = user_annotations.count() 97 downloads_total += user_downloads 98 99 user_locations = set() 100 for user_annotation in user_annotations: 101 user_locations.add(user_annotation.data["location"]) 102 try: 103 country = user_annotation.data["location"].split(", ")[1] 104 downloads_per_country.update({country: 1}) 105 except (IndexError, TypeError): 106 pass 107 108 user_locations = "; ".join(sorted(user_locations)) 109 user_data = (user_email, user_downloads, user_locations) 110 111 is_returning_user = Dataset.processed_filtered_objects.filter( 112 created_at__lt=start_time, email_address=user_email 113 ) 114 if is_returning_user: 115 returning_users.append(user_data) 116 else: 117 new_users.append(user_data) 118 119 if downloads_total: 120 locations_count = len(locations) 121 users_emails_count = len(users_emails) 122 fallback_text = ( 123 f"In the last {days} day{pluralize(days)}, {users_emails_count} " 124 f"user{pluralize(users_emails_count)} downloaded {downloads_total} " 125 f"dataset{pluralize(downloads_total)} from {locations_count} " 126 f"location{pluralize(locations_count)}." 127 ) 128 else: 129 fallback_text = f"There were no downloads in the last {days} day{pluralize(days)}." 130 131 blocks = [ 132 {"type": "section", "text": {"type": "plain_text", "emoji": True, "text": fallback_text}} 133 ] 134 135 if new_users: 136 blocks.append( 137 { 138 "type": "section", 139 "text": {"type": "mrkdwn", "text": format_user_data("*New users*", new_users)}, 140 } 141 ) 142 143 if returning_users: 144 blocks.append( 145 { 146 "type": "section", 147 "text": { 148 "type": "mrkdwn", 149 "text": format_user_data("*Returning users*", returning_users), 150 }, 151 } 152 ) 153 154 if top_countries and downloads_per_country: 155 countries_count = downloads_per_country.most_common(top_countries) 156 top_countries = min(top_countries, len(countries_count)) 157 lines = [f"*Top {top_countries} countr{pluralize(top_countries, 'y,ies')}*"] 158 # Allowed overhead for 2 column sorting: downloads count, country. 159 countries_count = sorted(countries_count, key=lambda cc: cc[0]) 160 countries_count = sorted(countries_count, key=lambda cc: cc[1], reverse=True) 161 for country, count in countries_count: 162 lines.append(f"{country}: {count} download{pluralize(count)}") 163 164 blocks.append( 165 { 166 "type": "section", 167 "text": {"type": "mrkdwn", "text": "\n".join(lines)}, 168 } 169 ) 170 171 # Post to Slack. 172 requests.post( 173 settings.ENGAGEMENTBOT_WEBHOOK, 174 json={ 175 "username": "EngagementBot", 176 "icon_emoji": ":halal:", 177 "channel": f"#{channel}", 178 "text": fallback_text, 179 "blocks": blocks, 180 }, 181 headers={"Content-Type": "application/json"}, 182 timeout=10, 183 ) 184 [end of api/data_refinery_api/management/commands/post_downloads_summary.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py --- a/api/data_refinery_api/management/commands/post_downloads_summary.py +++ b/api/data_refinery_api/management/commands/post_downloads_summary.py @@ -66,7 +66,6 @@ def post_downloads_summary(days, channel, top_countries=5): """Posts downloads summary to Slack channel.""" - start_time = timezone.now() - datetime.timedelta(days=days) datasets = Dataset.processed_filtered_objects.filter( created_at__gt=start_time @@ -75,7 +74,7 @@ users_emails = set(dataset.email_address for dataset in datasets) locations = set() - locations_cache = dict() + locations_cache = {} for annotation in annotations: if "location" not in annotation.data: ip_address = annotation.data["ip"] @@ -94,8 +93,10 @@ for user_email in users_emails: user_annotations = annotations.filter(dataset__email_address=user_email) user_downloads = user_annotations.count() - downloads_total += user_downloads + if user_downloads == 0: + continue + downloads_total += user_downloads user_locations = set() for user_annotation in user_annotations: user_locations.add(user_annotation.data["location"]) @@ -110,18 +111,18 @@ is_returning_user = Dataset.processed_filtered_objects.filter( created_at__lt=start_time, email_address=user_email - ) + ).exists() if is_returning_user: returning_users.append(user_data) else: new_users.append(user_data) - if downloads_total: + if downloads_total > 0: locations_count = len(locations) - users_emails_count = len(users_emails) + users_count = len(new_users) + len(returning_users) fallback_text = ( - f"In the last {days} day{pluralize(days)}, {users_emails_count} " - f"user{pluralize(users_emails_count)} downloaded {downloads_total} " + f"In the last {days} day{pluralize(days)}, {users_count} " + f"user{pluralize(users_count)} downloaded {downloads_total} " f"dataset{pluralize(downloads_total)} from {locations_count} " f"location{pluralize(locations_count)}." ) @@ -129,14 +130,20 @@ fallback_text = f"There were no downloads in the last {days} day{pluralize(days)}." blocks = [ - {"type": "section", "text": {"type": "plain_text", "emoji": True, "text": fallback_text}} + { + "type": "section", + "text": {"type": "plain_text", "emoji": True, "text": fallback_text}, + } ] if new_users: blocks.append( { "type": "section", - "text": {"type": "mrkdwn", "text": format_user_data("*New users*", new_users)}, + "text": { + "type": "mrkdwn", + "text": format_user_data("*New users*", new_users), + }, } )
{"golden_diff": "diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py\n--- a/api/data_refinery_api/management/commands/post_downloads_summary.py\n+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py\n@@ -66,7 +66,6 @@\n \n def post_downloads_summary(days, channel, top_countries=5):\n \"\"\"Posts downloads summary to Slack channel.\"\"\"\n-\n start_time = timezone.now() - datetime.timedelta(days=days)\n datasets = Dataset.processed_filtered_objects.filter(\n created_at__gt=start_time\n@@ -75,7 +74,7 @@\n users_emails = set(dataset.email_address for dataset in datasets)\n \n locations = set()\n- locations_cache = dict()\n+ locations_cache = {}\n for annotation in annotations:\n if \"location\" not in annotation.data:\n ip_address = annotation.data[\"ip\"]\n@@ -94,8 +93,10 @@\n for user_email in users_emails:\n user_annotations = annotations.filter(dataset__email_address=user_email)\n user_downloads = user_annotations.count()\n- downloads_total += user_downloads\n+ if user_downloads == 0:\n+ continue\n \n+ downloads_total += user_downloads\n user_locations = set()\n for user_annotation in user_annotations:\n user_locations.add(user_annotation.data[\"location\"])\n@@ -110,18 +111,18 @@\n \n is_returning_user = Dataset.processed_filtered_objects.filter(\n created_at__lt=start_time, email_address=user_email\n- )\n+ ).exists()\n if is_returning_user:\n returning_users.append(user_data)\n else:\n new_users.append(user_data)\n \n- if downloads_total:\n+ if downloads_total > 0:\n locations_count = len(locations)\n- users_emails_count = len(users_emails)\n+ users_count = len(new_users) + len(returning_users)\n fallback_text = (\n- f\"In the last {days} day{pluralize(days)}, {users_emails_count} \"\n- f\"user{pluralize(users_emails_count)} downloaded {downloads_total} \"\n+ f\"In the last {days} day{pluralize(days)}, {users_count} \"\n+ f\"user{pluralize(users_count)} downloaded {downloads_total} \"\n f\"dataset{pluralize(downloads_total)} from {locations_count} \"\n f\"location{pluralize(locations_count)}.\"\n )\n@@ -129,14 +130,20 @@\n fallback_text = f\"There were no downloads in the last {days} day{pluralize(days)}.\"\n \n blocks = [\n- {\"type\": \"section\", \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text}}\n+ {\n+ \"type\": \"section\",\n+ \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text},\n+ }\n ]\n \n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n- \"text\": {\"type\": \"mrkdwn\", \"text\": format_user_data(\"*New users*\", new_users)},\n+ \"text\": {\n+ \"type\": \"mrkdwn\",\n+ \"text\": format_user_data(\"*New users*\", new_users),\n+ },\n }\n )\n", "issue": "Weekly stats shows 0 downloads for some users\n### Context\r\n\r\nA lot of changes has gone into prod recently. One of them is the EngagementBot weekly stats updates.\r\n\r\n### Problem or idea\r\n\r\nThe most recent summary contains lines like \"0 downloads from \" indicating some (potential) stats inaccuracy.\r\n\r\n### Solution or next step\r\n\r\nFigure out why the value is empty and fix the issue. 
If everything is right then just hide those 0 downloads items.\r\n\n", "before_files": [{"content": "import datetime\nfrom collections import Counter\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template.defaultfilters import pluralize\nfrom django.utils import timezone\n\nimport requests\n\nfrom data_refinery_common.models import Dataset, DatasetAnnotation\n\n\nclass Command(BaseCommand):\n help = \"Post downloads summary to Slack\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--channel\",\n type=str,\n default=\"ccdl-general\",\n help=(\"Optional parameter to choose the channel where the message will be posted.\"),\n )\n parser.add_argument(\n \"--days\",\n type=int,\n default=7, # Default to a week.\n help=(\"Number of days in the past for which to build the stats.\"),\n )\n parser.add_argument(\n \"--top-countries\",\n type=int,\n default=5,\n help=(\"Number of countries to show in the per country downloads summary.\"),\n )\n\n def handle(self, *args, **options):\n post_downloads_summary(options[\"days\"], options[\"channel\"], options[\"top_countries\"])\n\n\ndef format_user_data(header, data):\n \"\"\"\n Formats user email, downloads count, location information sorted\n by downloads count.\n \"\"\"\n # Allowed overhead for 2 column sorting: downloads count, email.\n lines = sorted(data, key=lambda u: u[0].lower())\n lines = [\n f\"{email.lower()} | {downloads} download{pluralize(downloads)} from {location}\"\n for email, downloads, location in sorted(lines, key=lambda u: u[1], reverse=True)\n ]\n lines.insert(0, header)\n\n return \"\\n\".join(lines)\n\n\ndef get_user_location(ip_address):\n \"\"\"Gets user location information based on their IP address.\"\"\"\n try:\n data = requests.get(f\"https://ipapi.co/{ip_address}/json/\", timeout=10).json()\n # The list of available fields https://ipapi.co/api/#complete-location\n return \", \".join((data[\"city\"], data[\"country_name\"]))\n except (requests.exceptions.RequestException, KeyError, ValueError):\n return ip_address\n\n\ndef post_downloads_summary(days, channel, top_countries=5):\n \"\"\"Posts downloads summary to Slack channel.\"\"\"\n\n start_time = timezone.now() - datetime.timedelta(days=days)\n datasets = Dataset.processed_filtered_objects.filter(\n created_at__gt=start_time\n ).prefetch_related(\"datasetannotation_set\")\n annotations = DatasetAnnotation.objects.filter(dataset__in=datasets)\n users_emails = set(dataset.email_address for dataset in datasets)\n\n locations = set()\n locations_cache = dict()\n for annotation in annotations:\n if \"location\" not in annotation.data:\n ip_address = annotation.data[\"ip\"]\n if ip_address not in locations_cache:\n locations_cache[ip_address] = get_user_location(ip_address)\n\n # Save the locations permanently, since IP addresses can cycle over time.\n annotation.data[\"location\"] = locations_cache[ip_address]\n annotation.save()\n locations.add(annotation.data[\"location\"])\n\n downloads_per_country = Counter()\n downloads_total = 0\n new_users = []\n returning_users = []\n for user_email in users_emails:\n user_annotations = annotations.filter(dataset__email_address=user_email)\n user_downloads = user_annotations.count()\n downloads_total += user_downloads\n\n user_locations = set()\n for user_annotation in user_annotations:\n user_locations.add(user_annotation.data[\"location\"])\n try:\n country = user_annotation.data[\"location\"].split(\", \")[1]\n downloads_per_country.update({country: 1})\n 
except (IndexError, TypeError):\n pass\n\n user_locations = \"; \".join(sorted(user_locations))\n user_data = (user_email, user_downloads, user_locations)\n\n is_returning_user = Dataset.processed_filtered_objects.filter(\n created_at__lt=start_time, email_address=user_email\n )\n if is_returning_user:\n returning_users.append(user_data)\n else:\n new_users.append(user_data)\n\n if downloads_total:\n locations_count = len(locations)\n users_emails_count = len(users_emails)\n fallback_text = (\n f\"In the last {days} day{pluralize(days)}, {users_emails_count} \"\n f\"user{pluralize(users_emails_count)} downloaded {downloads_total} \"\n f\"dataset{pluralize(downloads_total)} from {locations_count} \"\n f\"location{pluralize(locations_count)}.\"\n )\n else:\n fallback_text = f\"There were no downloads in the last {days} day{pluralize(days)}.\"\n\n blocks = [\n {\"type\": \"section\", \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text}}\n ]\n\n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": format_user_data(\"*New users*\", new_users)},\n }\n )\n\n if returning_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": format_user_data(\"*Returning users*\", returning_users),\n },\n }\n )\n\n if top_countries and downloads_per_country:\n countries_count = downloads_per_country.most_common(top_countries)\n top_countries = min(top_countries, len(countries_count))\n lines = [f\"*Top {top_countries} countr{pluralize(top_countries, 'y,ies')}*\"]\n # Allowed overhead for 2 column sorting: downloads count, country.\n countries_count = sorted(countries_count, key=lambda cc: cc[0])\n countries_count = sorted(countries_count, key=lambda cc: cc[1], reverse=True)\n for country, count in countries_count:\n lines.append(f\"{country}: {count} download{pluralize(count)}\")\n\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"\\n\".join(lines)},\n }\n )\n\n # Post to Slack.\n requests.post(\n settings.ENGAGEMENTBOT_WEBHOOK,\n json={\n \"username\": \"EngagementBot\",\n \"icon_emoji\": \":halal:\",\n \"channel\": f\"#{channel}\",\n \"text\": fallback_text,\n \"blocks\": blocks,\n },\n headers={\"Content-Type\": \"application/json\"},\n timeout=10,\n )\n", "path": "api/data_refinery_api/management/commands/post_downloads_summary.py"}]}
2,462
731
gh_patches_debug_3279
rasdani/github-patches
git_diff
mozilla__pontoon-2667
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Entities of mass deleted terms do not get obsolete When terms are deleted from the Django Admin, we [obsolete corresponding Entities](https://github.com/mozilla/pontoon/blob/01cddfd0df2f5ddf85d1b5e26a13003f9f320d97/pontoon/terminology/models.py#L239). However, this function doesn't trigger in mass delete actions, which results in the following error when trying to translate deleted Terms: ``` RelatedObjectDoesNotExist: Entity has no term ``` </issue> <code> [start of pontoon/terminology/models.py] 1 import re 2 3 from django.db import models 4 5 from pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource 6 7 8 def update_terminology_project_stats(): 9 resource = Resource.objects.get(project__slug="terminology") 10 project = resource.project 11 total_strings = Entity.objects.filter(resource=resource, obsolete=False).count() 12 resource.total_strings = total_strings 13 resource.save(update_fields=["total_strings"]) 14 15 translated_resources = list(TranslatedResource.objects.filter(resource=resource)) 16 17 for translated_resource in translated_resources: 18 translated_resource.calculate_stats(save=False) 19 20 TranslatedResource.objects.bulk_update( 21 translated_resources, 22 [ 23 "total_strings", 24 "approved_strings", 25 "pretranslated_strings", 26 "strings_with_errors", 27 "strings_with_warnings", 28 "unreviewed_strings", 29 ], 30 ) 31 32 project.aggregate_stats() 33 34 for locale in project.locales.all(): 35 locale.aggregate_stats() 36 37 for projectlocale in ProjectLocale.objects.filter(project=project): 38 projectlocale.aggregate_stats() 39 40 41 class TermQuerySet(models.QuerySet): 42 def for_string(self, string): 43 terms = [] 44 available_terms = self.exclude(definition="").exclude(forbidden=True) 45 46 for term in available_terms: 47 term_text = r"\b" + re.escape(term.text) 48 flags = 0 if term.case_sensitive else re.IGNORECASE 49 50 if re.search(term_text, string, flags): 51 terms.append(term) 52 53 return terms 54 55 56 class Term(models.Model): 57 text = models.CharField(max_length=255) 58 entity = models.OneToOneField("base.Entity", models.SET_NULL, null=True, blank=True) 59 60 class PartOfSpeech(models.TextChoices): 61 ADJECTIVE = "adjective", "Adjective" 62 ADVERB = "adverb", "Adverb" 63 NOUN = "noun", "Noun" 64 VERB = "verb", "Verb" 65 66 part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices) 67 68 definition = models.TextField(blank=True) 69 usage = models.TextField(blank=True) 70 notes = models.TextField(blank=True) 71 72 class Status(models.TextChoices): 73 APPROVED = "approved", "Approved" 74 NEW = "new", "New" 75 OBSOLETE = "obsolete", "Obsolete" 76 REVIEW = "review", "Review" 77 78 status = models.CharField( 79 max_length=20, choices=Status.choices, null=True, blank=True 80 ) 81 82 case_sensitive = models.BooleanField(default=False) 83 do_not_translate = models.BooleanField(default=False) 84 forbidden = models.BooleanField(default=False) 85 86 created_at = models.DateTimeField(auto_now_add=True) 87 created_by = models.ForeignKey( 88 "auth.User", models.SET_NULL, related_name="terms", null=True, blank=True 89 ) 90 91 objects = TermQuerySet.as_manager() 92 93 def translation(self, locale): 94 """ 95 Get locale translation of the term. 
96 """ 97 if self.do_not_translate: 98 return self.text 99 else: 100 try: 101 return self.translations.get(locale=locale).text 102 except (AttributeError, TermTranslation.DoesNotExist): 103 return None 104 105 @property 106 def localizable(self): 107 """ 108 Check if the term is localizable. 109 """ 110 if self.do_not_translate: 111 return False 112 113 if self.forbidden: 114 return False 115 116 if self.definition == "": 117 return False 118 119 return True 120 121 def entity_comment(self): 122 """ 123 Generate entity comment from the term. 124 """ 125 comment = "{}. {}.".format( 126 self.part_of_speech.capitalize(), 127 self.definition.capitalize().rstrip("."), 128 ) 129 130 if self.usage: 131 comment += " E.g. {}.".format(self.usage.capitalize().rstrip(".")) 132 133 return comment 134 135 def create_entity(self): 136 """ 137 An Entity must be created (or deobsoleted) for a Term according to the 138 following rules: 139 - Entity.string contains content of Term.text. 140 - Entity.comment contains joint content of several fields: 141 Term.part_of_speech. Term.definition. E.g.: Term.usage. 142 """ 143 resource = Resource.objects.get(project__slug="terminology") 144 145 entity, created = Entity.objects.get_or_create( 146 string=self.text, 147 comment=self.entity_comment(), 148 resource=resource, 149 ) 150 151 # Using update() to avoid circular Term.save() call 152 Term.objects.filter(pk=self.pk).update(entity_id=entity.id) 153 154 if not created: 155 entity.obsolete = False 156 entity.save(update_fields=["obsolete"]) 157 158 # Make sure Term entities are ordered alphabetically 159 entities = list( 160 Entity.objects.filter(resource=resource, obsolete=False).order_by("string") 161 ) 162 for index, e in enumerate(entities): 163 e.order = index 164 Entity.objects.bulk_update(entities, ["order"]) 165 166 def obsolete_entity(self): 167 entity = self.entity 168 169 # Ignore if term doesn't have entity assigned 170 if entity is None: 171 return 172 173 entity.obsolete = True 174 entity.save(update_fields=["obsolete"]) 175 176 def handle_term_update(self): 177 """ 178 Before updating an existing Term, update its Entity if neccessary 179 """ 180 term = self 181 old_term = Term.objects.get(pk=term.pk) 182 183 # Ignore changes to non-localizable terms that stay non-localizable 184 if not old_term.localizable and not term.localizable: 185 return 186 187 # If localizable term becomes non-localizable, obsolete its Entity 188 if old_term.localizable and not term.localizable: 189 old_term.obsolete_entity() 190 191 # If non-localizable term becomes localizable, create a corresponding Entity 192 elif not old_term.localizable and term.localizable: 193 term.create_entity() 194 195 # If relevant changes are made to the localizable term that stays localizable 196 else: 197 # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete. 198 if old_term.text != term.text: 199 old_term.obsolete_entity() 200 term.create_entity() 201 202 # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated. 
203 elif ( 204 old_term.part_of_speech != term.part_of_speech 205 or old_term.definition != term.definition 206 or old_term.usage != term.usage 207 ): 208 entity = term.entity 209 210 # Ignore if term doesn't have entity assigned 211 if entity is None: 212 return 213 214 entity.comment = term.entity_comment() 215 entity.save(update_fields=["comment"]) 216 217 return 218 219 update_terminology_project_stats() 220 221 def handle_term_create(self): 222 """ 223 After creating a new localizable Term, create its Entity 224 """ 225 self.create_entity() 226 update_terminology_project_stats() 227 228 def save(self, *args, **kwargs): 229 created = self.pk is None 230 231 if not created: 232 self.handle_term_update() 233 234 super().save(*args, **kwargs) 235 236 if created and self.localizable: 237 self.handle_term_create() 238 239 def delete(self, *args, **kwargs): 240 """ 241 Before deleting a Term, obsolete its Entity 242 """ 243 self.obsolete_entity() 244 update_terminology_project_stats() 245 246 super().delete(*args, **kwargs) 247 248 def __str__(self): 249 return self.text 250 251 252 class TermTranslation(models.Model): 253 term = models.ForeignKey(Term, models.CASCADE, related_name="translations") 254 locale = models.ForeignKey("base.Locale", models.CASCADE, related_name="terms") 255 256 text = models.CharField(max_length=255) 257 258 def __str__(self): 259 return self.text 260 [end of pontoon/terminology/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py --- a/pontoon/terminology/models.py +++ b/pontoon/terminology/models.py @@ -52,6 +52,16 @@ return terms + def delete(self, *args, **kwargs): + """ + Before deleting Terms, obsolete their Entities + """ + for term in self: + term.obsolete_entity() + update_terminology_project_stats() + + super().delete(*args, **kwargs) + class Term(models.Model): text = models.CharField(max_length=255)
{"golden_diff": "diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py\n--- a/pontoon/terminology/models.py\n+++ b/pontoon/terminology/models.py\n@@ -52,6 +52,16 @@\n \n return terms\n \n+ def delete(self, *args, **kwargs):\n+ \"\"\"\n+ Before deleting Terms, obsolete their Entities\n+ \"\"\"\n+ for term in self:\n+ term.obsolete_entity()\n+ update_terminology_project_stats()\n+\n+ super().delete(*args, **kwargs)\n+\n \n class Term(models.Model):\n text = models.CharField(max_length=255)\n", "issue": "Entities of mass deleted terms do not get obsolete\nWhen terms are deleted from the Django Admin, we [obsolete corresponding Entities](https://github.com/mozilla/pontoon/blob/01cddfd0df2f5ddf85d1b5e26a13003f9f320d97/pontoon/terminology/models.py#L239). However, this function doesn't trigger in mass delete actions, which results in the following error when trying to translate deleted Terms:\r\n\r\n```\r\nRelatedObjectDoesNotExist: Entity has no term\r\n```\n", "before_files": [{"content": "import re\n\nfrom django.db import models\n\nfrom pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource\n\n\ndef update_terminology_project_stats():\n resource = Resource.objects.get(project__slug=\"terminology\")\n project = resource.project\n total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()\n resource.total_strings = total_strings\n resource.save(update_fields=[\"total_strings\"])\n\n translated_resources = list(TranslatedResource.objects.filter(resource=resource))\n\n for translated_resource in translated_resources:\n translated_resource.calculate_stats(save=False)\n\n TranslatedResource.objects.bulk_update(\n translated_resources,\n [\n \"total_strings\",\n \"approved_strings\",\n \"pretranslated_strings\",\n \"strings_with_errors\",\n \"strings_with_warnings\",\n \"unreviewed_strings\",\n ],\n )\n\n project.aggregate_stats()\n\n for locale in project.locales.all():\n locale.aggregate_stats()\n\n for projectlocale in ProjectLocale.objects.filter(project=project):\n projectlocale.aggregate_stats()\n\n\nclass TermQuerySet(models.QuerySet):\n def for_string(self, string):\n terms = []\n available_terms = self.exclude(definition=\"\").exclude(forbidden=True)\n\n for term in available_terms:\n term_text = r\"\\b\" + re.escape(term.text)\n flags = 0 if term.case_sensitive else re.IGNORECASE\n\n if re.search(term_text, string, flags):\n terms.append(term)\n\n return terms\n\n\nclass Term(models.Model):\n text = models.CharField(max_length=255)\n entity = models.OneToOneField(\"base.Entity\", models.SET_NULL, null=True, blank=True)\n\n class PartOfSpeech(models.TextChoices):\n ADJECTIVE = \"adjective\", \"Adjective\"\n ADVERB = \"adverb\", \"Adverb\"\n NOUN = \"noun\", \"Noun\"\n VERB = \"verb\", \"Verb\"\n\n part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)\n\n definition = models.TextField(blank=True)\n usage = models.TextField(blank=True)\n notes = models.TextField(blank=True)\n\n class Status(models.TextChoices):\n APPROVED = \"approved\", \"Approved\"\n NEW = \"new\", \"New\"\n OBSOLETE = \"obsolete\", \"Obsolete\"\n REVIEW = \"review\", \"Review\"\n\n status = models.CharField(\n max_length=20, choices=Status.choices, null=True, blank=True\n )\n\n case_sensitive = models.BooleanField(default=False)\n do_not_translate = models.BooleanField(default=False)\n forbidden = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n created_by = models.ForeignKey(\n 
\"auth.User\", models.SET_NULL, related_name=\"terms\", null=True, blank=True\n )\n\n objects = TermQuerySet.as_manager()\n\n def translation(self, locale):\n \"\"\"\n Get locale translation of the term.\n \"\"\"\n if self.do_not_translate:\n return self.text\n else:\n try:\n return self.translations.get(locale=locale).text\n except (AttributeError, TermTranslation.DoesNotExist):\n return None\n\n @property\n def localizable(self):\n \"\"\"\n Check if the term is localizable.\n \"\"\"\n if self.do_not_translate:\n return False\n\n if self.forbidden:\n return False\n\n if self.definition == \"\":\n return False\n\n return True\n\n def entity_comment(self):\n \"\"\"\n Generate entity comment from the term.\n \"\"\"\n comment = \"{}. {}.\".format(\n self.part_of_speech.capitalize(),\n self.definition.capitalize().rstrip(\".\"),\n )\n\n if self.usage:\n comment += \" E.g. {}.\".format(self.usage.capitalize().rstrip(\".\"))\n\n return comment\n\n def create_entity(self):\n \"\"\"\n An Entity must be created (or deobsoleted) for a Term according to the\n following rules:\n - Entity.string contains content of Term.text.\n - Entity.comment contains joint content of several fields:\n Term.part_of_speech. Term.definition. E.g.: Term.usage.\n \"\"\"\n resource = Resource.objects.get(project__slug=\"terminology\")\n\n entity, created = Entity.objects.get_or_create(\n string=self.text,\n comment=self.entity_comment(),\n resource=resource,\n )\n\n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n\n if not created:\n entity.obsolete = False\n entity.save(update_fields=[\"obsolete\"])\n\n # Make sure Term entities are ordered alphabetically\n entities = list(\n Entity.objects.filter(resource=resource, obsolete=False).order_by(\"string\")\n )\n for index, e in enumerate(entities):\n e.order = index\n Entity.objects.bulk_update(entities, [\"order\"])\n\n def obsolete_entity(self):\n entity = self.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.obsolete = True\n entity.save(update_fields=[\"obsolete\"])\n\n def handle_term_update(self):\n \"\"\"\n Before updating an existing Term, update its Entity if neccessary\n \"\"\"\n term = self\n old_term = Term.objects.get(pk=term.pk)\n\n # Ignore changes to non-localizable terms that stay non-localizable\n if not old_term.localizable and not term.localizable:\n return\n\n # If localizable term becomes non-localizable, obsolete its Entity\n if old_term.localizable and not term.localizable:\n old_term.obsolete_entity()\n\n # If non-localizable term becomes localizable, create a corresponding Entity\n elif not old_term.localizable and term.localizable:\n term.create_entity()\n\n # If relevant changes are made to the localizable term that stays localizable\n else:\n # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.\n if old_term.text != term.text:\n old_term.obsolete_entity()\n term.create_entity()\n\n # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.\n elif (\n old_term.part_of_speech != term.part_of_speech\n or old_term.definition != term.definition\n or old_term.usage != term.usage\n ):\n entity = term.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.comment = term.entity_comment()\n entity.save(update_fields=[\"comment\"])\n\n return\n\n update_terminology_project_stats()\n\n def handle_term_create(self):\n 
\"\"\"\n After creating a new localizable Term, create its Entity\n \"\"\"\n self.create_entity()\n update_terminology_project_stats()\n\n def save(self, *args, **kwargs):\n created = self.pk is None\n\n if not created:\n self.handle_term_update()\n\n super().save(*args, **kwargs)\n\n if created and self.localizable:\n self.handle_term_create()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting a Term, obsolete its Entity\n \"\"\"\n self.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n def __str__(self):\n return self.text\n\n\nclass TermTranslation(models.Model):\n term = models.ForeignKey(Term, models.CASCADE, related_name=\"translations\")\n locale = models.ForeignKey(\"base.Locale\", models.CASCADE, related_name=\"terms\")\n\n text = models.CharField(max_length=255)\n\n def __str__(self):\n return self.text\n", "path": "pontoon/terminology/models.py"}]}
2,968
144
gh_patches_debug_11088
rasdani/github-patches
git_diff
getsentry__sentry-python-773
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DjangoIntegration conflict with MiddlewareMixin & TemplateView django version: 3.0 sentry-sdk version: 0.14.3 My project has a middleware using Django's `django.utils.deprecation.MiddlewareMixin`. Visiting a view which subclasses `django.views.generic.TemplateView` while Sentry is active results in the following exception: ``` AttributeError: 'function' object has no attribute '__self__' File "django/core/handlers/exception.py", line 47, in inner response = get_response(request) File "django/core/handlers/base.py", line 196, in _get_response middleware_method.__self__.__class__.__name__, ``` The following classes & url config should be sufficient to demonstrate the problem: ``` from django.utils.deprecation import MiddlewareMixin from django.views.generic import TemplateView # ...in myapp.middleware.py class DemonstratesConflictMiddleware(MiddlewareMixin): def process_template_response(self, request, response): return response # ...in myapp.views.py class DemonstratesConflictView(TemplateView): template_name = "index.html" # ...in urls.py import myapp.views urlpatterns += [ path('/', myapp.views.DemonstratesConflictView.as_view(), name='throws-exception'), ] # ... in settings.py MIDDLEWARE += ['myapp.middleware.DemonstratesConflictMiddleware'] sentry_sdk.init( dsn="OMITTED", integrations=[DjangoIntegration()], ) ``` </issue> <code> [start of sentry_sdk/integrations/django/middleware.py] 1 """ 2 Create spans from Django middleware invocations 3 """ 4 5 from django import VERSION as DJANGO_VERSION 6 7 from sentry_sdk import Hub 8 from sentry_sdk._functools import wraps 9 from sentry_sdk._types import MYPY 10 from sentry_sdk.utils import ( 11 ContextVar, 12 transaction_from_function, 13 capture_internal_exceptions, 14 ) 15 16 if MYPY: 17 from typing import Any 18 from typing import Callable 19 from typing import TypeVar 20 21 F = TypeVar("F", bound=Callable[..., Any]) 22 23 _import_string_should_wrap_middleware = ContextVar( 24 "import_string_should_wrap_middleware" 25 ) 26 27 if DJANGO_VERSION < (1, 7): 28 import_string_name = "import_by_path" 29 else: 30 import_string_name = "import_string" 31 32 33 def patch_django_middlewares(): 34 # type: () -> None 35 from django.core.handlers import base 36 37 old_import_string = getattr(base, import_string_name) 38 39 def sentry_patched_import_string(dotted_path): 40 # type: (str) -> Any 41 rv = old_import_string(dotted_path) 42 43 if _import_string_should_wrap_middleware.get(None): 44 rv = _wrap_middleware(rv, dotted_path) 45 46 return rv 47 48 setattr(base, import_string_name, sentry_patched_import_string) 49 50 old_load_middleware = base.BaseHandler.load_middleware 51 52 def sentry_patched_load_middleware(*args, **kwargs): 53 # type: (Any, Any) -> Any 54 _import_string_should_wrap_middleware.set(True) 55 try: 56 return old_load_middleware(*args, **kwargs) 57 finally: 58 _import_string_should_wrap_middleware.set(False) 59 60 base.BaseHandler.load_middleware = sentry_patched_load_middleware 61 62 63 def _wrap_middleware(middleware, middleware_name): 64 # type: (Any, str) -> Any 65 from sentry_sdk.integrations.django import DjangoIntegration 66 67 def _get_wrapped_method(old_method): 68 # type: (F) -> F 69 with capture_internal_exceptions(): 70 71 def sentry_wrapped_method(*args, **kwargs): 72 # type: (*Any, **Any) -> Any 73 hub = Hub.current 74 integration = hub.get_integration(DjangoIntegration) 75 if integration is None or not 
integration.middleware_spans: 76 return old_method(*args, **kwargs) 77 78 function_name = transaction_from_function(old_method) 79 80 description = middleware_name 81 function_basename = getattr(old_method, "__name__", None) 82 if function_basename: 83 description = "{}.{}".format(description, function_basename) 84 85 with hub.start_span( 86 op="django.middleware", description=description 87 ) as span: 88 span.set_tag("django.function_name", function_name) 89 span.set_tag("django.middleware_name", middleware_name) 90 return old_method(*args, **kwargs) 91 92 try: 93 # fails for __call__ of function on Python 2 (see py2.7-django-1.11) 94 return wraps(old_method)(sentry_wrapped_method) # type: ignore 95 except Exception: 96 return sentry_wrapped_method # type: ignore 97 98 return old_method 99 100 class SentryWrappingMiddleware(object): 101 def __init__(self, *args, **kwargs): 102 # type: (*Any, **Any) -> None 103 self._inner = middleware(*args, **kwargs) 104 self._call_method = None 105 106 # We need correct behavior for `hasattr()`, which we can only determine 107 # when we have an instance of the middleware we're wrapping. 108 def __getattr__(self, method_name): 109 # type: (str) -> Any 110 if method_name not in ( 111 "process_request", 112 "process_view", 113 "process_template_response", 114 "process_response", 115 "process_exception", 116 ): 117 raise AttributeError() 118 119 old_method = getattr(self._inner, method_name) 120 rv = _get_wrapped_method(old_method) 121 self.__dict__[method_name] = rv 122 return rv 123 124 def __call__(self, *args, **kwargs): 125 # type: (*Any, **Any) -> Any 126 f = self._call_method 127 if f is None: 128 self._call_method = f = _get_wrapped_method(self._inner.__call__) 129 return f(*args, **kwargs) 130 131 if hasattr(middleware, "__name__"): 132 SentryWrappingMiddleware.__name__ = middleware.__name__ 133 134 return SentryWrappingMiddleware 135 [end of sentry_sdk/integrations/django/middleware.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py --- a/sentry_sdk/integrations/django/middleware.py +++ b/sentry_sdk/integrations/django/middleware.py @@ -91,9 +91,14 @@ try: # fails for __call__ of function on Python 2 (see py2.7-django-1.11) - return wraps(old_method)(sentry_wrapped_method) # type: ignore + sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method) + + # Necessary for Django 3.1 + sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore except Exception: - return sentry_wrapped_method # type: ignore + pass + + return sentry_wrapped_method # type: ignore return old_method
{"golden_diff": "diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py\n--- a/sentry_sdk/integrations/django/middleware.py\n+++ b/sentry_sdk/integrations/django/middleware.py\n@@ -91,9 +91,14 @@\n \n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n- return wraps(old_method)(sentry_wrapped_method) # type: ignore\n+ sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)\n+\n+ # Necessary for Django 3.1\n+ sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore\n except Exception:\n- return sentry_wrapped_method # type: ignore\n+ pass\n+\n+ return sentry_wrapped_method # type: ignore\n \n return old_method\n", "issue": "DjangoIntegration conflict with MiddlewareMixin & TemplateView\ndjango version: 3.0\r\nsentry-sdk version: 0.14.3\r\n\r\nMy project has a middleware using Django's `django.utils.deprecation.MiddlewareMixin`. Visiting a view which subclasses `django.views.generic.TemplateView` while Sentry is active results in the following exception:\r\n```\r\nAttributeError: 'function' object has no attribute '__self__'\r\n File \"django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"django/core/handlers/base.py\", line 196, in _get_response\r\n middleware_method.__self__.__class__.__name__,\r\n```\r\n\r\nThe following classes & url config should be sufficient to demonstrate the problem:\r\n\r\n```\r\nfrom django.utils.deprecation import MiddlewareMixin\r\nfrom django.views.generic import TemplateView\r\n\r\n# ...in myapp.middleware.py\r\nclass DemonstratesConflictMiddleware(MiddlewareMixin):\r\n def process_template_response(self, request, response):\r\n return response\r\n\r\n# ...in myapp.views.py\r\nclass DemonstratesConflictView(TemplateView): \r\n template_name = \"index.html\"\r\n\r\n\r\n# ...in urls.py\r\nimport myapp.views\r\nurlpatterns += [\r\n path('/', myapp.views.DemonstratesConflictView.as_view(), name='throws-exception'),\r\n]\r\n\r\n# ... 
in settings.py\r\n\r\nMIDDLEWARE += ['myapp.middleware.DemonstratesConflictMiddleware']\r\n\r\nsentry_sdk.init(\r\n dsn=\"OMITTED\",\r\n integrations=[DjangoIntegration()],\r\n)\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._functools import wraps\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.utils import (\n ContextVar,\n transaction_from_function,\n capture_internal_exceptions,\n)\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(*args, **kwargs):\n # type: (Any, Any) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(*args, **kwargs)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n with capture_internal_exceptions():\n\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n return wraps(old_method)(sentry_wrapped_method) # type: ignore\n except Exception:\n return sentry_wrapped_method # type: ignore\n\n return old_method\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, **kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = 
getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}]}
2,145
213
gh_patches_debug_37989
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1270
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> API should validate input for number columns ## Description <!-- A clear and concise description of what the bug is. --> Currently, the API accepts strings for values input to number-typed columns. In some cases, these strings carry locale-sensitive information, i.e., using specific decimal points and negation styles. This is a problem since confusion will arise whenever the client, service, and database have different locale settings (it's likely the client and DB will have different locale settings by default). Even worse, the locale settings in the database (assuming PostgreSQL) may be applied differently in different contexts. ## Expected behavior <!-- A clear and concise description of what you expected to happen. --> Columns which use a number type for storage at the DB layer should only accept numbers in one of two formats: - an actual JSON number, or - A string conforming to the [JSON number spec](https://www.json.org/json-en.html), except wrapped in double-quotes. The validation of this should be locale-independent, and should happen in the Mathesar web service rather than the database. ## To Reproduce <!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. --> - Create a table with a number-typed column containing a decimal point (e.g., `FLOAT`). - Send an API request with input for that column as a string, with a comma for a decimal point. - You can do this easily from the browseable API, see `/api/db/v0/tables/<table_ID>/records/<record_ID>/` - Observe the database-layer error. </issue> <code> [start of mathesar/api/utils.py] 1 from rest_framework.exceptions import NotFound 2 3 from db.records.operations import group 4 from mathesar.models import Table 5 6 DATA_KEY = 'data' 7 METADATA_KEY = 'metadata' 8 9 10 def get_table_or_404(pk): 11 """ 12 Get table if it exists, otherwise throws a DRF NotFound error. 
13 Args: 14 pk: id of table 15 Returns: 16 table: return the table based on a specific id 17 """ 18 try: 19 table = Table.objects.get(id=pk) 20 except Table.DoesNotExist: 21 raise NotFound 22 return table 23 24 25 def process_annotated_records(record_list, column_name_id_map): 26 27 RESULT_IDX = 'result_indices' 28 29 def _get_record_dict(record): 30 return record._asdict() if not isinstance(record, dict) else record 31 32 split_records = ( 33 {DATA_KEY: record_dict} 34 for record_dict in (_get_record_dict(record) for record in record_list) 35 ) 36 37 combined_records, groups = group.extract_group_metadata( 38 split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY 39 ) 40 41 processed_records, record_metadata = zip( 42 *tuple(tuple(d.values()) for d in combined_records) 43 ) 44 45 def _replace_column_names_with_ids(group_metadata_item): 46 try: 47 processed_group_metadata_item = { 48 column_name_id_map[k]: v for k, v in group_metadata_item.items() 49 } 50 except AttributeError: 51 processed_group_metadata_item = group_metadata_item 52 return processed_group_metadata_item 53 54 if groups is not None: 55 groups_by_id = { 56 grp[group.GroupMetadataField.GROUP_ID.value]: { 57 k: _replace_column_names_with_ids(v) for k, v in grp.items() 58 if k != group.GroupMetadataField.GROUP_ID.value 59 } | {RESULT_IDX: []} 60 for grp in groups 61 } 62 63 for i, meta in enumerate(record_metadata): 64 groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i) 65 66 output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0]) 67 else: 68 output_groups = None 69 70 return processed_records, output_groups 71 [end of mathesar/api/utils.py] [start of mathesar/api/serializers/records.py] 1 from psycopg2.errors import NotNullViolation 2 from rest_framework import serializers 3 from rest_framework import status 4 from sqlalchemy.exc import IntegrityError 5 6 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions 7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin 8 9 10 class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer): 11 filter = serializers.JSONField(required=False, default=None) 12 order_by = serializers.JSONField(required=False, default=[]) 13 grouping = serializers.JSONField(required=False, default={}) 14 duplicate_only = serializers.JSONField(required=False, default=None) 15 16 17 class RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer): 18 def update(self, instance, validated_data): 19 table = self.context['table'] 20 record = table.update_record(instance['id'], validated_data) 21 return record 22 23 def create(self, validated_data): 24 table = self.context['table'] 25 try: 26 record = table.create_record_or_records(validated_data) 27 except IntegrityError as e: 28 if type(e.orig) == NotNullViolation: 29 raise database_api_exceptions.NotNullViolationAPIException( 30 e, 31 status_code=status.HTTP_400_BAD_REQUEST, 32 table=table 33 ) 34 else: 35 raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST) 36 return record 37 38 def to_representation(self, instance): 39 records = instance._asdict() if not isinstance(instance, dict) else instance 40 columns_map = self.context['columns_map'] 41 records = {columns_map[column_name]: column_value for column_name, column_value in records.items()} 42 return records 43 44 def to_internal_value(self, data): 45 columns_map = self.context['columns_map'].inverse 46 data = 
{columns_map[int(column_id)]: value for column_id, value in data.items()} 47 return data 48 [end of mathesar/api/serializers/records.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/api/serializers/records.py b/mathesar/api/serializers/records.py --- a/mathesar/api/serializers/records.py +++ b/mathesar/api/serializers/records.py @@ -5,6 +5,8 @@ import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin +from mathesar.models import Column +from mathesar.api.utils import follows_json_number_spec, is_number class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer): @@ -44,4 +46,17 @@ def to_internal_value(self, data): columns_map = self.context['columns_map'].inverse data = {columns_map[int(column_id)]: value for column_id, value in data.items()} + # If the data type of the column is number then the value must be an integer + # or a string which follows JSON number spec. + for column_name in data.keys(): + column = Column.objects.get(id=columns_map.inverse[column_name]) + column_type = column.type + value = data[column_name] + if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value): + raise database_api_exceptions.MathesarAPIException( + IntegrityError, + status_code=status.HTTP_400_BAD_REQUEST, + message="Number strings should follow JSON number spec", + field=column_name + ) return data diff --git a/mathesar/api/utils.py b/mathesar/api/utils.py --- a/mathesar/api/utils.py +++ b/mathesar/api/utils.py @@ -1,7 +1,9 @@ from rest_framework.exceptions import NotFound +import re from db.records.operations import group from mathesar.models import Table +from mathesar.database.types import _get_type_map DATA_KEY = 'data' METADATA_KEY = 'metadata' @@ -68,3 +70,39 @@ output_groups = None return processed_records, output_groups + + +def is_number(column_type): + """ + Check if a column data type is a number + Args: + column_type: data type of column + """ + for type in _get_type_map(): + if type['name'] == 'Number': + if str(column_type).lower() in type['sa_type_names']: + return True + else: + return False + + +def follows_json_number_spec(number): + """ + Check if a string follows JSON number spec + Args: + number: number as string + """ + patterns = [ + r"^-?0$", + r"^-?0[\.][0-9]+$", + r"^-?0[eE][+-]?[0-9]*$", + r"^-?0[\.][0-9]+[eE][+-]?[0-9]+$", + r"^-?[1-9][0-9]*$", + r"^-?[1-9][0-9]*[\.][0-9]+$", + r"^-?[1-9][0-9]*[eE][+-]?[0-9]+$", + r"^-?[1-9][0-9]*[\.][0-9]+[eE][+-]?[0-9]+$", + ] + for pattern in patterns: + if re.search(pattern, number) is not None: + return True + return False
{"golden_diff": "diff --git a/mathesar/api/serializers/records.py b/mathesar/api/serializers/records.py\n--- a/mathesar/api/serializers/records.py\n+++ b/mathesar/api/serializers/records.py\n@@ -5,6 +5,8 @@\n \n import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\n from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n+from mathesar.models import Column\n+from mathesar.api.utils import follows_json_number_spec, is_number\n \n \n class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n@@ -44,4 +46,17 @@\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n+ # If the data type of the column is number then the value must be an integer\n+ # or a string which follows JSON number spec.\n+ for column_name in data.keys():\n+ column = Column.objects.get(id=columns_map.inverse[column_name])\n+ column_type = column.type\n+ value = data[column_name]\n+ if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value):\n+ raise database_api_exceptions.MathesarAPIException(\n+ IntegrityError,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ message=\"Number strings should follow JSON number spec\",\n+ field=column_name\n+ )\n return data\ndiff --git a/mathesar/api/utils.py b/mathesar/api/utils.py\n--- a/mathesar/api/utils.py\n+++ b/mathesar/api/utils.py\n@@ -1,7 +1,9 @@\n from rest_framework.exceptions import NotFound\n+import re\n \n from db.records.operations import group\n from mathesar.models import Table\n+from mathesar.database.types import _get_type_map\n \n DATA_KEY = 'data'\n METADATA_KEY = 'metadata'\n@@ -68,3 +70,39 @@\n output_groups = None\n \n return processed_records, output_groups\n+\n+\n+def is_number(column_type):\n+ \"\"\"\n+ Check if a column data type is a number\n+ Args:\n+ column_type: data type of column\n+ \"\"\"\n+ for type in _get_type_map():\n+ if type['name'] == 'Number':\n+ if str(column_type).lower() in type['sa_type_names']:\n+ return True\n+ else:\n+ return False\n+\n+\n+def follows_json_number_spec(number):\n+ \"\"\"\n+ Check if a string follows JSON number spec\n+ Args:\n+ number: number as string\n+ \"\"\"\n+ patterns = [\n+ r\"^-?0$\",\n+ r\"^-?0[\\.][0-9]+$\",\n+ r\"^-?0[eE][+-]?[0-9]*$\",\n+ r\"^-?0[\\.][0-9]+[eE][+-]?[0-9]+$\",\n+ r\"^-?[1-9][0-9]*$\",\n+ r\"^-?[1-9][0-9]*[\\.][0-9]+$\",\n+ r\"^-?[1-9][0-9]*[eE][+-]?[0-9]+$\",\n+ r\"^-?[1-9][0-9]*[\\.][0-9]+[eE][+-]?[0-9]+$\",\n+ ]\n+ for pattern in patterns:\n+ if re.search(pattern, number) is not None:\n+ return True\n+ return False\n", "issue": "API should validate input for number columns\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nCurrently, the API accepts strings for values input to number-typed columns. In some cases, these strings carry locale-sensitive information, i.e., using specific decimal points and negation styles. This is a problem since confusion will arise whenever the client, service, and database have different locale settings (it's likely the client and DB will have different locale settings by default). Even worse, the locale settings in the database (assuming PostgreSQL) may be applied differently in different contexts.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n\r\nColumns which use a number type for storage at the DB layer should only accept numbers in one of two formats:\r\n- an actual JSON number, or\r\n- A string conforming to the [JSON number spec](https://www.json.org/json-en.html), except wrapped in double-quotes.\r\n\r\nThe validation of this should be locale-independent, and should happen in the Mathesar web service rather than the database.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\n- Create a table with a number-typed column containing a decimal point (e.g., `FLOAT`).\r\n- Send an API request with input for that column as a string, with a comma for a decimal point. \r\n - You can do this easily from the browseable API, see `/api/db/v0/tables/<table_ID>/records/<record_ID>/`\r\n- Observe the database-layer error.\r\n\n", "before_files": [{"content": "from rest_framework.exceptions import NotFound\n\nfrom db.records.operations import group\nfrom mathesar.models import Table\n\nDATA_KEY = 'data'\nMETADATA_KEY = 'metadata'\n\n\ndef get_table_or_404(pk):\n \"\"\"\n Get table if it exists, otherwise throws a DRF NotFound error.\n Args:\n pk: id of table\n Returns:\n table: return the table based on a specific id\n \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n\n\ndef process_annotated_records(record_list, column_name_id_map):\n\n RESULT_IDX = 'result_indices'\n\n def _get_record_dict(record):\n return record._asdict() if not isinstance(record, dict) else record\n\n split_records = (\n {DATA_KEY: record_dict}\n for record_dict in (_get_record_dict(record) for record in record_list)\n )\n\n combined_records, groups = group.extract_group_metadata(\n split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY\n )\n\n processed_records, record_metadata = zip(\n *tuple(tuple(d.values()) for d in combined_records)\n )\n\n def _replace_column_names_with_ids(group_metadata_item):\n try:\n processed_group_metadata_item = {\n column_name_id_map[k]: v for k, v in group_metadata_item.items()\n }\n except AttributeError:\n processed_group_metadata_item = group_metadata_item\n return processed_group_metadata_item\n\n if groups is not None:\n groups_by_id = {\n grp[group.GroupMetadataField.GROUP_ID.value]: {\n k: _replace_column_names_with_ids(v) for k, v in grp.items()\n if k != group.GroupMetadataField.GROUP_ID.value\n } | {RESULT_IDX: []}\n for grp in groups\n }\n\n for i, meta in enumerate(record_metadata):\n groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i)\n\n output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0])\n else:\n output_groups = None\n\n return processed_records, output_groups\n", "path": "mathesar/api/utils.py"}, {"content": "from psycopg2.errors import NotNullViolation\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom sqlalchemy.exc import IntegrityError\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n\n\nclass RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n filter = serializers.JSONField(required=False, default=None)\n order_by = serializers.JSONField(required=False, default=[])\n grouping = serializers.JSONField(required=False, default={})\n duplicate_only = 
serializers.JSONField(required=False, default=None)\n\n\nclass RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer):\n def update(self, instance, validated_data):\n table = self.context['table']\n record = table.update_record(instance['id'], validated_data)\n return record\n\n def create(self, validated_data):\n table = self.context['table']\n try:\n record = table.create_record_or_records(validated_data)\n except IntegrityError as e:\n if type(e.orig) == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n table=table\n )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return record\n\n def to_representation(self, instance):\n records = instance._asdict() if not isinstance(instance, dict) else instance\n columns_map = self.context['columns_map']\n records = {columns_map[column_name]: column_value for column_name, column_value in records.items()}\n return records\n\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n return data\n", "path": "mathesar/api/serializers/records.py"}]}
1,983
782
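The validation added in the record above is easy to sanity-check outside Mathesar. The sketch below is a minimal stand-alone version of the same idea, collapsing the grammar into a single pattern instead of the patch's eight separate expressions; the function name matches the patch, but the demo harness and test values are illustrative only.

```python
import re

# One pattern for the JSON number grammar (https://www.json.org/json-en.html):
# optional minus, "0" or a non-zero-led integer, optional fraction,
# optional exponent with at least one digit.
JSON_NUMBER = re.compile(r"-?(?:0|[1-9][0-9]*)(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?")

def follows_json_number_spec(value: str) -> bool:
    """Locale-independent check that a string is a JSON number literal."""
    return JSON_NUMBER.fullmatch(value) is not None

if __name__ == "__main__":
    # "3,14" is the locale-specific decimal-comma input from the issue;
    # it must be rejected before it ever reaches PostgreSQL.
    for s in ("3.14", "-0.5e10", "42", "3,14", "01", "+7", ""):
        print(f"{s!r:>10} -> {follows_json_number_spec(s)}")
```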
gh_patches_debug_31432
rasdani/github-patches
git_diff
facebookresearch__CompilerGym-309
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CompilerGym cache directory defaults don't play nicely with shared access machines ## 🐛 Bug The default locations of the CompilerGym caches are in shared folders. On multi-user machines this can cause permission errors as the directory may be created by one user without write permissions to other users. I propose switching to user-specific defaults like so: - [x] `COMPILER_GYM_CACHE` -> `/tmp/compiler_gym-$user` - [x] `COMPILER_GYM_TRANSIENT_CACHE` -> `/dev/shm/compiler_gym-$user` </issue> <code> [start of compiler_gym/util/runfiles_path.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 """Module for resolving a runfiles path.""" 6 import getpass 7 import os 8 from pathlib import Path 9 10 # NOTE(cummins): Moving this file may require updating this relative path. 11 _PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), "../../")).resolve( 12     strict=True 13 ) 14 15 16 def runfiles_path(relpath: str) -> Path: 17     """Resolve the path to a runfiles data path. 18 19     No checks are to made to ensure that the path, or the containing directory, 20     exist. 21 22     Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running 23     outside of bazel. 24 25     :param relpath: The relative path within the runfiles tree. 26 27     :return: An absolute path. 28     """ 29     # There are three ways of determining a runfiles path: 30     #   1. Set the COMPILER_GYM_RUNFILES environment variable. 31     #   2. Using the rules_python library that is provided by bazel. This will 32     #      fail if not being executed within a bazel sandbox. 33     #   3. Computing the path relative to the location of this file. This is the 34     #      fallback approach that is used for when the code has been installed 35     #      by setuptools. 36     runfiles_path = os.environ.get("COMPILER_GYM_RUNFILES") 37     if runfiles_path: 38         return Path(runfiles_path) / relpath 39     else: 40         try: 41             from rules_python.python.runfiles import runfiles 42 43             return Path( 44                 runfiles.Create().Rlocation( 45                     "CompilerGym" if relpath == "." else f"CompilerGym/{relpath}" 46                 ) 47             ) 48         except (ModuleNotFoundError, TypeError): 49             return _PACKAGE_ROOT / relpath 50 51 52 def site_data_path(relpath: str) -> Path: 53     """Return a path within the site data directory. 54 55     CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets. 56     The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable 57     :code:`$COMPILER_GYM_SITE_DATA` to override this default location. 58 59     No checks are to made to ensure that the path, or the containing directory, 60     exist. 61 62     :param relpath: The relative path within the site data tree. 63 64     :return: An absolute path. 65     """ 66     # NOTE(cummins): This function has a matching implementation in the C++ 67     # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior 68     # here must be reflected in the C++ version. 69     forced = os.environ.get("COMPILER_GYM_SITE_DATA") 70     if forced: 71         return Path(forced) / relpath 72     elif os.environ.get("HOME"): 73         return Path("~/.local/share/compiler_gym").expanduser() / relpath 74     else: 75         return Path("/tmp/compiler_gym/site_data") / relpath 76 77 78 def cache_path(relpath: str) -> Path: 79     """Return a path within the cache directory.
80 81 CompilerGym uses a directory to cache files in, such as downloaded content. 82 The default location for this cache is :code:`~/.cache/compiler_gym`. Set 83 the environment variable :code:`$COMPILER_GYM_CACHE` to override this 84 default location. 85 86 No checks are to made to ensure that the path, or the containing directory, 87 exist. 88 89 :param relpath: The relative path within the cache tree. 90 91 :return: An absolute path. 92 """ 93 forced = os.environ.get("COMPILER_GYM_CACHE") 94 if forced: 95 return Path(forced) / relpath 96 elif os.environ.get("HOME"): 97 return Path("~/.cache/compiler_gym").expanduser() / relpath 98 else: 99 return Path("/tmp/compiler_gym/cache") / relpath 100 101 102 def transient_cache_path(relpath: str) -> Path: 103 """Return a path within the transient cache directory. 104 105 The transient cache is a directory used to store files that do not need to 106 persist beyond the lifetime of the current process. When available, the 107 temporary filesystem :code:`/dev/shm` will be used. Else, 108 :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set 109 the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override 110 the default location. 111 112 No checks are to made to ensure that the path, or the containing directory, 113 exist. 114 115 :param relpath: The relative path within the cache tree. 116 117 :return: An absolute path. 118 """ 119 forced = os.environ.get("COMPILER_GYM_TRANSIENT_CACHE") 120 if forced: 121 return Path(forced) / relpath 122 elif Path("/dev/shm").is_dir(): 123 return Path(f"/dev/shm/compiler_gym_{getpass.getuser()}") / relpath 124 else: 125 # Fallback to using the regular cache. 126 return cache_path(relpath) 127 [end of compiler_gym/util/runfiles_path.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py --- a/compiler_gym/util/runfiles_path.py +++ b/compiler_gym/util/runfiles_path.py @@ -3,8 +3,8 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Module for resolving a runfiles path.""" -import getpass import os +from getpass import getuser from pathlib import Path # NOTE(cummins): Moving this file may require updating this relative path. @@ -72,7 +72,7 @@ elif os.environ.get("HOME"): return Path("~/.local/share/compiler_gym").expanduser() / relpath else: - return Path("/tmp/compiler_gym/site_data") / relpath + return Path(f"/tmp/compiler_gym_{getuser()}/site_data") / relpath def cache_path(relpath: str) -> Path: @@ -96,7 +96,7 @@ elif os.environ.get("HOME"): return Path("~/.cache/compiler_gym").expanduser() / relpath else: - return Path("/tmp/compiler_gym/cache") / relpath + return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath def transient_cache_path(relpath: str) -> Path: @@ -120,7 +120,7 @@ if forced: return Path(forced) / relpath elif Path("/dev/shm").is_dir(): - return Path(f"/dev/shm/compiler_gym_{getpass.getuser()}") / relpath + return Path(f"/dev/shm/compiler_gym_{getuser()}") / relpath else: # Fallback to using the regular cache. return cache_path(relpath)
{"golden_diff": "diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py\n--- a/compiler_gym/util/runfiles_path.py\n+++ b/compiler_gym/util/runfiles_path.py\n@@ -3,8 +3,8 @@\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n \"\"\"Module for resolving a runfiles path.\"\"\"\n-import getpass\n import os\n+from getpass import getuser\n from pathlib import Path\n \n # NOTE(cummins): Moving this file may require updating this relative path.\n@@ -72,7 +72,7 @@\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n- return Path(\"/tmp/compiler_gym/site_data\") / relpath\n+ return Path(f\"/tmp/compiler_gym_{getuser()}/site_data\") / relpath\n \n \n def cache_path(relpath: str) -> Path:\n@@ -96,7 +96,7 @@\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n- return Path(\"/tmp/compiler_gym/cache\") / relpath\n+ return Path(f\"/tmp/compiler_gym_{getuser()}/cache\") / relpath\n \n \n def transient_cache_path(relpath: str) -> Path:\n@@ -120,7 +120,7 @@\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n- return Path(f\"/dev/shm/compiler_gym_{getpass.getuser()}\") / relpath\n+ return Path(f\"/dev/shm/compiler_gym_{getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "issue": "CompilerGym cache directory defaults don't doesn't play nicely with shared access machines\n## \ud83d\udc1b Bug\r\n\r\nThe default locations of the CompilerGym caches are in shared folders. On multi-user machines this can cause permission errors as the directory may be created by one user without write permissions to other users. I propose switching to user-specific defaults like so:\r\n\r\n- [x] `COMPILER_GYM_CACHE` -> `/tmp/compiler_gym-$user`\r\n- [x] `COMPILER_YM_TRANSIENT_CACHE` -> `/dev/shm/compiler_gym-$user`\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Module for resolving a runfiles path.\"\"\"\nimport getpass\nimport os\nfrom pathlib import Path\n\n# NOTE(cummins): Moving this file may require updating this relative path.\n_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), \"../../\")).resolve(\n strict=True\n)\n\n\ndef runfiles_path(relpath: str) -> Path:\n \"\"\"Resolve the path to a runfiles data path.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running\n outside of bazel.\n\n :param relpath: The relative path within the runfiles tree.\n\n :return: An absolute path.\n \"\"\"\n # There are three ways of determining a runfiles path:\n # 1. Set the COMPILER_GYM_RUNFILES environment variable.\n # 2. Using the rules_python library that is provided by bazel. This will\n # fail if not being executed within a bazel sandbox.\n # 3. Computing the path relative to the location of this file. 
This is the\n # fallback approach that is used for when the code has been installed\n # by setuptools.\n runfiles_path = os.environ.get(\"COMPILER_GYM_RUNFILES\")\n if runfiles_path:\n return Path(runfiles_path) / relpath\n else:\n try:\n from rules_python.python.runfiles import runfiles\n\n return Path(\n runfiles.Create().Rlocation(\n \"CompilerGym\" if relpath == \".\" else f\"CompilerGym/{relpath}\"\n )\n )\n except (ModuleNotFoundError, TypeError):\n return _PACKAGE_ROOT / relpath\n\n\ndef site_data_path(relpath: str) -> Path:\n \"\"\"Return a path within the site data directory.\n\n CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.\n The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable\n :code:`$COMPILER_GYM_SITE_DATA` to override this default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the site data tree.\n\n :return: An absolute path.\n \"\"\"\n # NOTE(cummins): This function has a matching implementation in the C++\n # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior\n # here must be reflected in the C++ version.\n forced = os.environ.get(\"COMPILER_GYM_SITE_DATA\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n return Path(\"/tmp/compiler_gym/site_data\") / relpath\n\n\ndef cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the cache directory.\n\n CompilerGym uses a directory to cache files in, such as downloaded content.\n The default location for this cache is :code:`~/.cache/compiler_gym`. Set\n the environment variable :code:`$COMPILER_GYM_CACHE` to override this\n default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n return Path(\"/tmp/compiler_gym/cache\") / relpath\n\n\ndef transient_cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the transient cache directory.\n\n The transient cache is a directory used to store files that do not need to\n persist beyond the lifetime of the current process. When available, the\n temporary filesystem :code:`/dev/shm` will be used. Else,\n :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set\n the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override\n the default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_TRANSIENT_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n return Path(f\"/dev/shm/compiler_gym_{getpass.getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "path": "compiler_gym/util/runfiles_path.py"}]}
2,046
408
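The fix in the record above is small enough to restate as a runnable sketch. The function below mirrors the post-patch fallback order for the download cache using only the standard library; it illustrates the per-user path scheme and is not the CompilerGym module itself.

```python
import os
from getpass import getuser
from pathlib import Path

def cache_path(relpath: str) -> Path:
    """Post-patch fallback order: env var, then $HOME, then a per-user /tmp.

    Scoping the /tmp fallback to the current user is what prevents the
    permission errors described in the issue on shared machines.
    """
    forced = os.environ.get("COMPILER_GYM_CACHE")
    if forced:
        return Path(forced) / relpath
    if os.environ.get("HOME"):
        return Path("~/.cache/compiler_gym").expanduser() / relpath
    return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath

if __name__ == "__main__":
    print(cache_path("downloads/benchmark.tar.bz2"))
```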
gh_patches_debug_11562
rasdani/github-patches
git_diff
optuna__optuna-1627
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix docstrings `Sphinx>=3.1.0` doesn't build our documentation for some reason (I guess our `experimental` and `deprecated` decorators are the devils, not sure though). At first, as https://github.com/optuna/optuna/issues/1368 said, we were optimistic about this, i.e., we thought the next stable would work. However, it's not happened yet. So it's high time we dirtied our hands to enable the latest Sphinx. ~~The latest ongoing pull request is https://github.com/optuna/optuna/pull/1613.~~ </issue> <code> [start of setup.py] 1 import os 2 import sys 3 4 import pkg_resources 5 from setuptools import find_packages 6 from setuptools import setup 7 8 from typing import Dict 9 from typing import List 10 from typing import Optional 11 12 13 def get_version() -> str: 14 15 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py") 16 with open(version_filepath) as f: 17 for line in f: 18 if line.startswith("__version__"): 19 return line.strip().split()[-1][1:-1] 20 assert False 21 22 23 def get_long_description() -> str: 24 25 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md") 26 with open(readme_filepath) as f: 27 return f.read() 28 29 30 def get_install_requires() -> List[str]: 31 32 return [ 33 "alembic", 34 "cliff", 35 "cmaes>=0.5.1", 36 "colorlog", 37 "joblib", 38 "numpy", 39 "packaging>=20.0", 40 "scipy!=1.4.0", 41 "sqlalchemy>=1.1.0", 42 "tqdm", 43 ] 44 45 46 def get_tests_require() -> List[str]: 47 48 return get_extras_require()["testing"] 49 50 51 def get_extras_require() -> Dict[str, List[str]]: 52 53 requirements = { 54 "checking": ["black", "hacking", "mypy"], 55 "codecov": ["codecov", "pytest-cov"], 56 "doctest": [ 57 "cma", 58 "pandas", 59 "plotly>=4.0.0", 60 "scikit-learn>=0.19.0,<0.23.0", 61 "scikit-optimize", 62 "mlflow", 63 ], 64 "document": [ 65 # TODO(hvy): Unpin `sphinx` version after: 66 # https://github.com/sphinx-doc/sphinx/issues/7807. 67 "sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0", 68 # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949, 69 # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0. 70 "sphinx_rtd_theme<0.5.0", 71 "sphinx-gallery", 72 "pillow", 73 "matplotlib", 74 "scikit-learn", 75 ], 76 "example": [ 77 "catboost", 78 "chainer", 79 "lightgbm", 80 "mlflow", 81 "mpi4py", 82 "mxnet", 83 "nbval", 84 "scikit-image", 85 "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py. 86 "xgboost", 87 "keras", 88 "tensorflow>=2.0.0", 89 "tensorflow-datasets", 90 ] 91 + ( 92 ( 93 ["torch==1.6.0", "torchvision==0.7.0"] 94 if sys.platform == "darwin" 95 else ["torch==1.6.0+cpu", "torchvision==0.7.0+cpu"] 96 ) 97 + ["pytorch-ignite", "thop"] 98 if (3, 5) < sys.version_info[:2] 99 else [] 100 ) 101 + (["stable-baselines3>=0.7.0"] if (3, 5) < sys.version_info[:2] else []) 102 + ( 103 ["allennlp==1.0.0", "fastai<2", "pytorch_lightning>=0.7.1"] 104 if (3, 5) < sys.version_info[:2] < (3, 8) 105 else [] 106 ) 107 + (["pytorch-lightning>=0.7.2"] if (3, 8) == sys.version_info[:2] else []) 108 + ( 109 ["llvmlite<=0.31.0", "fsspec<0.8.0"] if (3, 5) == sys.version_info[:2] else [] 110 ) # Newer `llvmlite` is not distributed with wheels for Python 3.5. 111 # Newer `fsspec` uses f-strings, which is not compatible with Python 3.5. 
112 + (["dask[dataframe]", "dask-ml",] if sys.version_info[:2] < (3, 8) else []) 113 + (["catalyst"] if (3, 5) < sys.version_info[:2] else []), 114 "experimental": ["redis"], 115 "testing": [ 116 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue 117 # https://github.com/optuna/optuna/issues/1000. 118 "bokeh<2.0.0", 119 "chainer>=5.0.0", 120 "cma", 121 "fakeredis", 122 "lightgbm", 123 "mlflow", 124 "mpi4py", 125 "mxnet", 126 "pandas", 127 "plotly>=4.0.0", 128 "pytest", 129 "scikit-learn>=0.19.0,<0.23.0", 130 "scikit-optimize", 131 "xgboost", 132 "keras", 133 "tensorflow", 134 "tensorflow-datasets", 135 ] 136 + ( 137 ( 138 ["torch==1.6.0", "torchvision==0.7.0"] 139 if sys.platform == "darwin" 140 else ["torch==1.6.0+cpu", "torchvision==0.7.0+cpu"] 141 ) 142 + ["pytorch-ignite"] 143 if (3, 5) < sys.version_info[:2] 144 else [] 145 ) 146 + ( 147 ["allennlp==1.0.0", "fastai<2", "pytorch_lightning>=0.7.1"] 148 if (3, 5) < sys.version_info[:2] < (3, 8) 149 else [] 150 ) 151 + (["catalyst"] if (3, 5) < sys.version_info[:2] else []) 152 + (["pytorch-lightning>=0.7.2"] if (3, 8) == sys.version_info[:2] else []), 153 "tests": ["fakeredis", "pytest"], 154 "optional": [ 155 "bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py. 156 "pandas", # optuna/study.py 157 "plotly>=4.0.0", # optuna/visualization. 158 "redis", # optuna/storages/redis.py. 159 "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py. 160 ], 161 "integration": [ 162 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue 163 # https://github.com/optuna/optuna/issues/1000. 164 "chainer>=5.0.0", 165 "cma", 166 "lightgbm", 167 "mlflow", 168 "mpi4py", 169 "mxnet", 170 "pandas", 171 "scikit-learn>=0.19.0,<0.23.0", 172 "scikit-optimize", 173 "xgboost", 174 "keras", 175 "tensorflow", 176 "tensorflow-datasets", 177 ] 178 + ( 179 ( 180 ["torch==1.6.0", "torchvision==0.7.0"] 181 if sys.platform == "darwin" 182 else ["torch==1.6.0+cpu", "torchvision==0.7.0+cpu"] 183 ) 184 + ["pytorch-ignite"] 185 if (3, 5) < sys.version_info[:2] 186 else [] 187 ) 188 + ( 189 ["allennlp==1.0.0", "fastai<2", "pytorch-lightning>=0.7.1"] 190 if (3, 5) < sys.version_info[:2] < (3, 8) 191 else [] 192 ) 193 + (["catalyst"] if (3, 5) < sys.version_info[:2] else []) 194 + (["pytorch-lightning>=0.7.2"] if (3, 8) == sys.version_info[:2] else []), 195 } 196 197 return requirements 198 199 200 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]: 201 202 for pkg in pkgs: 203 try: 204 return pkg_resources.get_distribution(pkg) 205 except pkg_resources.DistributionNotFound: 206 pass 207 return None 208 209 210 setup( 211 name="optuna", 212 version=get_version(), 213 description="A hyperparameter optimization framework", 214 long_description=get_long_description(), 215 long_description_content_type="text/markdown", 216 author="Takuya Akiba", 217 author_email="[email protected]", 218 url="https://optuna.org/", 219 packages=find_packages(), 220 package_data={ 221 "optuna": [ 222 "storages/_rdb/alembic.ini", 223 "storages/_rdb/alembic/*.*", 224 "storages/_rdb/alembic/versions/*.*", 225 ] 226 }, 227 python_requires=">=3.5", 228 install_requires=get_install_requires(), 229 tests_require=get_tests_require(), 230 extras_require=get_extras_require(), 231 entry_points={ 232 "console_scripts": ["optuna = optuna.cli:main"], 233 "optuna.command": [ 234 "create-study = optuna.cli:_CreateStudy", 235 "delete-study = optuna.cli:_DeleteStudy", 236 "study set-user-attr = 
optuna.cli:_StudySetUserAttribute", 237 "studies = optuna.cli:_Studies", 238 "dashboard = optuna.cli:_Dashboard", 239 "study optimize = optuna.cli:_StudyOptimize", 240 "storage upgrade = optuna.cli:_StorageUpgrade", 241 ], 242 }, 243 classifiers=[ 244 "Development Status :: 5 - Production/Stable", 245 "Intended Audience :: Science/Research", 246 "Intended Audience :: Developers", 247 "License :: OSI Approved :: MIT License", 248 "Programming Language :: Python :: 3", 249 "Programming Language :: Python :: 3.5", 250 "Programming Language :: Python :: 3.6", 251 "Programming Language :: Python :: 3.7", 252 "Programming Language :: Python :: 3.8", 253 "Programming Language :: Python :: 3 :: Only", 254 "Topic :: Scientific/Engineering", 255 "Topic :: Scientific/Engineering :: Mathematics", 256 "Topic :: Scientific/Engineering :: Artificial Intelligence", 257 "Topic :: Software Development", 258 "Topic :: Software Development :: Libraries", 259 "Topic :: Software Development :: Libraries :: Python Modules", 260 ], 261 ) 262 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -63,8 +63,8 @@ ], "document": [ # TODO(hvy): Unpin `sphinx` version after: - # https://github.com/sphinx-doc/sphinx/issues/7807. - "sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0", + # https://github.com/sphinx-doc/sphinx/issues/8105. + "sphinx==3.0.4", # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949, # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0. "sphinx_rtd_theme<0.5.0",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,8 +63,8 @@\n ],\n \"document\": [\n # TODO(hvy): Unpin `sphinx` version after:\n- # https://github.com/sphinx-doc/sphinx/issues/7807.\n- \"sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0\",\n+ # https://github.com/sphinx-doc/sphinx/issues/8105.\n+ \"sphinx==3.0.4\",\n # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,\n # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.\n \"sphinx_rtd_theme<0.5.0\",\n", "issue": "Fix docstrings\n`Sphinx>=3.1.0` doesn't build our documentation for some reason (I guess our `experimental` and `deprecated` decorators are the devils, not sure though).\r\n\r\nAt first, as https://github.com/optuna/optuna/issues/1368 said, we were optimistic about this, i.e., we thought the next stable would work. However, it's not happened yet. So it's high time we dirtied our hands to enable the latest Sphinx.\r\n\r\n~~The latest ongoing pull request is https://github.com/optuna/optuna/pull/1613.~~\n", "before_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.5.1\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\"],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n \"document\": [\n # TODO(hvy): Unpin `sphinx` version after:\n # https://github.com/sphinx-doc/sphinx/issues/7807.\n \"sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0\",\n # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,\n # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.\n \"sphinx_rtd_theme<0.5.0\",\n \"sphinx-gallery\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n ],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"nbval\",\n \"scikit-image\",\n \"scikit-learn>=0.19.0,<0.23.0\", # optuna/visualization/param_importances.py.\n \"xgboost\",\n \"keras\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\", \"thop\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + ([\"stable-baselines3>=0.7.0\"] if (3, 5) < sys.version_info[:2] else [])\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch_lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + 
([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else [])\n + (\n [\"llvmlite<=0.31.0\", \"fsspec<0.8.0\"] if (3, 5) == sys.version_info[:2] else []\n ) # Newer `llvmlite` is not distributed with wheels for Python 3.5.\n # Newer `fsspec` uses f-strings, which is not compatible with Python 3.5.\n + ([\"dask[dataframe]\", \"dask-ml\",] if sys.version_info[:2] < (3, 8) else [])\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else []),\n \"experimental\": [\"redis\"],\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch_lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else [])\n + ([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else []),\n \"tests\": [\"fakeredis\", \"pytest\"],\n \"optional\": [\n \"bokeh<2.0.0\", # optuna/cli.py, optuna/dashboard.py.\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.19.0,<0.23.0\", # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch-lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else [])\n + ([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else []),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n \"storages/_rdb/alembic/*.*\",\n \"storages/_rdb/alembic/versions/*.*\",\n ]\n },\n python_requires=\">=3.5\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n 
entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"dashboard = optuna.cli:_Dashboard\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]}
3,708
212
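Because the fix above is purely a dependency pin, the useful thing to illustrate is how a requirement string is evaluated against an installed version. The helper below is a sketch assuming Python 3.8+ and the third-party packaging library (already listed in the setup.py's install_requires); satisfies() is an illustrative name, not optuna or pip API.

```python
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement

def satisfies(req_string: str) -> bool:
    """Return True if the installed distribution meets the requirement."""
    req = Requirement(req_string)
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        return False
    return installed in req.specifier

if __name__ == "__main__":
    # The exclusion list the patch replaces, and the exact pin it adds:
    for spec in (
        "sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0",
        "sphinx==3.0.4",
    ):
        print(f"{spec!r}: {satisfies(spec)}")
```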
gh_patches_debug_1369
rasdani/github-patches
git_diff
Parsl__parsl-972
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix `ModuleNotFoundError: No module named 'monitoring'` Looks like this bug was introduced with the recent merge of monitoring back into the parsl repo. ``` Traceback (most recent call last): File "/Users/awoodard/software/miniconda3/bin/parsl-visualize", line 11, in <module> load_entry_point('parsl==0.7.2', 'console_scripts', 'parsl-visualize')() File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 484, in load_entry_point return get_distribution(dist).load_entry_point(group, name) File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2707, in load_entry_point return ep.load() File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2325, in load return self.resolve() File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2331, in resolve module = __import__(self.module_name, fromlist=['__name__'], level=0) ModuleNotFoundError: No module named 'monitoring' ``` </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 3 with open('parsl/version.py') as f: 4 exec(f.read()) 5 6 with open('requirements.txt') as f: 7 install_requires = f.readlines() 8 9 extras_require = { 10 'monitoring' : [ 11 'psutil', 12 'sqlalchemy', 13 'sqlalchemy_utils', 14 'pydot', 15 'networkx', 16 'Flask', 17 'flask_sqlalchemy', 18 'pandas', 19 'plotly', 20 'python-daemon' 21 ], 22 'aws' : ['boto3'], 23 'kubernetes' : ['kubernetes'], 24 'extreme_scale' : ['mpi4py'], 25 'docs' : ['nbsphinx', 'sphinx_rtd_theme'], 26 'google_cloud' : ['google-auth', 'google-api-python-client'], 27 'gssapi' : ['python-gssapi'], 28 } 29 extras_require['all'] = sum(extras_require.values(), []) 30 31 setup( 32 name='parsl', 33 version=VERSION, 34 description='Simple data dependent workflows in Python', 35 long_description='Simple parallel workflows system for Python', 36 url='https://github.com/Parsl/parsl', 37 author='The Parsl Team', 38 author_email='[email protected]', 39 license='Apache 2.0', 40 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION), 41 include_package_data=True, 42 packages=find_packages(), 43 install_requires=install_requires, 44 scripts = ['parsl/executors/high_throughput/process_worker_pool.py', 45 'parsl/executors/extreme_scale/mpi_worker_pool.py', 46 'parsl/executors/low_latency/lowlatency_worker.py', 47 ], 48 extras_require=extras_require, 49 classifiers=[ 50 # Maturity 51 'Development Status :: 3 - Alpha', 52 # Intended audience 53 'Intended Audience :: Developers', 54 # Licence, must match with licence above 55 'License :: OSI Approved :: Apache Software License', 56 # Python versions supported 57 'Programming Language :: Python :: 3.5', 58 'Programming Language :: Python :: 3.6', 59 ], 60 keywords=['Workflows', 'Scientific computing'], 61 entry_points={'console_scripts': 62 [ 63 'parsl-globus-auth=parsl.data_provider.globus:cli_run', 64 'parsl-visualize=monitoring.visualization.app:cli_run', 65 ]} 66 ) 67 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,6 +61,6 @@ entry_points={'console_scripts': [ 'parsl-globus-auth=parsl.data_provider.globus:cli_run', - 'parsl-visualize=monitoring.visualization.app:cli_run', + 'parsl-visualize=parsl.monitoring.visualization.app:cli_run', ]} )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,6 @@\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n- 'parsl-visualize=monitoring.visualization.app:cli_run',\n+ 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n ]}\n )\n", "issue": "Fix `ModuleNotFoundError: No module named 'monitoring'`\nLooks like this bug was introduced with the recent merge of monitoring back into the parsl repo.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/awoodard/software/miniconda3/bin/parsl-visualize\", line 11, in <module>\r\n load_entry_point('parsl==0.7.2', 'console_scripts', 'parsl-visualize')()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 484, in load_entry_point\r\n return get_distribution(dist).load_entry_point(group, name)\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2707, in load_entry_point\r\n return ep.load()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2325, in load\r\n return self.resolve()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2331, in resolve\r\n module = __import__(self.module_name, fromlist=['__name__'], level=0)\r\nModuleNotFoundError: No module named 'monitoring'\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'psutil',\n 'sqlalchemy',\n 'sqlalchemy_utils',\n 'pydot',\n 'networkx',\n 'Flask',\n 'flask_sqlalchemy',\n 'pandas',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=monitoring.visualization.app:cli_run',\n ]}\n)\n", "path": "setup.py"}]}
1,480
99
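The parsl record above fixes a single module path, so the sketch below shows what a console_scripts wrapper effectively executes when it resolves such a spec. resolve_entry_point() is a simplified stand-in for the generated wrapper, not parsl code; the two specs come straight from the patch.

```python
from importlib import import_module

def resolve_entry_point(spec: str):
    """Resolve a 'package.module:callable' console_scripts spec."""
    module_path, _, attr = spec.partition(":")
    return getattr(import_module(module_path), attr)

if __name__ == "__main__":
    # 'monitoring.visualization.app' was never importable because the
    # package lives at parsl/monitoring/, hence the ModuleNotFoundError.
    for spec in (
        "monitoring.visualization.app:cli_run",        # pre-patch, broken
        "parsl.monitoring.visualization.app:cli_run",  # post-patch
    ):
        try:
            print(spec, "->", resolve_entry_point(spec))
        except ModuleNotFoundError as exc:
            print(spec, "->", exc)
```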
gh_patches_debug_24962
rasdani/github-patches
git_diff
translate__pootle-5560
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Suggestions count on stats is wrong See https://mozilla.locamotion.org/cy/ where it says 1 pending suggestion, but clicking on the link says that there are no results. </issue> <code> [start of pootle/apps/pootle_store/utils.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 from collections import OrderedDict 10 11 from django.conf import settings 12 from django.contrib.auth import get_user_model 13 from django.template import loader 14 from django.utils import timezone 15 16 from pootle.core.delegate import site 17 from pootle.core.mail import send_mail 18 from pootle.i18n.gettext import ugettext as _ 19 from pootle_comment.forms import UnsecuredCommentForm 20 from pootle_statistics.models import ( 21 Submission, SubmissionFields, SubmissionTypes) 22 23 from .constants import FUZZY, TRANSLATED 24 from .models import Suggestion, SuggestionStates 25 26 27 User = get_user_model() 28 29 30 class SuggestionsReview(object): 31 accept_email_template = 'editor/email/suggestions_accepted_with_comment.txt' 32 accept_email_subject = _(u"Suggestion accepted with comment") 33 reject_email_template = 'editor/email/suggestions_rejected_with_comment.txt' 34 reject_email_subject = _(u"Suggestion rejected with comment") 35 36 def __init__(self, suggestions=None, reviewer=None): 37 self.suggestions = suggestions 38 self.reviewer = reviewer 39 40 @property 41 def users_and_suggestions(self): 42 users = {} 43 for suggestion in self.suggestions: 44 users[suggestion.user] = users.get(suggestion.user, []) 45 users[suggestion.user].append(suggestion) 46 return users 47 48 def add_comments(self, comment): 49 for suggestion in self.suggestions: 50 UnsecuredCommentForm( 51 suggestion, 52 dict(comment=comment, 53 user=self.reviewer)).save() 54 55 def add(self, unit, translation, user=None, touch=True, 56 similarity=None, mt_similarity=None): 57 """Adds a new suggestion to the unit. 58 59 :param translation: suggested translation text 60 :param user: user who is making the suggestion. If it's ``None``, 61 the ``system`` user will be used. 62 :param touch: whether to update the unit's timestamp after adding 63 the suggestion or not. 64 :param similarity: human similarity for the new suggestion. 65 :param mt_similarity: MT similarity for the new suggestion. 66 67 :return: a tuple ``(suggestion, created)`` where ``created`` is a 68 boolean indicating if the suggestion was successfully added. 69 If the suggestion already exists it's returned as well. 
70 """ 71 dont_add = ( 72 not filter(None, translation) 73 or translation == unit.target) 74 if dont_add: 75 return (None, False) 76 user = user or User.objects.get_system_user() 77 try: 78 suggestion = Suggestion.objects.pending().get( 79 unit=unit, 80 user=user, 81 target_f=translation) 82 return (suggestion, False) 83 except Suggestion.DoesNotExist: 84 suggestion = Suggestion.objects.create( 85 unit=unit, 86 user=user, 87 state=SuggestionStates.PENDING, 88 target=translation, 89 creation_time=timezone.now()) 90 self.create_submission( 91 suggestion, 92 SubmissionTypes.SUGG_ADD, 93 user, 94 similarity=similarity, 95 mt_similarity=mt_similarity).save() 96 if touch: 97 unit.save() 98 return (suggestion, True) 99 100 def create_submission(self, suggestion, suggestion_type, user, **kwargs): 101 return Submission( 102 creation_time=kwargs.get("creation_time", suggestion.creation_time), 103 translation_project=suggestion.unit.store.translation_project, 104 submitter=user, 105 unit=suggestion.unit, 106 store=suggestion.unit.store, 107 type=suggestion_type, 108 suggestion=suggestion, 109 similarity=kwargs.get("similarity"), 110 mt_similarity=kwargs.get("mt_similarity")) 111 112 def accept_suggestion(self, suggestion): 113 unit = suggestion.unit 114 translation_project = unit.store.translation_project 115 116 # Save for later 117 old_state = unit.state 118 old_target = unit.target 119 120 # Update some basic attributes so we can create submissions. Note 121 # these do not conflict with `ScoreLog`'s interests, so it's safe 122 unit.target = suggestion.target 123 if unit.state == FUZZY: 124 unit.state = TRANSLATED 125 126 current_time = timezone.now() 127 suggestion.state = SuggestionStates.ACCEPTED 128 suggestion.reviewer = self.reviewer 129 suggestion.review_time = current_time 130 suggestion.save() 131 create_subs = OrderedDict() 132 if old_state != unit.state: 133 create_subs[SubmissionFields.STATE] = [old_state, unit.state] 134 create_subs[SubmissionFields.TARGET] = [old_target, unit.target] 135 subs_created = [] 136 for field in create_subs: 137 kwargs = { 138 'creation_time': current_time, 139 'translation_project': translation_project, 140 'submitter': self.reviewer, 141 'unit': unit, 142 'store': unit.store, 143 'field': field, 144 'type': SubmissionTypes.SUGG_ACCEPT, 145 'old_value': create_subs[field][0], 146 'new_value': create_subs[field][1], 147 } 148 if field == SubmissionFields.TARGET: 149 kwargs['suggestion'] = suggestion 150 151 subs_created.append(Submission(**kwargs)) 152 if subs_created: 153 unit.submission_set.add(*subs_created, bulk=False) 154 155 # FIXME: remove such a dependency on `ScoreLog` 156 # Update current unit instance's attributes 157 # important to set these attributes after saving Submission 158 # because in the `ScoreLog` we need to access the unit's certain 159 # attributes before it was saved 160 # THIS NEEDS TO GO ^^ 161 unit.submitted_by = suggestion.user 162 unit.submitted_on = current_time 163 unit.reviewed_by = self.reviewer 164 unit.reviewed_on = unit.submitted_on 165 unit._log_user = self.reviewer 166 unit.save() 167 168 def reject_suggestion(self, suggestion): 169 suggestion.state = SuggestionStates.REJECTED 170 suggestion.review_time = timezone.now() 171 suggestion.reviewer = self.reviewer 172 suggestion.save() 173 self.create_submission( 174 suggestion, 175 SubmissionTypes.SUGG_REJECT, 176 self.reviewer, 177 creation_time=suggestion.review_time).save() 178 179 def accept_suggestions(self): 180 for suggestion in self.suggestions: 181 
self.accept_suggestion(suggestion) 182 183 def accept(self, comment=""): 184 self.accept_suggestions() 185 if self.should_notify(comment): 186 self.notify_suggesters(rejected=False, comment=comment) 187 if comment: 188 self.add_comments(comment=comment) 189 190 def build_absolute_uri(self, url): 191 return site.get().build_absolute_uri(url) 192 193 def get_email_message(self, suggestions, comment, template): 194 for suggestion in suggestions: 195 suggestion.unit_url = ( 196 self.build_absolute_uri( 197 suggestion.unit.get_translate_url())) 198 return loader.render_to_string( 199 template, 200 context=dict(suggestions=suggestions, 201 comment=comment)) 202 203 def notify_suggesters(self, rejected=True, comment=""): 204 for suggester, suggestions in self.users_and_suggestions.items(): 205 if rejected: 206 template = self.reject_email_template 207 subject = self.reject_email_subject 208 else: 209 template = self.accept_email_template 210 subject = self.accept_email_subject 211 self.send_mail(template, subject, suggester, suggestions, comment) 212 213 def reject_suggestions(self): 214 for suggestion in self.suggestions: 215 self.reject_suggestion(suggestion) 216 217 def reject(self, comment=""): 218 self.reject_suggestions() 219 if self.should_notify(comment): 220 self.notify_suggesters(rejected=True, comment=comment) 221 if comment: 222 self.add_comments(comment) 223 224 def send_mail(self, template, subject, suggester, suggestions, comment): 225 send_mail( 226 subject, 227 self.get_email_message( 228 suggestions, 229 comment, 230 template), 231 from_email=None, 232 recipient_list=[suggester.email], 233 fail_silently=True) 234 235 def should_notify(self, comment): 236 return ( 237 comment 238 and settings.POOTLE_EMAIL_FEEDBACK_ENABLED) 239 [end of pootle/apps/pootle_store/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/apps/pootle_store/utils.py b/pootle/apps/pootle_store/utils.py --- a/pootle/apps/pootle_store/utils.py +++ b/pootle/apps/pootle_store/utils.py @@ -15,6 +15,7 @@ from pootle.core.delegate import site from pootle.core.mail import send_mail +from pootle.core.signals import update_data from pootle.i18n.gettext import ugettext as _ from pootle_comment.forms import UnsecuredCommentForm from pootle_statistics.models import ( @@ -166,6 +167,7 @@ unit.save() def reject_suggestion(self, suggestion): + store = suggestion.unit.store suggestion.state = SuggestionStates.REJECTED suggestion.review_time = timezone.now() suggestion.reviewer = self.reviewer @@ -176,6 +178,8 @@ self.reviewer, creation_time=suggestion.review_time).save() + update_data.send(store.__class__, instance=store) + def accept_suggestions(self): for suggestion in self.suggestions: self.accept_suggestion(suggestion)
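Note on why this one-line signal fixes the stale count: rejecting a suggestion moves its state away from PENDING, but nothing in the original `reject_suggestion` recalculated the store's cached statistics, so the pending-suggestion counter shown on the stats page kept its old value. The sketch below illustrates the mechanism with a hypothetical receiver. Only `update_data.send(store.__class__, instance=store)` comes from the patch itself; the receiver name, the `instance.data.pending_suggestions` cache field, and the import paths are assumptions for illustration, not Pootle's actual implementation.

# Illustrative sketch (not Pootle's real receiver): shows how an
# `update_data` signal can keep a cached pending-suggestion count in
# sync after a rejection. In Pootle the signal is imported from
# pootle.core.signals; it is redefined here only to keep the sketch
# self-contained.
from django.dispatch import Signal, receiver

from pootle_store.models import Suggestion, SuggestionStates

update_data = Signal()

@receiver(update_data)
def refresh_store_stats(sender, instance, **kwargs):
    # `instance` is the Store whose unit just had a suggestion rejected.
    # Recount only suggestions that are still pending, so rejected ones
    # no longer inflate the cached statistic.
    pending = Suggestion.objects.filter(
        unit__store=instance,
        state=SuggestionStates.PENDING).count()
    instance.data.pending_suggestions = pending  # hypothetical cache field
    instance.data.save(update_fields=["pending_suggestions"])

With the patch applied, every call to `reject_suggestion` fires the signal, so whatever listens for `update_data` recomputes the store's counts and the stats link and the unit listing agree again.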
{"golden_diff": "diff --git a/pootle/apps/pootle_store/utils.py b/pootle/apps/pootle_store/utils.py\n--- a/pootle/apps/pootle_store/utils.py\n+++ b/pootle/apps/pootle_store/utils.py\n@@ -15,6 +15,7 @@\n \n from pootle.core.delegate import site\n from pootle.core.mail import send_mail\n+from pootle.core.signals import update_data\n from pootle.i18n.gettext import ugettext as _\n from pootle_comment.forms import UnsecuredCommentForm\n from pootle_statistics.models import (\n@@ -166,6 +167,7 @@\n unit.save()\n \n def reject_suggestion(self, suggestion):\n+ store = suggestion.unit.store\n suggestion.state = SuggestionStates.REJECTED\n suggestion.review_time = timezone.now()\n suggestion.reviewer = self.reviewer\n@@ -176,6 +178,8 @@\n self.reviewer,\n creation_time=suggestion.review_time).save()\n \n+ update_data.send(store.__class__, instance=store)\n+\n def accept_suggestions(self):\n for suggestion in self.suggestions:\n self.accept_suggestion(suggestion)\n", "issue": "Suggestions count on stats is wrong\nSee https://mozilla.locamotion.org/cy/ where it says 1 pending suggestion, but clicking on the link says that there are no results.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.template import loader\nfrom django.utils import timezone\n\nfrom pootle.core.delegate import site\nfrom pootle.core.mail import send_mail\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_comment.forms import UnsecuredCommentForm\nfrom pootle_statistics.models import (\n Submission, SubmissionFields, SubmissionTypes)\n\nfrom .constants import FUZZY, TRANSLATED\nfrom .models import Suggestion, SuggestionStates\n\n\nUser = get_user_model()\n\n\nclass SuggestionsReview(object):\n accept_email_template = 'editor/email/suggestions_accepted_with_comment.txt'\n accept_email_subject = _(u\"Suggestion accepted with comment\")\n reject_email_template = 'editor/email/suggestions_rejected_with_comment.txt'\n reject_email_subject = _(u\"Suggestion rejected with comment\")\n\n def __init__(self, suggestions=None, reviewer=None):\n self.suggestions = suggestions\n self.reviewer = reviewer\n\n @property\n def users_and_suggestions(self):\n users = {}\n for suggestion in self.suggestions:\n users[suggestion.user] = users.get(suggestion.user, [])\n users[suggestion.user].append(suggestion)\n return users\n\n def add_comments(self, comment):\n for suggestion in self.suggestions:\n UnsecuredCommentForm(\n suggestion,\n dict(comment=comment,\n user=self.reviewer)).save()\n\n def add(self, unit, translation, user=None, touch=True,\n similarity=None, mt_similarity=None):\n \"\"\"Adds a new suggestion to the unit.\n\n :param translation: suggested translation text\n :param user: user who is making the suggestion. 
If it's ``None``,\n the ``system`` user will be used.\n :param touch: whether to update the unit's timestamp after adding\n the suggestion or not.\n :param similarity: human similarity for the new suggestion.\n :param mt_similarity: MT similarity for the new suggestion.\n\n :return: a tuple ``(suggestion, created)`` where ``created`` is a\n boolean indicating if the suggestion was successfully added.\n If the suggestion already exists it's returned as well.\n \"\"\"\n dont_add = (\n not filter(None, translation)\n or translation == unit.target)\n if dont_add:\n return (None, False)\n user = user or User.objects.get_system_user()\n try:\n suggestion = Suggestion.objects.pending().get(\n unit=unit,\n user=user,\n target_f=translation)\n return (suggestion, False)\n except Suggestion.DoesNotExist:\n suggestion = Suggestion.objects.create(\n unit=unit,\n user=user,\n state=SuggestionStates.PENDING,\n target=translation,\n creation_time=timezone.now())\n self.create_submission(\n suggestion,\n SubmissionTypes.SUGG_ADD,\n user,\n similarity=similarity,\n mt_similarity=mt_similarity).save()\n if touch:\n unit.save()\n return (suggestion, True)\n\n def create_submission(self, suggestion, suggestion_type, user, **kwargs):\n return Submission(\n creation_time=kwargs.get(\"creation_time\", suggestion.creation_time),\n translation_project=suggestion.unit.store.translation_project,\n submitter=user,\n unit=suggestion.unit,\n store=suggestion.unit.store,\n type=suggestion_type,\n suggestion=suggestion,\n similarity=kwargs.get(\"similarity\"),\n mt_similarity=kwargs.get(\"mt_similarity\"))\n\n def accept_suggestion(self, suggestion):\n unit = suggestion.unit\n translation_project = unit.store.translation_project\n\n # Save for later\n old_state = unit.state\n old_target = unit.target\n\n # Update some basic attributes so we can create submissions. 
Note\n # these do not conflict with `ScoreLog`'s interests, so it's safe\n unit.target = suggestion.target\n if unit.state == FUZZY:\n unit.state = TRANSLATED\n\n current_time = timezone.now()\n suggestion.state = SuggestionStates.ACCEPTED\n suggestion.reviewer = self.reviewer\n suggestion.review_time = current_time\n suggestion.save()\n create_subs = OrderedDict()\n if old_state != unit.state:\n create_subs[SubmissionFields.STATE] = [old_state, unit.state]\n create_subs[SubmissionFields.TARGET] = [old_target, unit.target]\n subs_created = []\n for field in create_subs:\n kwargs = {\n 'creation_time': current_time,\n 'translation_project': translation_project,\n 'submitter': self.reviewer,\n 'unit': unit,\n 'store': unit.store,\n 'field': field,\n 'type': SubmissionTypes.SUGG_ACCEPT,\n 'old_value': create_subs[field][0],\n 'new_value': create_subs[field][1],\n }\n if field == SubmissionFields.TARGET:\n kwargs['suggestion'] = suggestion\n\n subs_created.append(Submission(**kwargs))\n if subs_created:\n unit.submission_set.add(*subs_created, bulk=False)\n\n # FIXME: remove such a dependency on `ScoreLog`\n # Update current unit instance's attributes\n # important to set these attributes after saving Submission\n # because in the `ScoreLog` we need to access the unit's certain\n # attributes before it was saved\n # THIS NEEDS TO GO ^^\n unit.submitted_by = suggestion.user\n unit.submitted_on = current_time\n unit.reviewed_by = self.reviewer\n unit.reviewed_on = unit.submitted_on\n unit._log_user = self.reviewer\n unit.save()\n\n def reject_suggestion(self, suggestion):\n suggestion.state = SuggestionStates.REJECTED\n suggestion.review_time = timezone.now()\n suggestion.reviewer = self.reviewer\n suggestion.save()\n self.create_submission(\n suggestion,\n SubmissionTypes.SUGG_REJECT,\n self.reviewer,\n creation_time=suggestion.review_time).save()\n\n def accept_suggestions(self):\n for suggestion in self.suggestions:\n self.accept_suggestion(suggestion)\n\n def accept(self, comment=\"\"):\n self.accept_suggestions()\n if self.should_notify(comment):\n self.notify_suggesters(rejected=False, comment=comment)\n if comment:\n self.add_comments(comment=comment)\n\n def build_absolute_uri(self, url):\n return site.get().build_absolute_uri(url)\n\n def get_email_message(self, suggestions, comment, template):\n for suggestion in suggestions:\n suggestion.unit_url = (\n self.build_absolute_uri(\n suggestion.unit.get_translate_url()))\n return loader.render_to_string(\n template,\n context=dict(suggestions=suggestions,\n comment=comment))\n\n def notify_suggesters(self, rejected=True, comment=\"\"):\n for suggester, suggestions in self.users_and_suggestions.items():\n if rejected:\n template = self.reject_email_template\n subject = self.reject_email_subject\n else:\n template = self.accept_email_template\n subject = self.accept_email_subject\n self.send_mail(template, subject, suggester, suggestions, comment)\n\n def reject_suggestions(self):\n for suggestion in self.suggestions:\n self.reject_suggestion(suggestion)\n\n def reject(self, comment=\"\"):\n self.reject_suggestions()\n if self.should_notify(comment):\n self.notify_suggesters(rejected=True, comment=comment)\n if comment:\n self.add_comments(comment)\n\n def send_mail(self, template, subject, suggester, suggestions, comment):\n send_mail(\n subject,\n self.get_email_message(\n suggestions,\n comment,\n template),\n from_email=None,\n recipient_list=[suggester.email],\n fail_silently=True)\n\n def should_notify(self, comment):\n return (\n 
comment\n and settings.POOTLE_EMAIL_FEEDBACK_ENABLED)\n", "path": "pootle/apps/pootle_store/utils.py"}]}
2925
260
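As a closing illustration of the discrepancy this row describes, here is a regression-test-style sketch. The `reviewer` and `unit` fixtures and the `pending_count_shown_on_stats` accessor are placeholders for whatever the real test suite provides; only `SuggestionsReview`, its `add`/`reject` methods, and `Suggestion.objects.pending()` come from the code shown above. The point is the ordering: reject, then re-read the cached count.

# Hypothetical regression sketch: before the patch, the cached stats
# count was never refreshed on rejection, so `cached` below would stay
# at 1 while the live queryset already returned 0 (stats said one
# pending suggestion, clicking through showed no results).
from pootle_store.models import Suggestion
from pootle_store.utils import SuggestionsReview

def test_rejecting_suggestion_refreshes_pending_count(reviewer, unit):
    review = SuggestionsReview(reviewer=reviewer)
    # Assumes unit.target differs from the suggested text, otherwise
    # `add` deliberately returns (None, False).
    suggestion, created = review.add(unit, "suggested translation")
    assert created

    review.suggestions = [suggestion]
    review.reject()  # after the patch this also fires update_data

    live = Suggestion.objects.pending().filter(unit__store=unit.store).count()
    cached = pending_count_shown_on_stats(unit.store)  # placeholder accessor
    assert live == cached == 0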