| problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
|---|---|---|---|---|---|---|---|---|
gh_patches_debug_7415 | rasdani/github-patches | git_diff | fonttools__fonttools-2439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ReemKufiInk crashes pyftsubset
```
pyftsubset --text=duck --output-file=/tmp/reem.otf ReemKufiInk-Bold.otf
Traceback (most recent call last):
File "/tmp/venv/bin/pyftsubset", line 8, in <module>
sys.exit(main())
File "/tmp/venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py", line 372, in wrapper
return func(*args, **kwds)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 3104, in main
subsetter.subset(font)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2908, in subset
self._subset_glyphs(font)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2846, in _subset_glyphs
retain = table.subset_glyphs(self)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2086, in subset_glyphs
colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py", line 6, in unbuildColrV1
unbuilder = LayerListUnbuilder(layerV1List.Paint)
AttributeError: 'NoneType' object has no attribute 'Paint'
```
TTX handles the font just fine. File from https://github.com/aliftype/reem-kufi/commits/colr-v1 at 93d6dcd693ae42bb4295701e88a07cc4d04db73c
</issue>
<code>
[start of Lib/fontTools/colorLib/unbuilder.py]
1 from fontTools.ttLib.tables import otTables as ot
2 from .table_builder import TableUnbuilder
3
4
5 def unbuildColrV1(layerV1List, baseGlyphV1List):
6 unbuilder = LayerListUnbuilder(layerV1List.Paint)
7 return {
8 rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
9 for rec in baseGlyphV1List.BaseGlyphPaintRecord
10 }
11
12
13 def _flatten(lst):
14 for el in lst:
15 if isinstance(el, list):
16 yield from _flatten(el)
17 else:
18 yield el
19
20
21 class LayerListUnbuilder:
22 def __init__(self, layers):
23 self.layers = layers
24
25 callbacks = {
26 (
27 ot.Paint,
28 ot.PaintFormat.PaintColrLayers,
29 ): self._unbuildPaintColrLayers,
30 }
31 self.tableUnbuilder = TableUnbuilder(callbacks)
32
33 def unbuildPaint(self, paint):
34 assert isinstance(paint, ot.Paint)
35 return self.tableUnbuilder.unbuild(paint)
36
37 def _unbuildPaintColrLayers(self, source):
38 assert source["Format"] == ot.PaintFormat.PaintColrLayers
39
40 layers = list(
41 _flatten(
42 [
43 self.unbuildPaint(childPaint)
44 for childPaint in self.layers[
45 source["FirstLayerIndex"] : source["FirstLayerIndex"]
46 + source["NumLayers"]
47 ]
48 ]
49 )
50 )
51
52 if len(layers) == 1:
53 return layers[0]
54
55 return {"Format": source["Format"], "Layers": layers}
56
57
58 if __name__ == "__main__":
59 from pprint import pprint
60 import sys
61 from fontTools.ttLib import TTFont
62
63 try:
64 fontfile = sys.argv[1]
65 except IndexError:
66 sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")
67
68 font = TTFont(fontfile)
69 colr = font["COLR"]
70 if colr.version < 1:
71 sys.exit(f"error: No COLR table version=1 found in {fontfile}")
72
73 colorGlyphs = unbuildColrV1(
74 colr.table.LayerList,
75 colr.table.BaseGlyphList,
76 ignoreVarIdx=not colr.table.VarStore,
77 )
78
79 pprint(colorGlyphs)
80
[end of Lib/fontTools/colorLib/unbuilder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Lib/fontTools/colorLib/unbuilder.py b/Lib/fontTools/colorLib/unbuilder.py
--- a/Lib/fontTools/colorLib/unbuilder.py
+++ b/Lib/fontTools/colorLib/unbuilder.py
@@ -2,11 +2,14 @@
from .table_builder import TableUnbuilder
-def unbuildColrV1(layerV1List, baseGlyphV1List):
- unbuilder = LayerListUnbuilder(layerV1List.Paint)
+def unbuildColrV1(layerList, baseGlyphList):
+ layers = []
+ if layerList:
+ layers = layerList.Paint
+ unbuilder = LayerListUnbuilder(layers)
return {
rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
- for rec in baseGlyphV1List.BaseGlyphPaintRecord
+ for rec in baseGlyphList.BaseGlyphPaintRecord
}
| {"golden_diff": "diff --git a/Lib/fontTools/colorLib/unbuilder.py b/Lib/fontTools/colorLib/unbuilder.py\n--- a/Lib/fontTools/colorLib/unbuilder.py\n+++ b/Lib/fontTools/colorLib/unbuilder.py\n@@ -2,11 +2,14 @@\n from .table_builder import TableUnbuilder\n \n \n-def unbuildColrV1(layerV1List, baseGlyphV1List):\n- unbuilder = LayerListUnbuilder(layerV1List.Paint)\n+def unbuildColrV1(layerList, baseGlyphList):\n+ layers = []\n+ if layerList:\n+ layers = layerList.Paint\n+ unbuilder = LayerListUnbuilder(layers)\n return {\n rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)\n- for rec in baseGlyphV1List.BaseGlyphPaintRecord\n+ for rec in baseGlyphList.BaseGlyphPaintRecord\n }\n", "issue": "ReemKufiInk crashes pyftsubset\n```\r\npyftsubset --text=duck --output-file=/tmp/reem.otf ReemKufiInk-Bold.otf\r\n\r\nTraceback (most recent call last):\r\n File \"/tmp/venv/bin/pyftsubset\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py\", line 372, in wrapper\r\n return func(*args, **kwds)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 3104, in main\r\n subsetter.subset(font)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2908, in subset\r\n self._subset_glyphs(font)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2846, in _subset_glyphs\r\n retain = table.subset_glyphs(self)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2086, in subset_glyphs\r\n colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py\", line 6, in unbuildColrV1\r\n unbuilder = LayerListUnbuilder(layerV1List.Paint)\r\nAttributeError: 'NoneType' object has no attribute 'Paint'\r\n```\r\n\r\nTTX handles the font just fine. 
File from https://github.com/aliftype/reem-kufi/commits/colr-v1 at 93d6dcd693ae42bb4295701e88a07cc4d04db73c\n", "before_files": [{"content": "from fontTools.ttLib.tables import otTables as ot\nfrom .table_builder import TableUnbuilder\n\n\ndef unbuildColrV1(layerV1List, baseGlyphV1List):\n unbuilder = LayerListUnbuilder(layerV1List.Paint)\n return {\n rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)\n for rec in baseGlyphV1List.BaseGlyphPaintRecord\n }\n\n\ndef _flatten(lst):\n for el in lst:\n if isinstance(el, list):\n yield from _flatten(el)\n else:\n yield el\n\n\nclass LayerListUnbuilder:\n def __init__(self, layers):\n self.layers = layers\n\n callbacks = {\n (\n ot.Paint,\n ot.PaintFormat.PaintColrLayers,\n ): self._unbuildPaintColrLayers,\n }\n self.tableUnbuilder = TableUnbuilder(callbacks)\n\n def unbuildPaint(self, paint):\n assert isinstance(paint, ot.Paint)\n return self.tableUnbuilder.unbuild(paint)\n\n def _unbuildPaintColrLayers(self, source):\n assert source[\"Format\"] == ot.PaintFormat.PaintColrLayers\n\n layers = list(\n _flatten(\n [\n self.unbuildPaint(childPaint)\n for childPaint in self.layers[\n source[\"FirstLayerIndex\"] : source[\"FirstLayerIndex\"]\n + source[\"NumLayers\"]\n ]\n ]\n )\n )\n\n if len(layers) == 1:\n return layers[0]\n\n return {\"Format\": source[\"Format\"], \"Layers\": layers}\n\n\nif __name__ == \"__main__\":\n from pprint import pprint\n import sys\n from fontTools.ttLib import TTFont\n\n try:\n fontfile = sys.argv[1]\n except IndexError:\n sys.exit(\"usage: fonttools colorLib.unbuilder FONTFILE\")\n\n font = TTFont(fontfile)\n colr = font[\"COLR\"]\n if colr.version < 1:\n sys.exit(f\"error: No COLR table version=1 found in {fontfile}\")\n\n colorGlyphs = unbuildColrV1(\n colr.table.LayerList,\n colr.table.BaseGlyphList,\n ignoreVarIdx=not colr.table.VarStore,\n )\n\n pprint(colorGlyphs)\n", "path": "Lib/fontTools/colorLib/unbuilder.py"}]} | 1,618 | 194 |
gh_patches_debug_6818 | rasdani/github-patches | git_diff | sbi-dev__sbi-11 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Non-conditional density estimators are initialized with context_features=None, should support 0 as well
the CDEs have a default input context_features which defaults to None. When this is a positive integer, we get a CDE, for a DE we need to set it to None. It should support a value of zero, though.
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 exec(open("lfi/version.py").read())
4
5 setup(
6 name="lfi",
7 version=__version__,
8 description="LFI + CDE.",
9 url="https://github.com/mackelab/lfi",
10 author="Conor Durkan",
11 packages=find_packages(exclude=["tests"]),
12 license="GPLv3",
13 test_requires=["pytest", "deepdiff", "torchtestcase"],
14 install_requires=[
15 "matplotlib",
16 "numpy",
17 "pyro-ppl",
18 "scipy",
19 "tensorboard",
20 "torch",
21 "tqdm",
22 ],
23 extras_requires={"dev": ["autoflake", "black", "flake8", "isort", "pytest"]},
24 dependency_links=[],
25 )
26
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,12 @@
from setuptools import find_packages, setup
-exec(open("lfi/version.py").read())
+exec(open("sbi/version.py").read())
setup(
- name="lfi",
+ name="sbi",
version=__version__,
- description="LFI + CDE.",
- url="https://github.com/mackelab/lfi",
+ description="Simulation-based inference",
+ url="https://github.com/mackelab/sbi",
author="Conor Durkan",
packages=find_packages(exclude=["tests"]),
license="GPLv3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,12 +1,12 @@\n from setuptools import find_packages, setup\n \n-exec(open(\"lfi/version.py\").read())\n+exec(open(\"sbi/version.py\").read())\n \n setup(\n- name=\"lfi\",\n+ name=\"sbi\",\n version=__version__,\n- description=\"LFI + CDE.\",\n- url=\"https://github.com/mackelab/lfi\",\n+ description=\"Simulation-based inference\",\n+ url=\"https://github.com/mackelab/sbi\",\n author=\"Conor Durkan\",\n packages=find_packages(exclude=[\"tests\"]),\n license=\"GPLv3\",\n", "issue": "Non-conditional density estimators are initialized with context_features=None, should support 0 as well\nthe CDEs have a default input context_features which defaults to None. When this is a positive integer, we get a CDE, for a DE we need to set it to None. It should support a value of zero, though.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nexec(open(\"lfi/version.py\").read())\n\nsetup(\n name=\"lfi\",\n version=__version__,\n description=\"LFI + CDE.\",\n url=\"https://github.com/mackelab/lfi\",\n author=\"Conor Durkan\",\n packages=find_packages(exclude=[\"tests\"]),\n license=\"GPLv3\",\n test_requires=[\"pytest\", \"deepdiff\", \"torchtestcase\"],\n install_requires=[\n \"matplotlib\",\n \"numpy\",\n \"pyro-ppl\",\n \"scipy\",\n \"tensorboard\",\n \"torch\",\n \"tqdm\",\n ],\n extras_requires={\"dev\": [\"autoflake\", \"black\", \"flake8\", \"isort\", \"pytest\"]},\n dependency_links=[],\n)\n", "path": "setup.py"}]} | 809 | 155 |
gh_patches_debug_834 | rasdani/github-patches | git_diff | craiga__will-of-the-prophets-26 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up login form
</issue>
<code>
[start of will_of_the_prophets/settings/__init__.py]
1 """
2 Django settings for will_of_the_prophets project.
3
4 Generated by 'django-admin startproject' using Django 2.0.4.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/2.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/2.0/ref/settings/
11 """
12
13 import os
14
15 import django_heroku
16
17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19
20
21 # Quick-start development settings - unsuitable for production
22 # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
23
24 # SECURITY WARNING: keep the secret key used in production secret!
25 SECRET_KEY = os.environ.get(
26 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')
27
28 # SECURITY WARNING: don't run with debug turned on in production!
29 DEBUG = os.environ.get('DEBUG', False)
30
31 ALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']
32
33
34 # Application definition
35
36 INSTALLED_APPS = [
37 'raven.contrib.django.raven_compat',
38 'django.contrib.admin',
39 'django.contrib.auth',
40 'django.contrib.contenttypes',
41 'django.contrib.sessions',
42 'django.contrib.messages',
43 'django.contrib.staticfiles',
44 'sass_processor',
45 'bootstrap',
46 'will_of_the_prophets',
47 ]
48
49 MIDDLEWARE = [
50 'django.middleware.security.SecurityMiddleware',
51 'django.contrib.sessions.middleware.SessionMiddleware',
52 'django.middleware.common.CommonMiddleware',
53 'django.middleware.csrf.CsrfViewMiddleware',
54 'django.contrib.auth.middleware.AuthenticationMiddleware',
55 'django.contrib.messages.middleware.MessageMiddleware',
56 'django.middleware.clickjacking.XFrameOptionsMiddleware',
57 ]
58
59 ROOT_URLCONF = 'will_of_the_prophets.urls'
60
61 TEMPLATES = [
62 {
63 'BACKEND': 'django.template.backends.django.DjangoTemplates',
64 'DIRS': [],
65 'APP_DIRS': True,
66 'OPTIONS': {
67 'context_processors': [
68 'django.template.context_processors.debug',
69 'django.template.context_processors.request',
70 'django.contrib.auth.context_processors.auth',
71 'django.contrib.messages.context_processors.messages',
72 ],
73 },
74 },
75 ]
76
77 WSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'
78
79
80 # Database
81 # https://docs.djangoproject.com/en/2.0/ref/settings/#databases
82
83 DATABASES = {
84 'default': {
85 'ENGINE': 'django.db.backends.sqlite3',
86 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
87 }
88 }
89
90
91 # Password validation
92 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
93
94 AUTH_PASSWORD_VALIDATORS = [
95 {
96 'NAME': ('django.contrib.auth.password_validation'
97 '.UserAttributeSimilarityValidator'),
98 },
99 {
100 'NAME': ('django.contrib.auth.password_validation'
101 '.MinimumLengthValidator'),
102 },
103 {
104 'NAME': ('django.contrib.auth.password_validation'
105 '.CommonPasswordValidator'),
106 },
107 {
108 'NAME': ('django.contrib.auth.password_validation'
109 '.NumericPasswordValidator'),
110 },
111 ]
112
113
114 # Internationalization
115 # https://docs.djangoproject.com/en/2.0/topics/i18n/
116
117 LANGUAGE_CODE = 'en-us'
118
119 TIME_ZONE = 'UTC'
120
121 USE_I18N = True
122
123 USE_L10N = True
124
125 USE_TZ = True
126
127
128 # Static files (CSS, JavaScript, Images)
129 # https://docs.djangoproject.com/en/2.0/howto/static-files/
130
131 STATIC_URL = '/static/'
132
133 STATICFILES_FINDERS = [
134 'django.contrib.staticfiles.finders.FileSystemFinder',
135 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
136 # https://github.com/jrief/django-sass-processor
137 'sass_processor.finders.CssFinder',
138 ]
139
140
141 # django-sass-processor
142 # https://github.com/jrief/django-sass-processor
143 SASS_OUTPUT_STYLE = 'compressed'
144
145
146 # Configure Django App for Heroku.
147 django_heroku.settings(locals())
148
[end of will_of_the_prophets/settings/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py
--- a/will_of_the_prophets/settings/__init__.py
+++ b/will_of_the_prophets/settings/__init__.py
@@ -42,6 +42,7 @@
'django.contrib.messages',
'django.contrib.staticfiles',
'sass_processor',
+ 'widget_tweaks',
'bootstrap',
'will_of_the_prophets',
]
| {"golden_diff": "diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py\n--- a/will_of_the_prophets/settings/__init__.py\n+++ b/will_of_the_prophets/settings/__init__.py\n@@ -42,6 +42,7 @@\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n+ 'widget_tweaks',\n 'bootstrap',\n 'will_of_the_prophets',\n ]\n", "issue": "Clean up login form\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N 
= True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py"}]} | 1,801 | 112 |
gh_patches_debug_30796 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-756 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'
*cfn-lint version: 0.16.0
I got the error message:
`[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'
`
If I put a "!Ref Name" in the source action Name: see below
```
Stages:
- Name: GitHub
Actions:
- Name: !Ref GitHubSourceRepo1
ActionTypeId:
Category: Source
Owner: Custom
Version: 1
Provider: GitHUBcustom
.
.
.
.
```
If I remove the !Ref, the cfn-lint works fine.
</issue>
<code>
[start of src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20
21 class CodepipelineStageActions(CloudFormationLintRule):
22 """Check if CodePipeline Stage Actions are set up properly."""
23 id = 'E2541'
24 shortdesc = 'CodePipeline Stage Actions'
25 description = 'See if CodePipeline stage actions are set correctly'
26 source_url = 'https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#pipeline-requirements'
27 tags = ['resources', 'codepipeline']
28
29 CONSTRAINTS = {
30 'AWS': {
31 'Source': {
32 'S3': {
33 'InputArtifactRange': 0,
34 'OutputArtifactRange': 1,
35 },
36 'CodeCommit': {
37 'InputArtifactRange': 0,
38 'OutputArtifactRange': 1,
39 }
40 },
41 'Test': {
42 'CodeBuild': {
43 'InputArtifactRange': (1, 5),
44 'OutputArtifactRange': (0, 5),
45 }
46 },
47 'Approval': {
48 'Manual': {
49 'InputArtifactRange': 0,
50 'OutputArtifactRange': 0,
51 }
52 },
53 'Deploy': {
54 'CloudFormation': {
55 'InputArtifactRange': (0, 10),
56 'OutputArtifactRange': (0, 1),
57 },
58 'CodeDeploy': {
59 'InputArtifactRange': 1,
60 'OutputArtifactRange': 0,
61 },
62 'ElasticBeanstalk': {
63 'InputArtifactRange': 1,
64 'OutputArtifactRange': 0,
65 },
66 'OpsWorks': {
67 'InputArtifactRange': 1,
68 'OutputArtifactRange': 0,
69 },
70 'ECS': {
71 'InputArtifactRange': 1,
72 'OutputArtifactRange': 0,
73 },
74 },
75 'Invoke': {
76 'Lambda': {
77 'InputArtifactRange': (0, 5),
78 'OutputArtifactRange': (0, 5),
79 }
80 }
81 },
82 'ThirdParty': {
83 'Source': {
84 'GitHub': {
85 'InputArtifactRange': 0,
86 'OutputArtifactRange': 1,
87 }
88 },
89 },
90 }
91
92 KEY_MAP = {
93 'InputArtifacts': 'InputArtifactRange',
94 'OutputArtifacts': 'OutputArtifactRange',
95 }
96
97 def check_artifact_counts(self, action, artifact_type, path):
98 """Check that artifact counts are within valid ranges."""
99 matches = []
100
101 action_type_id = action.get('ActionTypeId')
102 owner = action_type_id.get('Owner')
103 category = action_type_id.get('Category')
104 provider = action_type_id.get('Provider')
105
106 if isinstance(owner, dict) or isinstance(category, dict) or isinstance(provider, dict):
107 self.logger.debug('owner, category, provider need to be strings to validate. Skipping.')
108 return matches
109
110 constraints = self.CONSTRAINTS.get(owner, {}).get(category, {}).get(provider, {})
111 if not constraints:
112 return matches
113 artifact_count = len(action.get(artifact_type, []))
114
115 constraint_key = self.KEY_MAP[artifact_type]
116 if isinstance(constraints[constraint_key], tuple):
117 min_, max_ = constraints[constraint_key]
118 if not (min_ <= artifact_count <= max_):
119 message = (
120 'Action "{action}" declares {number} {artifact_type} which is not in '
121 'expected range [{a}, {b}].'
122 ).format(
123 action=action['Name'],
124 number=artifact_count,
125 artifact_type=artifact_type,
126 a=min_,
127 b=max_
128 )
129 matches.append(RuleMatch(
130 path + [artifact_type],
131 message
132 ))
133 else:
134 if artifact_count != constraints[constraint_key]:
135 message = (
136 'Action "{action}" declares {number} {artifact_type} which is not the '
137 'expected number [{a}].'
138 ).format(
139 action=action['Name'],
140 number=artifact_count,
141 artifact_type=artifact_type,
142 a=constraints[constraint_key]
143 )
144 matches.append(RuleMatch(
145 path + [artifact_type],
146 message
147 ))
148
149 return matches
150
151 def check_version(self, action, path):
152 """Check that action type version is valid."""
153 matches = []
154
155 version = action.get('ActionTypeId', {}).get('Version')
156 if isinstance(version, dict):
157 self.logger.debug('Unable to validate version when an object is used. Skipping')
158 elif version != '1':
159 message = 'For all currently supported action types, the only valid version string is "1".'
160 matches.append(RuleMatch(
161 path + ['ActionTypeId', 'Version'],
162 message
163 ))
164 return matches
165
166 def check_names_unique(self, action, path, action_names):
167 """Check that action names are unique."""
168 matches = []
169
170 if action.get('Name') in action_names:
171 message = 'All action names within a stage must be unique. ({name})'.format(
172 name=action.get('Name')
173 )
174 matches.append(RuleMatch(path + ['Name'], message))
175 action_names.add(action.get('Name'))
176
177 return matches
178
179 def match(self, cfn):
180 """Check that stage actions are set up properly."""
181 matches = []
182
183 resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])
184 for resource in resources:
185 path = resource['Path']
186 properties = resource['Value']
187
188 s_stages = properties.get_safe('Stages', path)
189 for s_stage_v, s_stage_p in s_stages:
190 if not isinstance(s_stage_v, list):
191 self.logger.debug('Stages not list. Should have been caught by generic linting.')
192 return matches
193
194 for l_i_stage, l_i_path in s_stage_v.items_safe(s_stage_p):
195 action_names = set()
196 s_actions = l_i_stage.get_safe('Actions', l_i_path)
197 for s_action_v, s_action_p in s_actions:
198 if not isinstance(s_action_v, list):
199 self.logger.debug('Actions not list. Should have been caught by generic linting.')
200 return matches
201
202 for l_i_a_action, l_i_a_path in s_action_v.items_safe(s_action_p):
203 try:
204 full_path = path + l_i_a_path
205 matches.extend(self.check_names_unique(l_i_a_action, full_path, action_names))
206 matches.extend(self.check_version(l_i_a_action, full_path))
207 matches.extend(self.check_artifact_counts(l_i_a_action, 'InputArtifacts', full_path))
208 matches.extend(self.check_artifact_counts(l_i_a_action, 'OutputArtifacts', full_path))
209 except AttributeError as err:
210 self.logger.debug('Got AttributeError. Should have been caught by generic linting. '
211 'Ignoring the error here: %s', str(err))
212
213 return matches
214
[end of src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py
--- a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py
+++ b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py
@@ -14,6 +14,7 @@
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
+import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
@@ -44,6 +45,12 @@
'OutputArtifactRange': (0, 5),
}
},
+ 'Build': {
+ 'CodeBuild': {
+ 'InputArtifactRange': (1, 5),
+ 'OutputArtifactRange': (0, 5),
+ }
+ },
'Approval': {
'Manual': {
'InputArtifactRange': 0,
@@ -167,12 +174,14 @@
"""Check that action names are unique."""
matches = []
- if action.get('Name') in action_names:
- message = 'All action names within a stage must be unique. ({name})'.format(
- name=action.get('Name')
- )
- matches.append(RuleMatch(path + ['Name'], message))
- action_names.add(action.get('Name'))
+ action_name = action.get('Name')
+ if isinstance(action_name, six.string_types):
+ if action.get('Name') in action_names:
+ message = 'All action names within a stage must be unique. ({name})'.format(
+ name=action.get('Name')
+ )
+ matches.append(RuleMatch(path + ['Name'], message))
+ action_names.add(action.get('Name'))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py\n--- a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py\n+++ b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n \n@@ -44,6 +45,12 @@\n 'OutputArtifactRange': (0, 5),\n }\n },\n+ 'Build': {\n+ 'CodeBuild': {\n+ 'InputArtifactRange': (1, 5),\n+ 'OutputArtifactRange': (0, 5),\n+ }\n+ },\n 'Approval': {\n 'Manual': {\n 'InputArtifactRange': 0,\n@@ -167,12 +174,14 @@\n \"\"\"Check that action names are unique.\"\"\"\n matches = []\n \n- if action.get('Name') in action_names:\n- message = 'All action names within a stage must be unique. ({name})'.format(\n- name=action.get('Name')\n- )\n- matches.append(RuleMatch(path + ['Name'], message))\n- action_names.add(action.get('Name'))\n+ action_name = action.get('Name')\n+ if isinstance(action_name, six.string_types):\n+ if action.get('Name') in action_names:\n+ message = 'All action names within a stage must be unique. ({name})'.format(\n+ name=action.get('Name')\n+ )\n+ matches.append(RuleMatch(path + ['Name'], message))\n+ action_names.add(action.get('Name'))\n \n return matches\n", "issue": "[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'\n*cfn-lint version: 0.16.0\r\n\r\nI got the error message:\r\n\r\n`[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'\r\n`\r\n\r\nIf I put a \"!Ref Name\" in the source action Name: see below\r\n \r\n```\r\nStages:\r\n - Name: GitHub\r\n Actions:\r\n - Name: !Ref GitHubSourceRepo1\r\n ActionTypeId:\r\n Category: Source\r\n Owner: Custom\r\n Version: 1\r\n Provider: GitHUBcustom\r\n.\r\n.\r\n.\r\n.\r\n```\r\n\r\nIf I remove the !Ref, the cfn-lint works fine.\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass CodepipelineStageActions(CloudFormationLintRule):\n \"\"\"Check if CodePipeline Stage Actions are set up properly.\"\"\"\n id = 'E2541'\n shortdesc = 'CodePipeline Stage Actions'\n description = 'See if CodePipeline stage actions are set correctly'\n source_url = 'https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#pipeline-requirements'\n tags = ['resources', 'codepipeline']\n\n CONSTRAINTS = {\n 'AWS': {\n 'Source': {\n 'S3': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n },\n 'CodeCommit': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n }\n },\n 'Test': {\n 'CodeBuild': {\n 'InputArtifactRange': (1, 5),\n 'OutputArtifactRange': (0, 5),\n }\n },\n 'Approval': {\n 'Manual': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 0,\n }\n },\n 'Deploy': {\n 'CloudFormation': {\n 'InputArtifactRange': (0, 10),\n 'OutputArtifactRange': (0, 1),\n },\n 'CodeDeploy': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'ElasticBeanstalk': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'OpsWorks': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'ECS': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n },\n 'Invoke': {\n 'Lambda': {\n 'InputArtifactRange': (0, 5),\n 'OutputArtifactRange': (0, 5),\n }\n }\n },\n 'ThirdParty': {\n 'Source': {\n 'GitHub': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n }\n },\n },\n }\n\n KEY_MAP = {\n 'InputArtifacts': 'InputArtifactRange',\n 'OutputArtifacts': 'OutputArtifactRange',\n }\n\n def check_artifact_counts(self, action, artifact_type, path):\n \"\"\"Check that artifact counts are within valid ranges.\"\"\"\n matches = []\n\n action_type_id = action.get('ActionTypeId')\n owner = action_type_id.get('Owner')\n category = action_type_id.get('Category')\n provider = action_type_id.get('Provider')\n\n if isinstance(owner, dict) or isinstance(category, dict) or isinstance(provider, dict):\n self.logger.debug('owner, category, provider need to be strings to validate. 
Skipping.')\n return matches\n\n constraints = self.CONSTRAINTS.get(owner, {}).get(category, {}).get(provider, {})\n if not constraints:\n return matches\n artifact_count = len(action.get(artifact_type, []))\n\n constraint_key = self.KEY_MAP[artifact_type]\n if isinstance(constraints[constraint_key], tuple):\n min_, max_ = constraints[constraint_key]\n if not (min_ <= artifact_count <= max_):\n message = (\n 'Action \"{action}\" declares {number} {artifact_type} which is not in '\n 'expected range [{a}, {b}].'\n ).format(\n action=action['Name'],\n number=artifact_count,\n artifact_type=artifact_type,\n a=min_,\n b=max_\n )\n matches.append(RuleMatch(\n path + [artifact_type],\n message\n ))\n else:\n if artifact_count != constraints[constraint_key]:\n message = (\n 'Action \"{action}\" declares {number} {artifact_type} which is not the '\n 'expected number [{a}].'\n ).format(\n action=action['Name'],\n number=artifact_count,\n artifact_type=artifact_type,\n a=constraints[constraint_key]\n )\n matches.append(RuleMatch(\n path + [artifact_type],\n message\n ))\n\n return matches\n\n def check_version(self, action, path):\n \"\"\"Check that action type version is valid.\"\"\"\n matches = []\n\n version = action.get('ActionTypeId', {}).get('Version')\n if isinstance(version, dict):\n self.logger.debug('Unable to validate version when an object is used. Skipping')\n elif version != '1':\n message = 'For all currently supported action types, the only valid version string is \"1\".'\n matches.append(RuleMatch(\n path + ['ActionTypeId', 'Version'],\n message\n ))\n return matches\n\n def check_names_unique(self, action, path, action_names):\n \"\"\"Check that action names are unique.\"\"\"\n matches = []\n\n if action.get('Name') in action_names:\n message = 'All action names within a stage must be unique. ({name})'.format(\n name=action.get('Name')\n )\n matches.append(RuleMatch(path + ['Name'], message))\n action_names.add(action.get('Name'))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check that stage actions are set up properly.\"\"\"\n matches = []\n\n resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\n for resource in resources:\n path = resource['Path']\n properties = resource['Value']\n\n s_stages = properties.get_safe('Stages', path)\n for s_stage_v, s_stage_p in s_stages:\n if not isinstance(s_stage_v, list):\n self.logger.debug('Stages not list. Should have been caught by generic linting.')\n return matches\n\n for l_i_stage, l_i_path in s_stage_v.items_safe(s_stage_p):\n action_names = set()\n s_actions = l_i_stage.get_safe('Actions', l_i_path)\n for s_action_v, s_action_p in s_actions:\n if not isinstance(s_action_v, list):\n self.logger.debug('Actions not list. Should have been caught by generic linting.')\n return matches\n\n for l_i_a_action, l_i_a_path in s_action_v.items_safe(s_action_p):\n try:\n full_path = path + l_i_a_path\n matches.extend(self.check_names_unique(l_i_a_action, full_path, action_names))\n matches.extend(self.check_version(l_i_a_action, full_path))\n matches.extend(self.check_artifact_counts(l_i_a_action, 'InputArtifacts', full_path))\n matches.extend(self.check_artifact_counts(l_i_a_action, 'OutputArtifacts', full_path))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. '\n 'Ignoring the error here: %s', str(err))\n\n return matches\n", "path": "src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py"}]} | 2,962 | 421 |
gh_patches_debug_9798 | rasdani/github-patches | git_diff | netbox-community__netbox-15788 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New User model has a 32-bit integer `id` field
### Deployment Type
NetBox Cloud
### NetBox Version
v4.0-beta1
### Python Version
3.10
### Steps to Reproduce
1. Upgrade a v3.7 database to v4.0
2. Inspect the `users_user` table
### Expected Behavior
The `id` column of the `users_user` table should be a `bigint` (64-bit integer), like all other models in NetBox.
### Observed Behavior
The `id` column is a regular 32-bit integer. This is because we renamed the stock Django table, which uses a 32-bit integer `id` field.
</issue>
<code>
[start of netbox/users/migrations/0005_alter_user_table.py]
1 from django.db import migrations
2
3
4 def update_content_types(apps, schema_editor):
5 ContentType = apps.get_model('contenttypes', 'ContentType')
6 # Delete the new ContentTypes effected by the new models in the users app
7 ContentType.objects.filter(app_label='users', model='user').delete()
8
9 # Update the app labels of the original ContentTypes for auth.User to ensure
10 # that any foreign key references are preserved
11 ContentType.objects.filter(app_label='auth', model='user').update(app_label='users')
12
13 netboxuser_ct = ContentType.objects.filter(app_label='users', model='netboxuser').first()
14 if netboxuser_ct:
15 user_ct = ContentType.objects.filter(app_label='users', model='user').first()
16 CustomField = apps.get_model('extras', 'CustomField')
17 CustomField.objects.filter(related_object_type_id=netboxuser_ct.id).update(related_object_type_id=user_ct.id)
18 netboxuser_ct.delete()
19
20
21 class Migration(migrations.Migration):
22
23 dependencies = [
24 ('users', '0002_squashed_0004'),
25 ]
26
27 operations = [
28 # The User table was originally created as 'auth_user'. Now we nullify the model's
29 # db_table option, so that it defaults to the app & model name (users_user). This
30 # causes the database table to be renamed.
31 migrations.AlterModelTable(
32 name='user',
33 table=None,
34 ),
35
36 # Rename auth_user_* sequences
37 migrations.RunSQL("ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq"),
38 migrations.RunSQL("ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq"),
39 migrations.RunSQL("ALTER TABLE auth_user_user_permissions_id_seq RENAME TO users_user_user_permissions_id_seq"),
40
41 # Rename auth_user_* indexes
42 migrations.RunSQL("ALTER INDEX auth_user_pkey RENAME TO users_user_pkey"),
43 # Hash is deterministic; generated via schema_editor._create_index_name()
44 migrations.RunSQL("ALTER INDEX auth_user_username_6821ab7c_like RENAME TO users_user_username_06e46fe6_like"),
45 migrations.RunSQL("ALTER INDEX auth_user_username_key RENAME TO users_user_username_key"),
46
47 # Update ContentTypes
48 migrations.RunPython(
49 code=update_content_types,
50 reverse_code=migrations.RunPython.noop
51 ),
52 ]
53
[end of netbox/users/migrations/0005_alter_user_table.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/users/migrations/0005_alter_user_table.py b/netbox/users/migrations/0005_alter_user_table.py
--- a/netbox/users/migrations/0005_alter_user_table.py
+++ b/netbox/users/migrations/0005_alter_user_table.py
@@ -33,6 +33,9 @@
table=None,
),
+ # Convert the `id` column to a 64-bit integer (BigAutoField is implied by DEFAULT_AUTO_FIELD)
+ migrations.RunSQL("ALTER TABLE users_user ALTER COLUMN id TYPE bigint"),
+
# Rename auth_user_* sequences
migrations.RunSQL("ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq"),
migrations.RunSQL("ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq"),
| {"golden_diff": "diff --git a/netbox/users/migrations/0005_alter_user_table.py b/netbox/users/migrations/0005_alter_user_table.py\n--- a/netbox/users/migrations/0005_alter_user_table.py\n+++ b/netbox/users/migrations/0005_alter_user_table.py\n@@ -33,6 +33,9 @@\n table=None,\n ),\n \n+ # Convert the `id` column to a 64-bit integer (BigAutoField is implied by DEFAULT_AUTO_FIELD)\n+ migrations.RunSQL(\"ALTER TABLE users_user ALTER COLUMN id TYPE bigint\"),\n+\n # Rename auth_user_* sequences\n migrations.RunSQL(\"ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq\"),\n", "issue": "New User model has a 32-bit integer `id` field\n### Deployment Type\n\nNetBox Cloud\n\n### NetBox Version\n\nv4.0-beta1\n\n### Python Version\n\n3.10\n\n### Steps to Reproduce\n\n1. Upgrade a v3.7 database to v4.0\r\n2. Inspect the `users_user` table\n\n### Expected Behavior\n\nThe `id` column of the `users_user` table should be a `bigint` (64-bit integer), like all other models in NetBox.\n\n### Observed Behavior\n\nThe `id` column is a regular 32-bit integer. This is because we renamed the stock Django table, which uses a 32-bit integer `id` field.\n", "before_files": [{"content": "from django.db import migrations\n\n\ndef update_content_types(apps, schema_editor):\n ContentType = apps.get_model('contenttypes', 'ContentType')\n # Delete the new ContentTypes effected by the new models in the users app\n ContentType.objects.filter(app_label='users', model='user').delete()\n\n # Update the app labels of the original ContentTypes for auth.User to ensure\n # that any foreign key references are preserved\n ContentType.objects.filter(app_label='auth', model='user').update(app_label='users')\n\n netboxuser_ct = ContentType.objects.filter(app_label='users', model='netboxuser').first()\n if netboxuser_ct:\n user_ct = ContentType.objects.filter(app_label='users', model='user').first()\n CustomField = apps.get_model('extras', 'CustomField')\n CustomField.objects.filter(related_object_type_id=netboxuser_ct.id).update(related_object_type_id=user_ct.id)\n netboxuser_ct.delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_squashed_0004'),\n ]\n\n operations = [\n # The User table was originally created as 'auth_user'. Now we nullify the model's\n # db_table option, so that it defaults to the app & model name (users_user). This\n # causes the database table to be renamed.\n migrations.AlterModelTable(\n name='user',\n table=None,\n ),\n\n # Rename auth_user_* sequences\n migrations.RunSQL(\"ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_user_permissions_id_seq RENAME TO users_user_user_permissions_id_seq\"),\n\n # Rename auth_user_* indexes\n migrations.RunSQL(\"ALTER INDEX auth_user_pkey RENAME TO users_user_pkey\"),\n # Hash is deterministic; generated via schema_editor._create_index_name()\n migrations.RunSQL(\"ALTER INDEX auth_user_username_6821ab7c_like RENAME TO users_user_username_06e46fe6_like\"),\n migrations.RunSQL(\"ALTER INDEX auth_user_username_key RENAME TO users_user_username_key\"),\n\n # Update ContentTypes\n migrations.RunPython(\n code=update_content_types,\n reverse_code=migrations.RunPython.noop\n ),\n ]\n", "path": "netbox/users/migrations/0005_alter_user_table.py"}]} | 1,323 | 183 |
gh_patches_debug_32866 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4451 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation for the v2 of the configuration file
At first, I was thinking to automate this given the schema, but the spec isn't very large so we can just hand-write this without too much effort.
</issue>
<code>
[start of docs/doc_extensions.py]
1 """
2 Read the Docs documentation extensions for Sphinx
3
4 Adds the following roles:
5
6 djangosetting
7 Output an inline literal of the corresponding setting value. Useful for
8 keeping documentation up to date without editing on settings changes.
9 """
10
11 from docutils import nodes, utils
12
13 from django.conf import settings
14
15 from readthedocs.projects.models import Feature
16
17
18 def django_setting_role(typ, rawtext, text, lineno, inliner, options=None,
19 content=None):
20 """Always up to date Django settings from the application"""
21 dj_setting = getattr(settings, utils.unescape(text), 'None')
22 node = nodes.literal(dj_setting, dj_setting)
23 return [node], []
24
25
26 def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,
27 content=None):
28 """Up to date feature flags from the application."""
29 all_features = Feature.FEATURES
30 requested_feature = utils.unescape(text)
31 for feature in all_features:
32 if requested_feature.lower() == feature[0].lower():
33 desc = nodes.Text(feature[1], feature[1])
34 return [desc], []
35
36
37 def setup(_):
38 from docutils.parsers.rst import roles
39 roles.register_local_role(
40 'djangosetting',
41 django_setting_role
42 )
43 roles.register_local_role(
44 'featureflags',
45 feature_flags_role
46 )
47
48 return {
49 'version': 'builtin',
50 'parallel_read_safe': True,
51 'parallel_write_safe': True,
52 }
53
[end of docs/doc_extensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/doc_extensions.py b/docs/doc_extensions.py
--- a/docs/doc_extensions.py
+++ b/docs/doc_extensions.py
@@ -6,11 +6,14 @@
djangosetting
Output an inline literal of the corresponding setting value. Useful for
keeping documentation up to date without editing on settings changes.
-"""
-from docutils import nodes, utils
+buildpyversions
+ Output a comma separated list of the supported python versions for a
+ Read the Docs build image.
+"""
from django.conf import settings
+from docutils import nodes, utils
from readthedocs.projects.models import Feature
@@ -23,8 +26,23 @@
return [node], []
+def python_supported_versions_role(typ, rawtext, text, lineno, inliner,
+ options=None, content=None):
+ """Up to date supported python versions for each build image."""
+ image = '{}:{}'.format(settings.DOCKER_DEFAULT_IMAGE, text)
+ image_settings = settings.DOCKER_IMAGE_SETTINGS[image]
+ python_versions = image_settings['python']['supported_versions']
+ node_list = []
+ separator = ', '
+ for i, version in enumerate(python_versions):
+ node_list.append(nodes.literal(version, version))
+ if i < len(python_versions) - 1:
+ node_list.append(nodes.Text(separator))
+ return (node_list, [])
+
+
def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,
- content=None):
+ content=None):
"""Up to date feature flags from the application."""
all_features = Feature.FEATURES
requested_feature = utils.unescape(text)
@@ -40,9 +58,13 @@
'djangosetting',
django_setting_role
)
+ roles.register_local_role(
+ 'buildpyversions',
+ python_supported_versions_role,
+ )
roles.register_local_role(
'featureflags',
- feature_flags_role
+ feature_flags_role,
)
return {
| {"golden_diff": "diff --git a/docs/doc_extensions.py b/docs/doc_extensions.py\n--- a/docs/doc_extensions.py\n+++ b/docs/doc_extensions.py\n@@ -6,11 +6,14 @@\n djangosetting\n Output an inline literal of the corresponding setting value. Useful for\n keeping documentation up to date without editing on settings changes.\n-\"\"\"\n \n-from docutils import nodes, utils\n+buildpyversions\n+ Output a comma separated list of the supported python versions for a\n+ Read the Docs build image.\n+\"\"\"\n \n from django.conf import settings\n+from docutils import nodes, utils\n \n from readthedocs.projects.models import Feature\n \n@@ -23,8 +26,23 @@\n return [node], []\n \n \n+def python_supported_versions_role(typ, rawtext, text, lineno, inliner,\n+ options=None, content=None):\n+ \"\"\"Up to date supported python versions for each build image.\"\"\"\n+ image = '{}:{}'.format(settings.DOCKER_DEFAULT_IMAGE, text)\n+ image_settings = settings.DOCKER_IMAGE_SETTINGS[image]\n+ python_versions = image_settings['python']['supported_versions']\n+ node_list = []\n+ separator = ', '\n+ for i, version in enumerate(python_versions):\n+ node_list.append(nodes.literal(version, version))\n+ if i < len(python_versions) - 1:\n+ node_list.append(nodes.Text(separator))\n+ return (node_list, [])\n+\n+\n def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,\n- content=None):\n+ content=None):\n \"\"\"Up to date feature flags from the application.\"\"\"\n all_features = Feature.FEATURES\n requested_feature = utils.unescape(text)\n@@ -40,9 +58,13 @@\n 'djangosetting',\n django_setting_role\n )\n+ roles.register_local_role(\n+ 'buildpyversions',\n+ python_supported_versions_role,\n+ )\n roles.register_local_role(\n 'featureflags',\n- feature_flags_role\n+ feature_flags_role,\n )\n \n return {\n", "issue": "Documentation for the v2 of the configuration file\nAt first, I was thinking to automate this given the schema, but the spec isn't very large so we can just hand-write this without too much effort.\n", "before_files": [{"content": "\"\"\"\nRead the Docs documentation extensions for Sphinx\n\nAdds the following roles:\n\ndjangosetting\n Output an inline literal of the corresponding setting value. Useful for\n keeping documentation up to date without editing on settings changes.\n\"\"\"\n\nfrom docutils import nodes, utils\n\nfrom django.conf import settings\n\nfrom readthedocs.projects.models import Feature\n\n\ndef django_setting_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Always up to date Django settings from the application\"\"\"\n dj_setting = getattr(settings, utils.unescape(text), 'None')\n node = nodes.literal(dj_setting, dj_setting)\n return [node], []\n\n\ndef feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Up to date feature flags from the application.\"\"\"\n all_features = Feature.FEATURES\n requested_feature = utils.unescape(text)\n for feature in all_features:\n if requested_feature.lower() == feature[0].lower():\n desc = nodes.Text(feature[1], feature[1])\n return [desc], []\n\n\ndef setup(_):\n from docutils.parsers.rst import roles\n roles.register_local_role(\n 'djangosetting',\n django_setting_role\n )\n roles.register_local_role(\n 'featureflags',\n feature_flags_role\n )\n\n return {\n 'version': 'builtin',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/doc_extensions.py"}]} | 992 | 446 |
gh_patches_debug_4629 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2244 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ConditionedPlanarFlow's event_dim is wrong [bug]
--------------------------------------------------------------------------------------------------
### Issue Description
I think the ConditionedPlanarFlow's event_dim is incorrect; the class needs to be defined as follows:
```
class ConditionedPlanarFlow(Transform):
event_dim=1 #this line is not in the current release version
def __init__(self, bias=None, u=None, w=None):
super(ConditionedPlanarFlow, self).__init__(cache_size=1)
self.bias = bias
self.u = u
self.w = w
self._cached_logDetJ = None
```
The Transform super class default is event_dim=0.
Maybe I misunderstand this class, but I notice the PlanarFlow class has event_dim=1, as do many of the other normalizing flows, and when I tried using this flow for my models my ELBO would go negative. I think the confusion is that ConditionalPlanarFlow has event_dim=1, but that isn't setting it when the actual flows are created (from what I can tell, anyway).
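[Editorial note] A quick way to observe the default being described, using the class names from the source listing below; this is a hedged sketch that assumes the torch release contemporary with this report, where `Transform.event_dim` is a plain class attribute defaulting to 0:

```python
import torch
from pyro.distributions.transforms.planar import ConditionedPlanar

# Tensor shapes are arbitrary here; the transform is never called.
flow = ConditionedPlanar(bias=torch.zeros(1), u=torch.zeros(2), w=torch.zeros(2))

# Inherited from torch.distributions.Transform: prints 0 before the proposed
# change adds `event_dim = 1` to the conditioned class, per this report.
print(flow.event_dim)
```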
### Environment
For any bugs, please provide the following:
- Found on: Ubuntu
 - PyTorch version: I think I was using 1.2... but again, this is a super minor bug
You can see it in the pyro docs http://docs.pyro.ai/en/stable/_modules/pyro/distributions/transforms/planar.html#ConditionalPlanarFlow
- Pyro version: 3.6.10 (I think...pretty sure a single boolean value is pretty robust to version though)
Was originally found in...nightly build (not sure on the term... I built from source, basically)
</issue>
<code>
[start of pyro/distributions/transforms/planar.py]
1 import math
2
3 import torch
4 import torch.nn as nn
5 from torch.distributions import constraints
6 import torch.nn.functional as F
7
8 from torch.distributions import Transform
9 from pyro.distributions.conditional import ConditionalTransformModule
10 from pyro.distributions.torch_transform import TransformModule
11 from pyro.distributions.util import copy_docs_from
12 from pyro.nn import DenseNN
13
14
15 @copy_docs_from(Transform)
16 class ConditionedPlanar(Transform):
17 def __init__(self, bias=None, u=None, w=None):
18 super(ConditionedPlanar, self).__init__(cache_size=1)
19 self.bias = bias
20 self.u = u
21 self.w = w
22 self._cached_logDetJ = None
23
24 # This method ensures that torch(u_hat, w) > -1, required for invertibility
25 def u_hat(self, u, w):
26 alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)
27 a_prime = -1 + F.softplus(alpha)
28 return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))
29
30 def _call(self, x):
31 """
32 :param x: the input into the bijection
33 :type x: torch.Tensor
34 Invokes the bijection x => y; in the prototypical context of a
35 :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from the base distribution (or the output
36 of a previous transform)
37 """
38
39 # x ~ (batch_size, dim_size, 1)
40 # w ~ (batch_size, 1, dim_size)
41 # bias ~ (batch_size, 1)
42 act = torch.tanh(torch.matmul(self.w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + self.bias)
43 u_hat = self.u_hat(self.u, self.w)
44 y = x + u_hat * act
45
46 psi_z = (1. - act.pow(2)) * self.w
47 self._cached_logDetJ = torch.log(
48 torch.abs(1 + torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1)).squeeze(-1).squeeze(-1)))
49
50 return y
51
52 def _inverse(self, y):
53 """
54 :param y: the output of the bijection
55 :type y: torch.Tensor
56 Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values
57 `y`; rather it assumes `y` is the result of a previously computed application of the bijector
58 to some `x` (which was cached on the forward call)
59 """
60
61 raise KeyError("ConditionalPlanar object expected to find key in intermediates cache but didn't")
62
63 def log_abs_det_jacobian(self, x, y):
64 """
65 Calculates the elementwise determinant of the log Jacobian
66 """
67 return self._cached_logDetJ
68
69
70 @copy_docs_from(ConditionedPlanar)
71 class Planar(ConditionedPlanar, TransformModule):
72 """
73 A 'planar' bijective transform with equation,
74
75 :math:`\\mathbf{y} = \\mathbf{x} + \\mathbf{u}\\tanh(\\mathbf{w}^T\\mathbf{z}+b)`
76
77 where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs, and the learnable parameters
78 are :math:`b\\in\\mathbb{R}`, :math:`\\mathbf{u}\\in\\mathbb{R}^D`, :math:`\\mathbf{w}\\in\\mathbb{R}^D` for input
79 dimension :math:`D`. For this to be an invertible transformation, the condition
80 :math:`\\mathbf{w}^T\\mathbf{u}>-1` is enforced.
81
82 Together with :class:`~pyro.distributions.TransformedDistribution` this provides a way to create richer
83 variational approximations.
84
85 Example usage:
86
87 >>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
88 >>> transform = Planar(10)
89 >>> pyro.module("my_transform", transform) # doctest: +SKIP
90 >>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
91 >>> flow_dist.sample() # doctest: +SKIP
92 tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,
93 0.1389, -0.4629, 0.0986])
94
95 The inverse of this transform does not possess an analytical solution and is left unimplemented. However,
96 the inverse is cached when the forward operation is called during sampling, and so samples drawn using
97 the planar transform can be scored.
98
99 :param input_dim: the dimension of the input (and output) variable.
100 :type input_dim: int
101
102 References:
103
104 Variational Inference with Normalizing Flows [arXiv:1505.05770]
105 Danilo Jimenez Rezende, Shakir Mohamed
106
107 """
108
109 domain = constraints.real
110 codomain = constraints.real
111 bijective = True
112 event_dim = 1
113
114 def __init__(self, input_dim):
115 super(Planar, self).__init__()
116
117 self.bias = nn.Parameter(torch.Tensor(1,))
118 self.u = nn.Parameter(torch.Tensor(input_dim,))
119 self.w = nn.Parameter(torch.Tensor(input_dim,))
120 self.input_dim = input_dim
121 self.reset_parameters()
122
123 def reset_parameters(self):
124 stdv = 1. / math.sqrt(self.u.size(0))
125 self.w.data.uniform_(-stdv, stdv)
126 self.u.data.uniform_(-stdv, stdv)
127 self.bias.data.zero_()
128
129
130 @copy_docs_from(ConditionalTransformModule)
131 class ConditionalPlanar(ConditionalTransformModule):
132 """
133 A conditional 'planar' bijective transform using the equation,
134
135 :math:`\\mathbf{y} = \\mathbf{x} + \\mathbf{u}\\tanh(\\mathbf{w}^T\\mathbf{z}+b)`
136
137 where :math:`\\mathbf{x}` are the inputs with dimension :math:`D`, :math:`\\mathbf{y}` are the outputs,
138 and the pseudo-parameters :math:`b\\in\\mathbb{R}`, :math:`\\mathbf{u}\\in\\mathbb{R}^D`, and
139 :math:`\\mathbf{w}\\in\\mathbb{R}^D` are the output of a function, e.g. a NN, with input
140 :math:`z\\in\\mathbb{R}^{M}` representing the context variable to condition on. For this to be an
141 invertible transformation, the condition :math:`\\mathbf{w}^T\\mathbf{u}>-1` is enforced.
142
143 Together with :class:`~pyro.distributions.ConditionalTransformedDistribution` this provides a way to create
144 richer variational approximations.
145
146 Example usage:
147
148 >>> from pyro.nn.dense_nn import DenseNN
149 >>> input_dim = 10
150 >>> context_dim = 5
151 >>> batch_size = 3
152 >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
153 >>> hypernet = DenseNN(context_dim, [50, 50], param_dims=[1, input_dim, input_dim])
154 >>> transform = ConditionalPlanar(hypernet)
155 >>> z = torch.rand(batch_size, context_dim)
156 >>> flow_dist = dist.ConditionalTransformedDistribution(base_dist, [transform]).condition(z)
157 >>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP
158
159 The inverse of this transform does not possess an analytical solution and is left unimplemented. However,
160 the inverse is cached when the forward operation is called during sampling, and so samples drawn using
161 the planar transform can be scored.
162
163 :param nn: a function inputting the context variable and outputting a triplet of real-valued parameters
164 of dimensions :math:`(1, D, D)`.
165 :type nn: callable
166
167 References:
168 Variational Inference with Normalizing Flows [arXiv:1505.05770]
169 Danilo Jimenez Rezende, Shakir Mohamed
170
171 """
172
173 domain = constraints.real
174 codomain = constraints.real
175 bijective = True
176 event_dim = 1
177
178 def __init__(self, nn):
179 super(ConditionalPlanar, self).__init__()
180 self.nn = nn
181
182 def condition(self, context):
183 bias, u, w = self.nn(context)
184 return ConditionedPlanar(bias, u, w)
185
186
187 def planar(input_dim):
188 """
189 A helper function to create a :class:`~pyro.distributions.transforms.Planar` object for consistency with other
190 helpers.
191
192 :param input_dim: Dimension of input variable
193 :type input_dim: int
194
195 """
196
197 return Planar(input_dim)
198
199
200 def conditional_planar(input_dim, context_dim, hidden_dims=None):
201 """
202 A helper function to create a :class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care of
203 constructing a dense network with the correct input/output dimensions.
204
205 :param input_dim: Dimension of input variable
206 :type input_dim: int
207 :param context_dim: Dimension of context variable
208 :type context_dim: int
209 :param hidden_dims: The desired hidden dimensions of the dense network. Defaults
210 to using [input_dim * 10, input_dim * 10]
211 :type hidden_dims: list[int]
212
213
214 """
215
216 if hidden_dims is None:
217 hidden_dims = [input_dim * 10, input_dim * 10]
218 hypernet = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])
219 return ConditionalPlanar(hypernet)
220
[end of pyro/distributions/transforms/planar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/distributions/transforms/planar.py b/pyro/distributions/transforms/planar.py
--- a/pyro/distributions/transforms/planar.py
+++ b/pyro/distributions/transforms/planar.py
@@ -14,6 +14,11 @@
@copy_docs_from(Transform)
class ConditionedPlanar(Transform):
+ domain = constraints.real
+ codomain = constraints.real
+ bijective = True
+ event_dim = 1
+
def __init__(self, bias=None, u=None, w=None):
super(ConditionedPlanar, self).__init__(cache_size=1)
self.bias = bias
| {"golden_diff": "diff --git a/pyro/distributions/transforms/planar.py b/pyro/distributions/transforms/planar.py\n--- a/pyro/distributions/transforms/planar.py\n+++ b/pyro/distributions/transforms/planar.py\n@@ -14,6 +14,11 @@\n \n @copy_docs_from(Transform)\n class ConditionedPlanar(Transform):\n+ domain = constraints.real\n+ codomain = constraints.real\n+ bijective = True\n+ event_dim = 1\n+\n def __init__(self, bias=None, u=None, w=None):\n super(ConditionedPlanar, self).__init__(cache_size=1)\n self.bias = bias\n", "issue": "ConditionedPlanarFlow's event_dim is wrong [bug]\n\r\n--------------------------------------------------------------------------------------------------\r\n### Issue Description\r\nProvide a brief description of the issue.\r\n\r\nI think the ConditionedPlanarFlow's event_dim is incorrect. I think the class needs to be set as such:\r\n```\r\n\r\nclass ConditionedPlanarFlow(Transform):\r\n event_dim=1 #this line is not in the current release version\r\n def __init__(self, bias=None, u=None, w=None):\r\n super(ConditionedPlanarFlow, self).__init__(cache_size=1)\r\n self.bias = bias\r\n self.u = u\r\n self.w = w\r\n self._cached_logDetJ = None\r\n\r\n```\r\nThe Transform super class default is event_dim=0.\r\n\r\nMaybe I misunderstand this class, but I notice the PlanarFlow class has event_dim=1, as do many of the other normalizing flows and when I tried using this flow for my models my ELBO would go negative. I think the confusion is that ConditionalPlanarFlow has the event_dim=1, but that isn't setting it when it the actual flows are created (from what I can tell anyways).\r\n\r\n### Environment\r\nFor any bugs, please provide the following:\r\n - Found on: Ubuntu \r\n - PyTorch version: I think I was using 1.2 ...but again this is a super minor bug\r\nYou can see it in the the pyro docs http://docs.pyro.ai/en/stable/_modules/pyro/distributions/transforms/planar.html#ConditionalPlanarFlow\r\n\r\n - Pyro version: 3.6.10 (I think...pretty sure a single boolean value is pretty robust to version though)\r\nWas originally found in...nightly build (not sure on term... 
I built from sourceb asically)\r\n\r\n\n", "before_files": [{"content": "import math\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import constraints\nimport torch.nn.functional as F\n\nfrom torch.distributions import Transform\nfrom pyro.distributions.conditional import ConditionalTransformModule\nfrom pyro.distributions.torch_transform import TransformModule\nfrom pyro.distributions.util import copy_docs_from\nfrom pyro.nn import DenseNN\n\n\n@copy_docs_from(Transform)\nclass ConditionedPlanar(Transform):\n def __init__(self, bias=None, u=None, w=None):\n super(ConditionedPlanar, self).__init__(cache_size=1)\n self.bias = bias\n self.u = u\n self.w = w\n self._cached_logDetJ = None\n\n # This method ensures that torch(u_hat, w) > -1, required for invertibility\n def u_hat(self, u, w):\n alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)\n a_prime = -1 + F.softplus(alpha)\n return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))\n\n def _call(self, x):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.Tensor\n Invokes the bijection x => y; in the prototypical context of a\n :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from the base distribution (or the output\n of a previous transform)\n \"\"\"\n\n # x ~ (batch_size, dim_size, 1)\n # w ~ (batch_size, 1, dim_size)\n # bias ~ (batch_size, 1)\n act = torch.tanh(torch.matmul(self.w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + self.bias)\n u_hat = self.u_hat(self.u, self.w)\n y = x + u_hat * act\n\n psi_z = (1. - act.pow(2)) * self.w\n self._cached_logDetJ = torch.log(\n torch.abs(1 + torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1)).squeeze(-1).squeeze(-1)))\n\n return y\n\n def _inverse(self, y):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.Tensor\n Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values\n `y`; rather it assumes `y` is the result of a previously computed application of the bijector\n to some `x` (which was cached on the forward call)\n \"\"\"\n\n raise KeyError(\"ConditionalPlanar object expected to find key in intermediates cache but didn't\")\n\n def log_abs_det_jacobian(self, x, y):\n \"\"\"\n Calculates the elementwise determinant of the log Jacobian\n \"\"\"\n return self._cached_logDetJ\n\n\n@copy_docs_from(ConditionedPlanar)\nclass Planar(ConditionedPlanar, TransformModule):\n \"\"\"\n A 'planar' bijective transform with equation,\n\n :math:`\\\\mathbf{y} = \\\\mathbf{x} + \\\\mathbf{u}\\\\tanh(\\\\mathbf{w}^T\\\\mathbf{z}+b)`\n\n where :math:`\\\\mathbf{x}` are the inputs, :math:`\\\\mathbf{y}` are the outputs, and the learnable parameters\n are :math:`b\\\\in\\\\mathbb{R}`, :math:`\\\\mathbf{u}\\\\in\\\\mathbb{R}^D`, :math:`\\\\mathbf{w}\\\\in\\\\mathbb{R}^D` for input\n dimension :math:`D`. 
For this to be an invertible transformation, the condition\n :math:`\\\\mathbf{w}^T\\\\mathbf{u}>-1` is enforced.\n\n Together with :class:`~pyro.distributions.TransformedDistribution` this provides a way to create richer\n variational approximations.\n\n Example usage:\n\n >>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))\n >>> transform = Planar(10)\n >>> pyro.module(\"my_transform\", transform) # doctest: +SKIP\n >>> flow_dist = dist.TransformedDistribution(base_dist, [transform])\n >>> flow_dist.sample() # doctest: +SKIP\n tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,\n 0.1389, -0.4629, 0.0986])\n\n The inverse of this transform does not possess an analytical solution and is left unimplemented. However,\n the inverse is cached when the forward operation is called during sampling, and so samples drawn using\n the planar transform can be scored.\n\n :param input_dim: the dimension of the input (and output) variable.\n :type input_dim: int\n\n References:\n\n Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n \"\"\"\n\n domain = constraints.real\n codomain = constraints.real\n bijective = True\n event_dim = 1\n\n def __init__(self, input_dim):\n super(Planar, self).__init__()\n\n self.bias = nn.Parameter(torch.Tensor(1,))\n self.u = nn.Parameter(torch.Tensor(input_dim,))\n self.w = nn.Parameter(torch.Tensor(input_dim,))\n self.input_dim = input_dim\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.u.size(0))\n self.w.data.uniform_(-stdv, stdv)\n self.u.data.uniform_(-stdv, stdv)\n self.bias.data.zero_()\n\n\n@copy_docs_from(ConditionalTransformModule)\nclass ConditionalPlanar(ConditionalTransformModule):\n \"\"\"\n A conditional 'planar' bijective transform using the equation,\n\n :math:`\\\\mathbf{y} = \\\\mathbf{x} + \\\\mathbf{u}\\\\tanh(\\\\mathbf{w}^T\\\\mathbf{z}+b)`\n\n where :math:`\\\\mathbf{x}` are the inputs with dimension :math:`D`, :math:`\\\\mathbf{y}` are the outputs,\n and the pseudo-parameters :math:`b\\\\in\\\\mathbb{R}`, :math:`\\\\mathbf{u}\\\\in\\\\mathbb{R}^D`, and\n :math:`\\\\mathbf{w}\\\\in\\\\mathbb{R}^D` are the output of a function, e.g. a NN, with input\n :math:`z\\\\in\\\\mathbb{R}^{M}` representing the context variable to condition on. For this to be an\n invertible transformation, the condition :math:`\\\\mathbf{w}^T\\\\mathbf{u}>-1` is enforced.\n\n Together with :class:`~pyro.distributions.ConditionalTransformedDistribution` this provides a way to create\n richer variational approximations.\n\n Example usage:\n\n >>> from pyro.nn.dense_nn import DenseNN\n >>> input_dim = 10\n >>> context_dim = 5\n >>> batch_size = 3\n >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))\n >>> hypernet = DenseNN(context_dim, [50, 50], param_dims=[1, input_dim, input_dim])\n >>> transform = ConditionalPlanar(hypernet)\n >>> z = torch.rand(batch_size, context_dim)\n >>> flow_dist = dist.ConditionalTransformedDistribution(base_dist, [transform]).condition(z)\n >>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP\n\n The inverse of this transform does not possess an analytical solution and is left unimplemented. 
However,\n the inverse is cached when the forward operation is called during sampling, and so samples drawn using\n the planar transform can be scored.\n\n :param nn: a function inputting the context variable and outputting a triplet of real-valued parameters\n of dimensions :math:`(1, D, D)`.\n :type nn: callable\n\n References:\n Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n \"\"\"\n\n domain = constraints.real\n codomain = constraints.real\n bijective = True\n event_dim = 1\n\n def __init__(self, nn):\n super(ConditionalPlanar, self).__init__()\n self.nn = nn\n\n def condition(self, context):\n bias, u, w = self.nn(context)\n return ConditionedPlanar(bias, u, w)\n\n\ndef planar(input_dim):\n \"\"\"\n A helper function to create a :class:`~pyro.distributions.transforms.Planar` object for consistency with other\n helpers.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n\n \"\"\"\n\n return Planar(input_dim)\n\n\ndef conditional_planar(input_dim, context_dim, hidden_dims=None):\n \"\"\"\n A helper function to create a :class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care of\n constructing a dense network with the correct input/output dimensions.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n :param context_dim: Dimension of context variable\n :type context_dim: int\n :param hidden_dims: The desired hidden dimensions of the dense network. Defaults\n to using [input_dim * 10, input_dim * 10]\n :type hidden_dims: list[int]\n\n\n \"\"\"\n\n if hidden_dims is None:\n hidden_dims = [input_dim * 10, input_dim * 10]\n hypernet = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])\n return ConditionalPlanar(hypernet)\n", "path": "pyro/distributions/transforms/planar.py"}]} | 3,718 | 149 |
gh_patches_debug_2000 | rasdani/github-patches | git_diff | automl__auto-sklearn-190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add warning if dependencies are not met
There should be a warning if one of the following dependencies is not met:
- scikit-learn==0.17
- smac==0.0.1
- lockfile>=0.10
- ConfigSpace>=0.2.1
- pyrfr==0.2.1
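
[Editorial note] A minimal sketch of how such a check might be wired up with the `verify_packages` helper shown in the code below; the constant name and the import-time call site are assumptions for illustration, not auto-sklearn's actual layout:

```python
# Hypothetical call site, e.g. somewhere in autosklearn/__init__.py
from autosklearn.util.dependencies import verify_packages

__MANDATORY_PACKAGES__ = """
scikit-learn==0.17
smac==0.0.1
lockfile>=0.10
ConfigSpace>=0.2.1
pyrfr==0.2.1
"""

# verify_packages raises MissingPackageError / IncorrectPackageVersionError on an
# unmet requirement; a warning-only variant could catch these and call
# warnings.warn(...) instead of aborting.
verify_packages(__MANDATORY_PACKAGES__)
```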
</issue>
<code>
[start of autosklearn/util/dependencies.py]
1 from warnings import warn
2
3 import pkg_resources
4 import re
5
6 from distutils.version import LooseVersion
7
8
9 RE_PATTERN = re.compile('^(?P<name>[\w\-]+)((?P<operation>==|>=|>)(?P<version>(\d+\.)?(\d+\.)?(\d+)))?$')
10
11
12 def verify_packages(packages):
13 if not packages:
14 return
15 if isinstance(packages, str):
16 packages = packages.splitlines()
17
18 for package in packages:
19 if not package:
20 continue
21
22 match = RE_PATTERN.match(package)
23 if match:
24 name = match.group('name')
25 operation = match.group('operation')
26 version = match.group('version')
27 _verify_package(name, operation, version)
28 else:
29 raise ValueError('Unable to read requirement: %s' % package)
30
31
32 def _verify_package(name, operation, version):
33 try:
34 module = pkg_resources.get_distribution(name)
35 except pkg_resources.DistributionNotFound:
36 raise MissingPackageError(name) from None
37
38 if not operation:
39 return
40
41 required_version = LooseVersion(version)
42 installed_version = LooseVersion(module.version)
43
44 if operation == '==':
45 check = required_version == installed_version
46 elif operation == '>':
47 check = installed_version > required_version
48 elif operation == '>=':
49 check = installed_version > required_version or \
50 installed_version == required_version
51 else:
52 raise NotImplementedError('operation \'%s\' is not supported' % operation)
53 if not check:
54 raise IncorrectPackageVersionError(name, installed_version, operation, required_version)
55
56
57 class MissingPackageError(Exception):
58
59 error_message = 'mandatory package \'{name}\' not found'
60
61 def __init__(self, package_name):
62 self.package_name = package_name
63 super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))
64
65
66 class IncorrectPackageVersionError(Exception):
67
68 error_message = '\'{name} {installed_version}\' version mismatch ({operation}{required_version})'
69
70 def __init__(self, package_name, installed_version, operation, required_version):
71 self.package_name = package_name
72 self.installed_version = installed_version
73 self.operation = operation
74 self.required_version = required_version
75 message = self.error_message.format(name=package_name,
76 installed_version=installed_version,
77 operation=operation,
78 required_version=required_version)
79 super(IncorrectPackageVersionError, self).__init__(message)
80
[end of autosklearn/util/dependencies.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autosklearn/util/dependencies.py b/autosklearn/util/dependencies.py
--- a/autosklearn/util/dependencies.py
+++ b/autosklearn/util/dependencies.py
@@ -33,7 +33,7 @@
try:
module = pkg_resources.get_distribution(name)
except pkg_resources.DistributionNotFound:
- raise MissingPackageError(name) from None
+ raise MissingPackageError(name)
if not operation:
return
| {"golden_diff": "diff --git a/autosklearn/util/dependencies.py b/autosklearn/util/dependencies.py\n--- a/autosklearn/util/dependencies.py\n+++ b/autosklearn/util/dependencies.py\n@@ -33,7 +33,7 @@\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n- raise MissingPackageError(name) from None\n+ raise MissingPackageError(name)\n \n if not operation:\n return\n", "issue": "Add warning if dependencies are not met\nThere should be a warning if one of the following dependencies is not met:\r\n- scikit-learn==0.17\r\n- smac==0.0.1\r\n- lockfile>=0.10\r\n- ConfigSpace>=0.2.1\r\n- pyrfr==0.2.1\r\n\n", "before_files": [{"content": "from warnings import warn\n\nimport pkg_resources\nimport re\n\nfrom distutils.version import LooseVersion\n\n\nRE_PATTERN = re.compile('^(?P<name>[\\w\\-]+)((?P<operation>==|>=|>)(?P<version>(\\d+\\.)?(\\d+\\.)?(\\d+)))?$')\n\n\ndef verify_packages(packages):\n if not packages:\n return\n if isinstance(packages, str):\n packages = packages.splitlines()\n\n for package in packages:\n if not package:\n continue\n\n match = RE_PATTERN.match(package)\n if match:\n name = match.group('name')\n operation = match.group('operation')\n version = match.group('version')\n _verify_package(name, operation, version)\n else:\n raise ValueError('Unable to read requirement: %s' % package)\n\n\ndef _verify_package(name, operation, version):\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n raise MissingPackageError(name) from None\n\n if not operation:\n return\n\n required_version = LooseVersion(version)\n installed_version = LooseVersion(module.version)\n\n if operation == '==':\n check = required_version == installed_version\n elif operation == '>':\n check = installed_version > required_version\n elif operation == '>=':\n check = installed_version > required_version or \\\n installed_version == required_version\n else:\n raise NotImplementedError('operation \\'%s\\' is not supported' % operation)\n if not check:\n raise IncorrectPackageVersionError(name, installed_version, operation, required_version)\n\n\nclass MissingPackageError(Exception):\n\n error_message = 'mandatory package \\'{name}\\' not found'\n\n def __init__(self, package_name):\n self.package_name = package_name\n super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))\n\n\nclass IncorrectPackageVersionError(Exception):\n\n error_message = '\\'{name} {installed_version}\\' version mismatch ({operation}{required_version})'\n\n def __init__(self, package_name, installed_version, operation, required_version):\n self.package_name = package_name\n self.installed_version = installed_version\n self.operation = operation\n self.required_version = required_version\n message = self.error_message.format(name=package_name,\n installed_version=installed_version,\n operation=operation,\n required_version=required_version)\n super(IncorrectPackageVersionError, self).__init__(message)\n", "path": "autosklearn/util/dependencies.py"}]} | 1,304 | 104 |
gh_patches_debug_9533 | rasdani/github-patches | git_diff | quantumlib__Cirq-5478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WaitGate on multiple qubits gets confused about num_qubits when resolved
**Description of the issue**
`WaitGate.num_qubits()` appears to mutate when going through parameter resolution.
**How to reproduce the issue**
```python
g = cirq.wait(cirq.LineQubit(1), cirq.LineQubit(2))
cirq.resolve_parameters(g, {'a': 1})
```
<details>
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/protocols/resolve_parameters.py:184, in resolve_parameters(val, param_resolver, recursive)
182 result = NotImplemented
183 else:
--> 184 result = getter(param_resolver, recursive)
186 if result is not NotImplemented:
187 return result
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:278, in GateOperation._resolve_parameters_(self, resolver, recursive)
274 def _resolve_parameters_(
275 self, resolver: 'cirq.ParamResolver', recursive: bool
276 ) -> 'cirq.Operation':
277 resolved_gate = protocols.resolve_parameters(self.gate, resolver, recursive)
--> 278 return self.with_gate(resolved_gate)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:82, in GateOperation.with_gate(self, new_gate)
79 if self.gate is new_gate:
80 # As GateOperation is immutable, this can return the original.
81 return self
---> 82 return new_gate.on(*self.qubits)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:219, in Gate.on(self, *qubits)
213 def on(self, *qubits: Qid) -> 'Operation':
214 """Returns an application of this gate to the given qubits.
215
216 Args:
217 *qubits: The collection of qubits to potentially apply the gate to.
218 """
--> 219 return ops.gate_operation.GateOperation(self, list(qubits))
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:61, in GateOperation.__init__(self, gate, qubits)
54 def __init__(self, gate: 'cirq.Gate', qubits: Sequence['cirq.Qid']) -> None:
55 """Inits GateOperation.
56
57 Args:
58 gate: The gate to apply.
59 qubits: The qubits to operate on.
60 """
---> 61 gate.validate_args(qubits)
62 self._gate = gate
63 self._qubits = tuple(qubits)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:211, in Gate.validate_args(self, qubits)
194 def validate_args(self, qubits: Sequence['cirq.Qid']) -> None:
195 """Checks if this gate can be applied to the given qubits.
196
197 By default checks that:
(...)
209 ValueError: The gate can't be applied to the qubits.
210 """
--> 211 _validate_qid_shape(self, qubits)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:929, in _validate_qid_shape(val, qubits)
927 qid_shape = protocols.qid_shape(val)
928 if len(qubits) != len(qid_shape):
--> 929 raise ValueError(
930 'Wrong number of qubits for <{!r}>. '
931 'Expected {} qubits but got <{!r}>.'.format(val, len(qid_shape), qubits)
932 )
933 if any(qid.dimension != dimension for qid, dimension in zip(qubits, qid_shape)):
934 raise ValueError(
935 'Wrong shape of qids for <{!r}>. '
936 'Expected {} but got {} <{!r}>.'.format(
937 val, qid_shape, tuple(qid.dimension for qid in qubits), qubits
938 )
939 )
ValueError: Wrong number of qubits for <cirq.WaitGate(cirq.Duration(millis=0))>. Expected 1 qubits but got <[cirq.LineQubit(1), cirq.LineQubit(2)]>.
</details>
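
[Editorial note] A likely mechanism, judging from the `wait_gate.py` listing below (this is a reading of the traceback, not an authoritative diagnosis):

```python
# Excerpt of WaitGate._resolve_parameters_ as shipped (see the full file below):
def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':
    # Only the resolved duration is forwarded; the original gate's qid_shape /
    # num_qubits is dropped, so the rebuilt WaitGate falls back to the one-qubit
    # default and no longer matches the two qubits held by the GateOperation.
    return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))
```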
**Cirq version**
'0.15.0.dev20220503224557'
</issue>
<code>
[start of cirq-core/cirq/ops/wait_gate.py]
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union
15
16 import sympy
17
18 from cirq import value, protocols
19 from cirq.ops import raw_types
20
21 if TYPE_CHECKING:
22 import cirq
23
24
25 @value.value_equality
26 class WaitGate(raw_types.Gate):
27 r"""An idle gate that represents waiting.
28
29 In non-noisy simulators, this gate is just an identity gate. But noisy
30 simulators and noise models may insert more error for longer waits.
31 """
32
33 def __init__(
34 self,
35 duration: 'cirq.DURATION_LIKE',
36 num_qubits: Optional[int] = None,
37 qid_shape: Tuple[int, ...] = None,
38 ) -> None:
39 """Initialize a wait gate with the given duration.
40
41 Args:
42 duration: A constant or parameterized wait duration. This can be
43 an instance of `datetime.timedelta` or `cirq.Duration`.
44 num_qubits: The number of qubits the gate operates on. If None and `qid_shape` is None,
45 this defaults to one qubit.
46 qid_shape: Can be specified instead of `num_qubits` for the case that the gate should
47 act on qudits.
48
49 Raises:
50 ValueError: If the `qid_shape` provided is empty or `num_qubits` contradicts
51 `qid_shape`.
52 """
53 self._duration = value.Duration(duration)
54 if not protocols.is_parameterized(self.duration) and self.duration < 0:
55 raise ValueError('duration < 0')
56 if qid_shape is None:
57 if num_qubits is None:
58 # Assume one qubit for backwards compatibility
59 qid_shape = (2,)
60 else:
61 qid_shape = (2,) * num_qubits
62 if num_qubits is None:
63 num_qubits = len(qid_shape)
64 if not qid_shape:
65 raise ValueError('Waiting on an empty set of qubits.')
66 if num_qubits != len(qid_shape):
67 raise ValueError('len(qid_shape) != num_qubits')
68 self._qid_shape = qid_shape
69
70 @property
71 def duration(self) -> 'cirq.Duration':
72 return self._duration
73
74 def _is_parameterized_(self) -> bool:
75 return protocols.is_parameterized(self.duration)
76
77 def _parameter_names_(self) -> AbstractSet[str]:
78 return protocols.parameter_names(self.duration)
79
80 def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':
81 return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))
82
83 def _qid_shape_(self) -> Tuple[int, ...]:
84 return self._qid_shape
85
86 def _has_unitary_(self) -> bool:
87 return True
88
89 def _apply_unitary_(self, args):
90 return args.target_tensor # Identity.
91
92 def _decompose_(self, qubits):
93 return []
94
95 def _trace_distance_bound_(self):
96 return 0
97
98 def __pow__(self, power):
99 if power == 1 or power == -1:
100 # The inverse of a wait is still a wait.
101 return self
102 # Other scalar exponents could scale the wait... but ultimately it is
103 # ambiguous whether the user wanted to scale the duration or just wanted
104 # to affect the unitary. Play it safe and fail.
105 return NotImplemented
106
107 def __str__(self) -> str:
108 return f'WaitGate({self.duration})'
109
110 def __repr__(self) -> str:
111 return f'cirq.WaitGate({repr(self.duration)})'
112
113 def _json_dict_(self) -> Dict[str, Any]:
114 d = protocols.obj_to_dict_helper(self, ['duration'])
115 if len(self._qid_shape) != 1:
116 d['num_qubits'] = len(self._qid_shape)
117 if any(d != 2 for d in self._qid_shape):
118 d['qid_shape'] = self._qid_shape
119 return d
120
121 @classmethod
122 def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):
123 return cls(
124 duration=duration,
125 num_qubits=num_qubits,
126 qid_shape=None if qid_shape is None else tuple(qid_shape),
127 )
128
129 def _value_equality_values_(self) -> Any:
130 return self.duration
131
132 def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):
133 return 'WAIT\n'
134
135
136 def wait(
137 *target: 'cirq.Qid',
138 duration: 'cirq.DURATION_LIKE' = None,
139 picos: Union[int, float, sympy.Expr] = 0,
140 nanos: Union[int, float, sympy.Expr] = 0,
141 micros: Union[int, float, sympy.Expr] = 0,
142 millis: Union[int, float, sympy.Expr] = 0,
143 ) -> raw_types.Operation:
144 """Creates a WaitGate applied to all the given qubits.
145
146 The duration can be specified as a DURATION_LIKE or using keyword args with
147 numbers in the appropriate units. See Duration for details.
148
149 Args:
150 *target: The qubits that should wait.
151 duration: Wait duration (see Duration).
152 picos: Picoseconds to wait (see Duration).
153 nanos: Nanoseconds to wait (see Duration).
154 micros: Microseconds to wait (see Duration).
155 millis: Milliseconds to wait (see Duration).
156 """
157 return WaitGate(
158 duration=value.Duration(duration, picos=picos, nanos=nanos, micros=micros, millis=millis),
159 qid_shape=protocols.qid_shape(target),
160 ).on(*target)
161
[end of cirq-core/cirq/ops/wait_gate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq-core/cirq/ops/wait_gate.py b/cirq-core/cirq/ops/wait_gate.py
--- a/cirq-core/cirq/ops/wait_gate.py
+++ b/cirq-core/cirq/ops/wait_gate.py
@@ -78,7 +78,10 @@
return protocols.parameter_names(self.duration)
def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':
- return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))
+ return WaitGate(
+ protocols.resolve_parameters(self.duration, resolver, recursive),
+ qid_shape=self._qid_shape,
+ )
def _qid_shape_(self) -> Tuple[int, ...]:
return self._qid_shape
| {"golden_diff": "diff --git a/cirq-core/cirq/ops/wait_gate.py b/cirq-core/cirq/ops/wait_gate.py\n--- a/cirq-core/cirq/ops/wait_gate.py\n+++ b/cirq-core/cirq/ops/wait_gate.py\n@@ -78,7 +78,10 @@\n return protocols.parameter_names(self.duration)\n \n def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':\n- return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))\n+ return WaitGate(\n+ protocols.resolve_parameters(self.duration, resolver, recursive),\n+ qid_shape=self._qid_shape,\n+ )\n \n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n", "issue": "WaitGate on multiple qubits gets confused about num_qubits when resolved\n**Description of the issue**\r\n`WaitGate.num_qubits()` appears to mutate when going through parameter resolution.\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\ng = cirq.wait(cirq.LineQubit(1), cirq.LineQubit(2))\r\ncirq.resolve_parameters(g, {'a': 1})\r\n```\r\n\r\n<details>\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/protocols/resolve_parameters.py:184, in resolve_parameters(val, param_resolver, recursive)\r\n 182 result = NotImplemented\r\n 183 else:\r\n--> 184 result = getter(param_resolver, recursive)\r\n 186 if result is not NotImplemented:\r\n 187 return result\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:278, in GateOperation._resolve_parameters_(self, resolver, recursive)\r\n 274 def _resolve_parameters_(\r\n 275 self, resolver: 'cirq.ParamResolver', recursive: bool\r\n 276 ) -> 'cirq.Operation':\r\n 277 resolved_gate = protocols.resolve_parameters(self.gate, resolver, recursive)\r\n--> 278 return self.with_gate(resolved_gate)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:82, in GateOperation.with_gate(self, new_gate)\r\n 79 if self.gate is new_gate:\r\n 80 # As GateOperation is immutable, this can return the original.\r\n 81 return self\r\n---> 82 return new_gate.on(*self.qubits)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:219, in Gate.on(self, *qubits)\r\n 213 def on(self, *qubits: Qid) -> 'Operation':\r\n 214 \"\"\"Returns an application of this gate to the given qubits.\r\n 215 \r\n 216 Args:\r\n 217 *qubits: The collection of qubits to potentially apply the gate to.\r\n 218 \"\"\"\r\n--> 219 return ops.gate_operation.GateOperation(self, list(qubits))\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:61, in GateOperation.__init__(self, gate, qubits)\r\n 54 def __init__(self, gate: 'cirq.Gate', qubits: Sequence['cirq.Qid']) -> None:\r\n 55 \"\"\"Inits GateOperation.\r\n 56 \r\n 57 Args:\r\n 58 gate: The gate to apply.\r\n 59 qubits: The qubits to operate on.\r\n 60 \"\"\"\r\n---> 61 gate.validate_args(qubits)\r\n 62 self._gate = gate\r\n 63 self._qubits = tuple(qubits)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:211, in Gate.validate_args(self, qubits)\r\n 194 def validate_args(self, qubits: Sequence['cirq.Qid']) -> None:\r\n 195 \"\"\"Checks if this gate can be applied to the given qubits.\r\n 196 \r\n 197 By default checks that:\r\n (...)\r\n 209 ValueError: The gate can't be applied to the qubits.\r\n 210 \"\"\"\r\n--> 211 _validate_qid_shape(self, qubits)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:929, in _validate_qid_shape(val, qubits)\r\n 927 qid_shape = protocols.qid_shape(val)\r\n 928 if len(qubits) != 
len(qid_shape):\r\n--> 929 raise ValueError(\r\n 930 'Wrong number of qubits for <{!r}>. '\r\n 931 'Expected {} qubits but got <{!r}>.'.format(val, len(qid_shape), qubits)\r\n 932 )\r\n 933 if any(qid.dimension != dimension for qid, dimension in zip(qubits, qid_shape)):\r\n 934 raise ValueError(\r\n 935 'Wrong shape of qids for <{!r}>. '\r\n 936 'Expected {} but got {} <{!r}>.'.format(\r\n 937 val, qid_shape, tuple(qid.dimension for qid in qubits), qubits\r\n 938 )\r\n 939 )\r\n\r\nValueError: Wrong number of qubits for <cirq.WaitGate(cirq.Duration(millis=0))>. Expected 1 qubits but got <[cirq.LineQubit(1), cirq.LineQubit(2)]>.\r\n</details>\r\n\r\n**Cirq version**\r\n'0.15.0.dev20220503224557'\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union\n\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass WaitGate(raw_types.Gate):\n r\"\"\"An idle gate that represents waiting.\n\n In non-noisy simulators, this gate is just an identity gate. But noisy\n simulators and noise models may insert more error for longer waits.\n \"\"\"\n\n def __init__(\n self,\n duration: 'cirq.DURATION_LIKE',\n num_qubits: Optional[int] = None,\n qid_shape: Tuple[int, ...] = None,\n ) -> None:\n \"\"\"Initialize a wait gate with the given duration.\n\n Args:\n duration: A constant or parameterized wait duration. This can be\n an instance of `datetime.timedelta` or `cirq.Duration`.\n num_qubits: The number of qubits the gate operates on. 
If None and `qid_shape` is None,\n this defaults to one qubit.\n qid_shape: Can be specified instead of `num_qubits` for the case that the gate should\n act on qudits.\n\n Raises:\n ValueError: If the `qid_shape` provided is empty or `num_qubits` contradicts\n `qid_shape`.\n \"\"\"\n self._duration = value.Duration(duration)\n if not protocols.is_parameterized(self.duration) and self.duration < 0:\n raise ValueError('duration < 0')\n if qid_shape is None:\n if num_qubits is None:\n # Assume one qubit for backwards compatibility\n qid_shape = (2,)\n else:\n qid_shape = (2,) * num_qubits\n if num_qubits is None:\n num_qubits = len(qid_shape)\n if not qid_shape:\n raise ValueError('Waiting on an empty set of qubits.')\n if num_qubits != len(qid_shape):\n raise ValueError('len(qid_shape) != num_qubits')\n self._qid_shape = qid_shape\n\n @property\n def duration(self) -> 'cirq.Duration':\n return self._duration\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.duration)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self.duration)\n\n def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':\n return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _apply_unitary_(self, args):\n return args.target_tensor # Identity.\n\n def _decompose_(self, qubits):\n return []\n\n def _trace_distance_bound_(self):\n return 0\n\n def __pow__(self, power):\n if power == 1 or power == -1:\n # The inverse of a wait is still a wait.\n return self\n # Other scalar exponents could scale the wait... but ultimately it is\n # ambiguous whether the user wanted to scale the duration or just wanted\n # to affect the unitary. Play it safe and fail.\n return NotImplemented\n\n def __str__(self) -> str:\n return f'WaitGate({self.duration})'\n\n def __repr__(self) -> str:\n return f'cirq.WaitGate({repr(self.duration)})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n d = protocols.obj_to_dict_helper(self, ['duration'])\n if len(self._qid_shape) != 1:\n d['num_qubits'] = len(self._qid_shape)\n if any(d != 2 for d in self._qid_shape):\n d['qid_shape'] = self._qid_shape\n return d\n\n @classmethod\n def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):\n return cls(\n duration=duration,\n num_qubits=num_qubits,\n qid_shape=None if qid_shape is None else tuple(qid_shape),\n )\n\n def _value_equality_values_(self) -> Any:\n return self.duration\n\n def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):\n return 'WAIT\\n'\n\n\ndef wait(\n *target: 'cirq.Qid',\n duration: 'cirq.DURATION_LIKE' = None,\n picos: Union[int, float, sympy.Expr] = 0,\n nanos: Union[int, float, sympy.Expr] = 0,\n micros: Union[int, float, sympy.Expr] = 0,\n millis: Union[int, float, sympy.Expr] = 0,\n) -> raw_types.Operation:\n \"\"\"Creates a WaitGate applied to all the given qubits.\n\n The duration can be specified as a DURATION_LIKE or using keyword args with\n numbers in the appropriate units. 
See Duration for details.\n\n Args:\n *target: The qubits that should wait.\n duration: Wait duration (see Duration).\n picos: Picoseconds to wait (see Duration).\n nanos: Nanoseconds to wait (see Duration).\n micros: Microseconds to wait (see Duration).\n millis: Milliseconds to wait (see Duration).\n \"\"\"\n return WaitGate(\n duration=value.Duration(duration, picos=picos, nanos=nanos, micros=micros, millis=millis),\n qid_shape=protocols.qid_shape(target),\n ).on(*target)\n", "path": "cirq-core/cirq/ops/wait_gate.py"}]} | 3,470 | 168 |
gh_patches_debug_17268 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-1436 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Endless loop when loading dataset
Hi,
I'm a beginner with OpenNMT-py and I also encounter a loop when loading the training dataset. I have checked Issues 1079 and 1354 but it still does not work. My training set is small and I have not used multiple GPUs, just one. Please give me some comments, thank you!
My OpenNMT is the latest version and my OS is Ubuntu 18.04; here are the parameters I used:
python preprocess.py -train_src data/src-train.txt -train_tgt data/tgt-train.txt -valid_src data/src-val.txt -valid_tgt data/tgt-val.txt -save_data data/demo -src_seq_length 1000 -tgt_seq_length 1000 -dynamic_dict -share_vocab
CUDA_VISIBLE_DEVICES=0 python train.py -data data/demo -save_model demo-model -world_size 1 -gpu_ranks 0 -copy_attn
[2019-05-16 16:35:16,797 INFO] * src vocab size = 988
[2019-05-16 16:35:16,797 INFO] * tgt vocab size = 988
[2019-05-16 16:35:16,797 INFO] Building model...
[2019-05-16 16:35:19,271 INFO] NMTModel(
(encoder): RNNEncoder(
(embeddings): Embeddings(
(make_embedding): Sequential(
(emb_luts): Elementwise(
(0): Embedding(988, 500, padding_idx=1)
)
)
)
(rnn): LSTM(500, 500, num_layers=2, dropout=0.3)
)
(decoder): InputFeedRNNDecoder(
(embeddings): Embeddings(
(make_embedding): Sequential(
(emb_luts): Elementwise(
(0): Embedding(988, 500, padding_idx=1)
)
)
)
(dropout): Dropout(p=0.3)
(rnn): StackedLSTM(
(dropout): Dropout(p=0.3)
(layers): ModuleList(
(0): LSTMCell(1000, 500)
(1): LSTMCell(500, 500)
)
)
(attn): GlobalAttention(
(linear_in): Linear(in_features=500, out_features=500, bias=False)
(linear_out): Linear(in_features=1000, out_features=500, bias=False)
)
(copy_attn): GlobalAttention(
(linear_in): Linear(in_features=500, out_features=500, bias=False)
(linear_out): Linear(in_features=1000, out_features=500, bias=False)
)
)
(generator): CopyGenerator(
(linear): Linear(in_features=500, out_features=988, bias=True)
(linear_copy): Linear(in_features=500, out_features=1, bias=True)
)
)
[2019-05-16 16:35:19,272 INFO] encoder: 4502000
[2019-05-16 16:35:19,272 INFO] decoder: 7497489
[2019-05-16 16:35:19,272 INFO] * number of parameters: 11999489
[2019-05-16 16:35:19,308 INFO] Starting training on GPU: [0]
[2019-05-16 16:35:19,308 INFO] Start training loop and validate every 10000 steps...
[2019-05-16 16:35:19,330 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,382 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,437 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,498 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,562 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,632 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,705 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,782 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,864 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,950 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,041 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,136 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,235 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,339 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,448 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,560 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,678 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
I have tried using CPU only and default parameters, but this situation also happens. What can I do to solve this problem?
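
[Editorial note] One plausible reading, consistent with the repeated "Loading dataset" lines above and the `main()` in `onmt/train_single.py` shown below: the training shards are always wrapped in a `MultipleDatasetIterator`, even when only a single `-data` corpus was preprocessed. A hedged sketch of a guard for the single-corpus case (an assumption for illustration, not necessarily the project's fix):

```python
# Sketch: fall back to a plain iterator when only one corpus id was preprocessed.
if len(opt.data_ids) > 1:
    train_iterables = [
        build_dataset_iter("train_" + train_id, fields, opt, multi=True)
        for train_id in opt.data_ids
    ]
    train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)
else:
    train_iter = build_dataset_iter("train", fields, opt)
```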
</issue>
<code>
[start of onmt/train_single.py]
1 #!/usr/bin/env python
2 """Training on a single process."""
3 import os
4
5 import torch
6
7 from onmt.inputters.inputter import build_dataset_iter, \
8 load_old_vocab, old_style_vocab, MultipleDatasetIterator
9 from onmt.model_builder import build_model
10 from onmt.utils.optimizers import Optimizer
11 from onmt.utils.misc import set_random_seed
12 from onmt.trainer import build_trainer
13 from onmt.models import build_model_saver
14 from onmt.utils.logging import init_logger, logger
15 from onmt.utils.parse import ArgumentParser
16
17
18 def _check_save_model_path(opt):
19 save_model_path = os.path.abspath(opt.save_model)
20 model_dirname = os.path.dirname(save_model_path)
21 if not os.path.exists(model_dirname):
22 os.makedirs(model_dirname)
23
24
25 def _tally_parameters(model):
26 enc = 0
27 dec = 0
28 for name, param in model.named_parameters():
29 if 'encoder' in name:
30 enc += param.nelement()
31 else:
32 dec += param.nelement()
33 return enc + dec, enc, dec
34
35
36 def configure_process(opt, device_id):
37 if device_id >= 0:
38 torch.cuda.set_device(device_id)
39 set_random_seed(opt.seed, device_id >= 0)
40
41
42 def main(opt, device_id):
43 # NOTE: It's important that ``opt`` has been validated and updated
44 # at this point.
45 configure_process(opt, device_id)
46 init_logger(opt.log_file)
47 assert len(opt.accum_count) == len(opt.accum_steps), \
48 'Number of accum_count values must match number of accum_steps'
49 # Load checkpoint if we resume from a previous training.
50 if opt.train_from:
51 logger.info('Loading checkpoint from %s' % opt.train_from)
52 checkpoint = torch.load(opt.train_from,
53 map_location=lambda storage, loc: storage)
54
55 model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
56 ArgumentParser.update_model_opts(model_opt)
57 ArgumentParser.validate_model_opts(model_opt)
58 logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
59 vocab = checkpoint['vocab']
60 else:
61 checkpoint = None
62 model_opt = opt
63 vocab = torch.load(opt.data + '.vocab.pt')
64
65 # check for code where vocab is saved instead of fields
66 # (in the future this will be done in a smarter way)
67 if old_style_vocab(vocab):
68 fields = load_old_vocab(
69 vocab, opt.model_type, dynamic_dict=opt.copy_attn)
70 else:
71 fields = vocab
72
73 # Report src and tgt vocab sizes, including for features
74 for side in ['src', 'tgt']:
75 f = fields[side]
76 try:
77 f_iter = iter(f)
78 except TypeError:
79 f_iter = [(side, f)]
80 for sn, sf in f_iter:
81 if sf.use_vocab:
82 logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
83
84 # Build model.
85 model = build_model(model_opt, opt, fields, checkpoint)
86 n_params, enc, dec = _tally_parameters(model)
87 logger.info('encoder: %d' % enc)
88 logger.info('decoder: %d' % dec)
89 logger.info('* number of parameters: %d' % n_params)
90 _check_save_model_path(opt)
91
92 # Build optimizer.
93 optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)
94
95 # Build model saver
96 model_saver = build_model_saver(model_opt, opt, model, fields, optim)
97
98 trainer = build_trainer(
99 opt, device_id, model, fields, optim, model_saver=model_saver)
100
101 train_iterables = []
102 for train_id in opt.data_ids:
103 if train_id:
104 shard_base = "train_" + train_id
105 else:
106 shard_base = "train"
107 iterable = build_dataset_iter(shard_base, fields, opt, multi=True)
108 train_iterables.append(iterable)
109
110 train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)
111
112 valid_iter = build_dataset_iter(
113 "valid", fields, opt, is_train=False)
114
115 if len(opt.gpu_ranks):
116 logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
117 else:
118 logger.info('Starting training on CPU, could be very slow')
119 train_steps = opt.train_steps
120 if opt.single_pass and train_steps > 0:
121 logger.warning("Option single_pass is enabled, ignoring train_steps.")
122 train_steps = 0
123 trainer.train(
124 train_iter,
125 train_steps,
126 save_checkpoint_steps=opt.save_checkpoint_steps,
127 valid_iter=valid_iter,
128 valid_steps=opt.valid_steps)
129
130 if opt.tensorboard:
131 trainer.report_manager.tensorboard_writer.close()
132
[end of onmt/train_single.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/onmt/train_single.py b/onmt/train_single.py
--- a/onmt/train_single.py
+++ b/onmt/train_single.py
@@ -99,15 +99,14 @@
opt, device_id, model, fields, optim, model_saver=model_saver)
train_iterables = []
- for train_id in opt.data_ids:
- if train_id:
+ if len(opt.data_ids) > 1:
+ for train_id in opt.data_ids:
shard_base = "train_" + train_id
- else:
- shard_base = "train"
- iterable = build_dataset_iter(shard_base, fields, opt, multi=True)
- train_iterables.append(iterable)
-
- train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)
+ iterable = build_dataset_iter(shard_base, fields, opt, multi=True)
+ train_iterables.append(iterable)
+ train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)
+ else:
+ train_iter = build_dataset_iter("train", fields, opt)
valid_iter = build_dataset_iter(
"valid", fields, opt, is_train=False)
| {"golden_diff": "diff --git a/onmt/train_single.py b/onmt/train_single.py\n--- a/onmt/train_single.py\n+++ b/onmt/train_single.py\n@@ -99,15 +99,14 @@\n opt, device_id, model, fields, optim, model_saver=model_saver)\n \n train_iterables = []\n- for train_id in opt.data_ids:\n- if train_id:\n+ if len(opt.data_ids) > 1:\n+ for train_id in opt.data_ids:\n shard_base = \"train_\" + train_id\n- else:\n- shard_base = \"train\"\n- iterable = build_dataset_iter(shard_base, fields, opt, multi=True)\n- train_iterables.append(iterable)\n-\n- train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)\n+ iterable = build_dataset_iter(shard_base, fields, opt, multi=True)\n+ train_iterables.append(iterable)\n+ train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)\n+ else:\n+ train_iter = build_dataset_iter(\"train\", fields, opt)\n \n valid_iter = build_dataset_iter(\n \"valid\", fields, opt, is_train=False)\n", "issue": "Endless loop when loading dataset\nHi,\r\nI'm a beginner of OpenNMT-py and I also encounter loop on loading training dataset. I have checked Issues 1079 and 1354 but it still not work. My training set is small and I have not use multiple GPU, just one. Please give me some comments, thank you!\r\nMy OpenNMT is the latest version and my os is Ubuntu18.04, here is the parameters I used:\r\n\r\npython preprocess.py -train_src data/src-train.txt -train_tgt data/tgt-train.txt -valid_src data/src-val.txt -valid_tgt data/tgt-val.txt -save_data data/demo -src_seq_length 1000 -tgt_seq_length 1000 -dynamic_dict -share_vocab\r\n\r\nCUDA_VISIBLE_DEVICES=0 python train.py -data data/demo -save_model demo-model -world_size 1 -gpu_ranks 0 -copy_attn\r\n[2019-05-16 16:35:16,797 INFO] * src vocab size = 988\r\n[2019-05-16 16:35:16,797 INFO] * tgt vocab size = 988\r\n[2019-05-16 16:35:16,797 INFO] Building model...\r\n[2019-05-16 16:35:19,271 INFO] NMTModel(\r\n (encoder): RNNEncoder(\r\n (embeddings): Embeddings(\r\n (make_embedding): Sequential(\r\n (emb_luts): Elementwise(\r\n (0): Embedding(988, 500, padding_idx=1)\r\n )\r\n )\r\n )\r\n (rnn): LSTM(500, 500, num_layers=2, dropout=0.3)\r\n )\r\n (decoder): InputFeedRNNDecoder(\r\n (embeddings): Embeddings(\r\n (make_embedding): Sequential(\r\n (emb_luts): Elementwise(\r\n (0): Embedding(988, 500, padding_idx=1)\r\n )\r\n )\r\n )\r\n (dropout): Dropout(p=0.3)\r\n (rnn): StackedLSTM(\r\n (dropout): Dropout(p=0.3)\r\n (layers): ModuleList(\r\n (0): LSTMCell(1000, 500)\r\n (1): LSTMCell(500, 500)\r\n )\r\n )\r\n (attn): GlobalAttention(\r\n (linear_in): Linear(in_features=500, out_features=500, bias=False)\r\n (linear_out): Linear(in_features=1000, out_features=500, bias=False)\r\n )\r\n (copy_attn): GlobalAttention(\r\n (linear_in): Linear(in_features=500, out_features=500, bias=False)\r\n (linear_out): Linear(in_features=1000, out_features=500, bias=False)\r\n )\r\n )\r\n (generator): CopyGenerator(\r\n (linear): Linear(in_features=500, out_features=988, bias=True)\r\n (linear_copy): Linear(in_features=500, out_features=1, bias=True)\r\n )\r\n)\r\n[2019-05-16 16:35:19,272 INFO] encoder: 4502000\r\n[2019-05-16 16:35:19,272 INFO] decoder: 7497489\r\n[2019-05-16 16:35:19,272 INFO] * number of parameters: 11999489\r\n[2019-05-16 16:35:19,308 INFO] Starting training on GPU: [0]\r\n[2019-05-16 16:35:19,308 INFO] Start training loop and validate every 10000 steps...\r\n[2019-05-16 16:35:19,330 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,382 INFO] Loading dataset from 
data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,437 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,498 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,562 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,632 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,705 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,782 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,864 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,950 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,041 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,136 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,235 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,339 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,448 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,560 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,678 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n\r\nI have tried using CPU only and default parameters, but this situation also happens. How can I do to solve this problem?\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Training on a single process.\"\"\"\nimport os\n\nimport torch\n\nfrom onmt.inputters.inputter import build_dataset_iter, \\\n load_old_vocab, old_style_vocab, MultipleDatasetIterator\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import Optimizer\nfrom onmt.utils.misc import set_random_seed\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n else:\n dec += param.nelement()\n return enc + dec, enc, dec\n\n\ndef configure_process(opt, device_id):\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n set_random_seed(opt.seed, device_id >= 0)\n\n\ndef main(opt, device_id):\n # NOTE: It's important that ``opt`` has been validated and updated\n # at this point.\n configure_process(opt, device_id)\n init_logger(opt.log_file)\n assert len(opt.accum_count) == len(opt.accum_steps), \\\n 'Number of accum_count values must match number of accum_steps'\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n\n model_opt = ArgumentParser.ckpt_model_opts(checkpoint[\"opt\"])\n ArgumentParser.update_model_opts(model_opt)\n ArgumentParser.validate_model_opts(model_opt)\n logger.info('Loading 
vocab from checkpoint at %s.' % opt.train_from)\n vocab = checkpoint['vocab']\n else:\n checkpoint = None\n model_opt = opt\n vocab = torch.load(opt.data + '.vocab.pt')\n\n # check for code where vocab is saved instead of fields\n # (in the future this will be done in a smarter way)\n if old_style_vocab(vocab):\n fields = load_old_vocab(\n vocab, opt.model_type, dynamic_dict=opt.copy_attn)\n else:\n fields = vocab\n\n # Report src and tgt vocab sizes, including for features\n for side in ['src', 'tgt']:\n f = fields[side]\n try:\n f_iter = iter(f)\n except TypeError:\n f_iter = [(side, f)]\n for sn, sf in f_iter:\n if sf.use_vocab:\n logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, device_id, model, fields, optim, model_saver=model_saver)\n\n train_iterables = []\n for train_id in opt.data_ids:\n if train_id:\n shard_base = \"train_\" + train_id\n else:\n shard_base = \"train\"\n iterable = build_dataset_iter(shard_base, fields, opt, multi=True)\n train_iterables.append(iterable)\n\n train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)\n\n valid_iter = build_dataset_iter(\n \"valid\", fields, opt, is_train=False)\n\n if len(opt.gpu_ranks):\n logger.info('Starting training on GPU: %s' % opt.gpu_ranks)\n else:\n logger.info('Starting training on CPU, could be very slow')\n train_steps = opt.train_steps\n if opt.single_pass and train_steps > 0:\n logger.warning(\"Option single_pass is enabled, ignoring train_steps.\")\n train_steps = 0\n trainer.train(\n train_iter,\n train_steps,\n save_checkpoint_steps=opt.save_checkpoint_steps,\n valid_iter=valid_iter,\n valid_steps=opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n", "path": "onmt/train_single.py"}]} | 3,574 | 262 |
gh_patches_debug_20066 | rasdani/github-patches | git_diff | pulp__pulpcore-4682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
app entrypoint no longer supports --max-requests-jitter
**Version**
3.39
**Describe the bug**
--max-requests-jitter is not recognized
**To Reproduce**
Run the pulpcore-api entrypoint with --max-requests-jitter
**Expected behavior**
Accepts the argument
**Additional context**
Requested for Katello.
</issue>
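For reference, gunicorn exposes this setting as `max_requests_jitter`, so the fix is a pass-through option mirroring the existing `--max-requests` flag, which is exactly what the reference patch later in this entry does. A standalone sketch follows, with a dummy command body standing in for the real gunicorn application classes:

```python
import click

@click.command()
@click.option("--max-requests", type=int)
@click.option("--max-requests-jitter", type=int)  # maps to gunicorn's "max_requests_jitter"
def main(**options):
    # In the real entrypoints these land in self.options and are forwarded via
    # self.cfg.set(key.lower(), value), so options left as None are simply skipped.
    click.echo({k: v for k, v in options.items() if v is not None})

if __name__ == "__main__":
    main()
```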
<code>
[start of pulpcore/app/entrypoint.py]
1 from contextvars import ContextVar
2 from logging import getLogger
3 import os
4 import socket
5
6 import click
7 import django
8 from django.conf import settings
9 from django.db import connection
10 from django.db.utils import InterfaceError, OperationalError
11 from gunicorn.workers.sync import SyncWorker
12 from gunicorn.app.base import BaseApplication
13
14 from pulpcore.app.apps import pulp_plugin_configs
15
16 logger = getLogger(__name__)
17
18
19 using_pulp_api_worker = ContextVar("using_pulp_api_worker", default=False)
20
21
22 class PulpApiWorker(SyncWorker):
23 def notify(self):
24 super().notify()
25 self.heartbeat()
26
27 def heartbeat(self):
28 try:
29 self.api_app_status, created = self.ApiAppStatus.objects.get_or_create(
30 name=self.name, defaults={"versions": self.versions}
31 )
32
33 if not created:
34 self.api_app_status.save_heartbeat()
35
36 if self.api_app_status.versions != self.versions:
37 self.api_app_status.versions = self.versions
38 self.api_app_status.save(update_fields=["versions"])
39
40 logger.debug(self.beat_msg)
41 except (InterfaceError, OperationalError):
42 connection.close_if_unusable_or_obsolete()
43 logger.info(self.fail_beat_msg)
44
45 def init_process(self):
46 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
47 django.setup()
48 from pulpcore.app.models import ApiAppStatus
49
50 if settings.API_APP_TTL < 2 * self.timeout:
51 logger.warn(
52 "API_APP_TTL (%s) is smaller than half the gunicorn timeout (%s). "
53 "You may experience workers wrongly reporting as missing",
54 settings.API_APP_TTL,
55 self.timeout,
56 )
57
58 self.ApiAppStatus = ApiAppStatus
59 self.api_app_status = None
60
61 self.name = "{pid}@{hostname}".format(pid=self.pid, hostname=socket.gethostname())
62 self.versions = {app.label: app.version for app in pulp_plugin_configs()}
63 self.beat_msg = (
64 "Api App '{name}' heartbeat written, sleeping for '{interarrival}' seconds".format(
65 name=self.name, interarrival=self.timeout
66 )
67 )
68 self.fail_beat_msg = (
69 "Api App '{name}' failed to write a heartbeat to the database, sleeping for "
70 "'{interarrival}' seconds."
71 ).format(name=self.name, interarrival=self.timeout)
72 super().init_process()
73
74 def run(self):
75 try:
76 super().run()
77 finally:
78 # cleanup
79 if self.api_app_status:
80 self.api_app_status.delete()
81
82
83 class PulpcoreApiApplication(BaseApplication):
84 def __init__(self, options):
85 self.options = options or {}
86 super().__init__()
87
88 def load_config(self):
89 [
90 self.cfg.set(key.lower(), value)
91 for key, value in self.options.items()
92 if value is not None
93 ]
94 self.cfg.set("default_proc_name", "pulpcore-api")
95 self.cfg.set("worker_class", PulpApiWorker.__module__ + "." + PulpApiWorker.__qualname__)
96
97 def load(self):
98 using_pulp_api_worker.set(True)
99
100 import pulpcore.app.wsgi
101
102 using_pulp_api_worker.set(False)
103 return pulpcore.app.wsgi.application
104
105
106 # Gunicorn options are adapted from:
107 # https://github.com/benoitc/gunicorn/blob/master/gunicorn/config.py
108
109
110 @click.option("--bind", "-b", default="[::]:24817")
111 @click.option("--workers", "-w", type=int)
112 # @click.option("--threads", "-w", type=int) # We don't use a threaded worker...
113 @click.option("--name", "-n", "proc_name")
114 @click.option("--timeout", "-t", type=int)
115 @click.option("--graceful-timeout", type=int)
116 @click.option("--keep-alive", "keepalive", type=int)
117 @click.option("--limit-request-line", type=int)
118 @click.option("--limit-request-fields", type=int)
119 @click.option("--limit-request-field-size", type=int)
120 @click.option("--max-requests", type=int)
121 @click.option("--access-logfile", "accesslog")
122 @click.option(
123 "--access-logformat",
124 "access_log_format",
125 default=(
126 "pulp [%({correlation-id}o)s]: "
127 '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"',
128 ),
129 )
130 @click.option("--error-logfile", "--log-file", "errorlog")
131 @click.option(
132 "--log-level", "loglevel", type=click.Choice(["debug", "info", "warning", "error", "critical"])
133 )
134 @click.option("--reload/--no-reload")
135 @click.option("--reload-engine", type=click.Choice(["auto", "poll", "inotify"]))
136 @click.option("--reload-extra-file", "reload_extra_files", multiple=True)
137 @click.option("--preload/--no-preload", "preload_app")
138 @click.option("--reuse-port/--no-reuse-port")
139 @click.option("--chdir")
140 @click.option("--user", "-u")
141 @click.option("--group", "-g")
142 @click.command()
143 def main(**options):
144 PulpcoreApiApplication(options).run()
145
[end of pulpcore/app/entrypoint.py]
[start of pulpcore/content/entrypoint.py]
1 import click
2 from gunicorn.app.base import BaseApplication
3
4
5 class PulpcoreContentApplication(BaseApplication):
6 def __init__(self, options):
7 self.options = options or {}
8 super().__init__()
9
10 def load_config(self):
11 [
12 self.cfg.set(key.lower(), value)
13 for key, value in self.options.items()
14 if value is not None
15 ]
16 self.cfg.set("default_proc_name", "pulpcore-content")
17 self.cfg.set("worker_class", "aiohttp.GunicornWebWorker")
18
19 def load(self):
20 import pulpcore.content
21
22 return pulpcore.content.server
23
24
25 @click.option("--bind", "-b", default="[::]:24816")
26 @click.option("--workers", "-w", type=int)
27 # @click.option("--threads", "-w", type=int) # We don't use a threaded worker...
28 @click.option("--name", "-n", "proc_name")
29 @click.option("--timeout", "-t", type=int)
30 @click.option("--graceful-timeout", type=int)
31 @click.option("--keep-alive", "keepalive", type=int)
32 @click.option("--limit-request-line", type=int)
33 @click.option("--limit-request-fields", type=int)
34 @click.option("--limit-request-field-size", type=int)
35 @click.option("--max-requests", type=int)
36 @click.option("--access-logfile", "accesslog")
37 @click.option("--access-logformat", "access_log_format")
38 @click.option("--error-logfile", "--log-file", "errorlog")
39 @click.option(
40 "--log-level", "loglevel", type=click.Choice(["debug", "info", "warning", "error", "critical"])
41 )
42 @click.option("--reload/--no-reload")
43 @click.option("--reload-engine", type=click.Choice(["auto", "poll", "inotify"]))
44 @click.option("--reload-extra-file", "reload_extra_files", multiple=True)
45 @click.option("--preload/--no-preload", "preload_app")
46 @click.option("--reuse-port/--no-reuse-port")
47 @click.option("--chdir")
48 @click.option("--user", "-u")
49 @click.option("--group", "-g")
50 @click.command()
51 def main(**options):
52 PulpcoreContentApplication(options).run()
53
[end of pulpcore/content/entrypoint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/entrypoint.py b/pulpcore/app/entrypoint.py
--- a/pulpcore/app/entrypoint.py
+++ b/pulpcore/app/entrypoint.py
@@ -118,6 +118,7 @@
@click.option("--limit-request-fields", type=int)
@click.option("--limit-request-field-size", type=int)
@click.option("--max-requests", type=int)
+@click.option("--max-requests-jitter", type=int)
@click.option("--access-logfile", "accesslog")
@click.option(
"--access-logformat",
diff --git a/pulpcore/content/entrypoint.py b/pulpcore/content/entrypoint.py
--- a/pulpcore/content/entrypoint.py
+++ b/pulpcore/content/entrypoint.py
@@ -33,6 +33,7 @@
@click.option("--limit-request-fields", type=int)
@click.option("--limit-request-field-size", type=int)
@click.option("--max-requests", type=int)
+@click.option("--max-requests-jitter", type=int)
@click.option("--access-logfile", "accesslog")
@click.option("--access-logformat", "access_log_format")
@click.option("--error-logfile", "--log-file", "errorlog")
| {"golden_diff": "diff --git a/pulpcore/app/entrypoint.py b/pulpcore/app/entrypoint.py\n--- a/pulpcore/app/entrypoint.py\n+++ b/pulpcore/app/entrypoint.py\n@@ -118,6 +118,7 @@\n @click.option(\"--limit-request-fields\", type=int)\n @click.option(\"--limit-request-field-size\", type=int)\n @click.option(\"--max-requests\", type=int)\[email protected](\"--max-requests-jitter\", type=int)\n @click.option(\"--access-logfile\", \"accesslog\")\n @click.option(\n \"--access-logformat\",\ndiff --git a/pulpcore/content/entrypoint.py b/pulpcore/content/entrypoint.py\n--- a/pulpcore/content/entrypoint.py\n+++ b/pulpcore/content/entrypoint.py\n@@ -33,6 +33,7 @@\n @click.option(\"--limit-request-fields\", type=int)\n @click.option(\"--limit-request-field-size\", type=int)\n @click.option(\"--max-requests\", type=int)\[email protected](\"--max-requests-jitter\", type=int)\n @click.option(\"--access-logfile\", \"accesslog\")\n @click.option(\"--access-logformat\", \"access_log_format\")\n @click.option(\"--error-logfile\", \"--log-file\", \"errorlog\")\n", "issue": "app entrypoint no longer supports --max-requests-jitter\n**Version**\r\n3.39\r\n\r\n**Describe the bug**\r\n--max-requests-jitter is not recognized\r\n\r\n**To Reproduce**\r\nRun the pulpcore-api entrypoint with --max-requests-jitter\r\n\r\n**Expected behavior**\r\nAccepts the argument\r\n\r\n**Additional context**\r\nRequested for Katello.\r\n\n", "before_files": [{"content": "from contextvars import ContextVar\nfrom logging import getLogger\nimport os\nimport socket\n\nimport click\nimport django\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.db.utils import InterfaceError, OperationalError\nfrom gunicorn.workers.sync import SyncWorker\nfrom gunicorn.app.base import BaseApplication\n\nfrom pulpcore.app.apps import pulp_plugin_configs\n\nlogger = getLogger(__name__)\n\n\nusing_pulp_api_worker = ContextVar(\"using_pulp_api_worker\", default=False)\n\n\nclass PulpApiWorker(SyncWorker):\n def notify(self):\n super().notify()\n self.heartbeat()\n\n def heartbeat(self):\n try:\n self.api_app_status, created = self.ApiAppStatus.objects.get_or_create(\n name=self.name, defaults={\"versions\": self.versions}\n )\n\n if not created:\n self.api_app_status.save_heartbeat()\n\n if self.api_app_status.versions != self.versions:\n self.api_app_status.versions = self.versions\n self.api_app_status.save(update_fields=[\"versions\"])\n\n logger.debug(self.beat_msg)\n except (InterfaceError, OperationalError):\n connection.close_if_unusable_or_obsolete()\n logger.info(self.fail_beat_msg)\n\n def init_process(self):\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"pulpcore.app.settings\")\n django.setup()\n from pulpcore.app.models import ApiAppStatus\n\n if settings.API_APP_TTL < 2 * self.timeout:\n logger.warn(\n \"API_APP_TTL (%s) is smaller than half the gunicorn timeout (%s). 
\"\n \"You may experience workers wrongly reporting as missing\",\n settings.API_APP_TTL,\n self.timeout,\n )\n\n self.ApiAppStatus = ApiAppStatus\n self.api_app_status = None\n\n self.name = \"{pid}@{hostname}\".format(pid=self.pid, hostname=socket.gethostname())\n self.versions = {app.label: app.version for app in pulp_plugin_configs()}\n self.beat_msg = (\n \"Api App '{name}' heartbeat written, sleeping for '{interarrival}' seconds\".format(\n name=self.name, interarrival=self.timeout\n )\n )\n self.fail_beat_msg = (\n \"Api App '{name}' failed to write a heartbeat to the database, sleeping for \"\n \"'{interarrival}' seconds.\"\n ).format(name=self.name, interarrival=self.timeout)\n super().init_process()\n\n def run(self):\n try:\n super().run()\n finally:\n # cleanup\n if self.api_app_status:\n self.api_app_status.delete()\n\n\nclass PulpcoreApiApplication(BaseApplication):\n def __init__(self, options):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n [\n self.cfg.set(key.lower(), value)\n for key, value in self.options.items()\n if value is not None\n ]\n self.cfg.set(\"default_proc_name\", \"pulpcore-api\")\n self.cfg.set(\"worker_class\", PulpApiWorker.__module__ + \".\" + PulpApiWorker.__qualname__)\n\n def load(self):\n using_pulp_api_worker.set(True)\n\n import pulpcore.app.wsgi\n\n using_pulp_api_worker.set(False)\n return pulpcore.app.wsgi.application\n\n\n# Gunicorn options are adapted from:\n# https://github.com/benoitc/gunicorn/blob/master/gunicorn/config.py\n\n\[email protected](\"--bind\", \"-b\", default=\"[::]:24817\")\[email protected](\"--workers\", \"-w\", type=int)\n# @click.option(\"--threads\", \"-w\", type=int) # We don't use a threaded worker...\[email protected](\"--name\", \"-n\", \"proc_name\")\[email protected](\"--timeout\", \"-t\", type=int)\[email protected](\"--graceful-timeout\", type=int)\[email protected](\"--keep-alive\", \"keepalive\", type=int)\[email protected](\"--limit-request-line\", type=int)\[email protected](\"--limit-request-fields\", type=int)\[email protected](\"--limit-request-field-size\", type=int)\[email protected](\"--max-requests\", type=int)\[email protected](\"--access-logfile\", \"accesslog\")\[email protected](\n \"--access-logformat\",\n \"access_log_format\",\n default=(\n \"pulp [%({correlation-id}o)s]: \"\n '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\"',\n ),\n)\[email protected](\"--error-logfile\", \"--log-file\", \"errorlog\")\[email protected](\n \"--log-level\", \"loglevel\", type=click.Choice([\"debug\", \"info\", \"warning\", \"error\", \"critical\"])\n)\[email protected](\"--reload/--no-reload\")\[email protected](\"--reload-engine\", type=click.Choice([\"auto\", \"poll\", \"inotify\"]))\[email protected](\"--reload-extra-file\", \"reload_extra_files\", multiple=True)\[email protected](\"--preload/--no-preload\", \"preload_app\")\[email protected](\"--reuse-port/--no-reuse-port\")\[email protected](\"--chdir\")\[email protected](\"--user\", \"-u\")\[email protected](\"--group\", \"-g\")\[email protected]()\ndef main(**options):\n PulpcoreApiApplication(options).run()\n", "path": "pulpcore/app/entrypoint.py"}, {"content": "import click\nfrom gunicorn.app.base import BaseApplication\n\n\nclass PulpcoreContentApplication(BaseApplication):\n def __init__(self, options):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n [\n self.cfg.set(key.lower(), value)\n for key, value in self.options.items()\n if value is not None\n ]\n 
self.cfg.set(\"default_proc_name\", \"pulpcore-content\")\n self.cfg.set(\"worker_class\", \"aiohttp.GunicornWebWorker\")\n\n def load(self):\n import pulpcore.content\n\n return pulpcore.content.server\n\n\[email protected](\"--bind\", \"-b\", default=\"[::]:24816\")\[email protected](\"--workers\", \"-w\", type=int)\n# @click.option(\"--threads\", \"-w\", type=int) # We don't use a threaded worker...\[email protected](\"--name\", \"-n\", \"proc_name\")\[email protected](\"--timeout\", \"-t\", type=int)\[email protected](\"--graceful-timeout\", type=int)\[email protected](\"--keep-alive\", \"keepalive\", type=int)\[email protected](\"--limit-request-line\", type=int)\[email protected](\"--limit-request-fields\", type=int)\[email protected](\"--limit-request-field-size\", type=int)\[email protected](\"--max-requests\", type=int)\[email protected](\"--access-logfile\", \"accesslog\")\[email protected](\"--access-logformat\", \"access_log_format\")\[email protected](\"--error-logfile\", \"--log-file\", \"errorlog\")\[email protected](\n \"--log-level\", \"loglevel\", type=click.Choice([\"debug\", \"info\", \"warning\", \"error\", \"critical\"])\n)\[email protected](\"--reload/--no-reload\")\[email protected](\"--reload-engine\", type=click.Choice([\"auto\", \"poll\", \"inotify\"]))\[email protected](\"--reload-extra-file\", \"reload_extra_files\", multiple=True)\[email protected](\"--preload/--no-preload\", \"preload_app\")\[email protected](\"--reuse-port/--no-reuse-port\")\[email protected](\"--chdir\")\[email protected](\"--user\", \"-u\")\[email protected](\"--group\", \"-g\")\[email protected]()\ndef main(**options):\n PulpcoreContentApplication(options).run()\n", "path": "pulpcore/content/entrypoint.py"}]} | 2,678 | 273 |
gh_patches_debug_864 | rasdani/github-patches | git_diff | ivy-llc__ivy-28478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Frontend Failing Test: jax - manipulation.paddle.tile
</issue>
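The reference patch later in this entry resolves this by adding `"bfloat16"` to the dtypes excluded for the paddle `tile` frontend, which suggests the generated test cases were producing bfloat16 inputs this function cannot handle. A sketch of the patched decorator stack (the decorators come from the imports at the top of the module below):

```python
@with_unsupported_dtypes(
    {"2.6.0 and below": ("int8", "uint8", "int16", "float16", "bfloat16")},
    "paddle",
)
@to_ivy_arrays_and_back
def tile(x, repeat_times, name=None):
    return ivy.tile(x, repeats=repeat_times)
```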
<code>
[start of ivy/functional/frontends/paddle/manipulation.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 with_supported_device_and_dtypes,
10 )
11
12
13 @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
14 @to_ivy_arrays_and_back
15 def abs(x, name=None):
16 return ivy.abs(x)
17
18
19 @with_supported_dtypes(
20 {"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
21 "paddle",
22 )
23 @to_ivy_arrays_and_back
24 def broadcast_to(x, shape, name=None):
25 return ivy.broadcast_to(x, shape)
26
27
28 @with_supported_dtypes(
29 {
30 "2.6.0 and below": (
31 "bool",
32 "float16",
33 "float32",
34 "float64",
35 "int32",
36 "int64",
37 "uint8",
38 )
39 },
40 "paddle",
41 )
42 @to_ivy_arrays_and_back
43 def cast(x, dtype):
44 return ivy.astype(x, dtype)
45
46
47 @with_unsupported_dtypes({"2.6.0 and below": ("int8", "int16")}, "paddle")
48 @to_ivy_arrays_and_back
49 def concat(x, axis, name=None):
50 return ivy.concat(x, axis=axis)
51
52
53 @with_supported_dtypes(
54 {"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
55 "paddle",
56 )
57 @to_ivy_arrays_and_back
58 def expand(x, shape, name=None):
59 return ivy.expand(x, shape)
60
61
62 @with_unsupported_dtypes(
63 {"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
64 "paddle",
65 )
66 @to_ivy_arrays_and_back
67 def flip(x, axis, name=None):
68 return ivy.flip(x, axis=axis)
69
70
71 @with_supported_dtypes(
72 {"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
73 "paddle",
74 )
75 @to_ivy_arrays_and_back
76 def gather(params, indices, axis=-1, batch_dims=0, name=None):
77 return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)
78
79
80 @with_unsupported_dtypes(
81 {"2.6.0 and below": ("int8", "uint8", "int16", "uint16", "float16", "bfloat16")},
82 "paddle",
83 )
84 @to_ivy_arrays_and_back
85 def gather_nd(x, index, name=None):
86 return ivy.gather_nd(x, index)
87
88
89 @with_supported_dtypes(
90 {"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
91 "paddle",
92 )
93 @to_ivy_arrays_and_back
94 def index_add(x, index, axis, value, *, name=None):
95 x = ivy.swapaxes(x, axis, 0)
96 value = ivy.swapaxes(value, axis, 0)
97 _to_adds = []
98 index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))
99 while index:
100 _curr_idx = index[0][0]
101 while len(_to_adds) < _curr_idx:
102 _to_adds.append(ivy.zeros_like(value[0]))
103 _to_add_cum = ivy.get_item(value, index[0][1])
104 while (len(index)) > 1 and (index[0][0] == index[1][0]):
105 _to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])
106 index.pop(0)
107 _to_adds.append(_to_add_cum)
108 while len(_to_adds) < x.shape[0]:
109 _to_adds.append(ivy.zeros_like(value[0]))
110 _to_adds = ivy.stack(_to_adds)
111 if len(x.shape) < 2:
112 # Added this line due to the paddle backend treating scalars as 1-d arrays
113 _to_adds = ivy.flatten(_to_adds)
114
115 ret = ivy.add(x, _to_adds)
116 ret = ivy.swapaxes(ret, axis, 0)
117 return ret
118
119
120 @to_ivy_arrays_and_back
121 def put_along_axis(arr, indices, values, axis, reduce="assign"):
122 result = ivy.put_along_axis(arr, indices, values, axis)
123 return result
124
125
126 @with_supported_dtypes(
127 {"2.6.0 and below": ("int32", "int64", "float32", "float64")},
128 "paddle",
129 )
130 @to_ivy_arrays_and_back
131 def repeat_interleave(x, repeats, axis=None, name=None):
132 return ivy.repeat(x, repeats, axis=axis)
133
134
135 @to_ivy_arrays_and_back
136 def reshape(x, shape, name=None):
137 return ivy.reshape(x, shape)
138
139
140 @with_supported_dtypes(
141 {
142 "2.5.0 and below": (
143 "float32",
144 "float64",
145 "int32",
146 "int64",
147 "complex64",
148 "complex128",
149 )
150 },
151 "paddle",
152 )
153 @to_ivy_arrays_and_back
154 def roll(x, shifts, axis=None, name=None):
155 return ivy.roll(x, shifts, axis=axis)
156
157
158 @with_supported_device_and_dtypes(
159 {
160 "2.6.0 and above": {
161 "cpu": (
162 "bool",
163 "int32",
164 "int64",
165 "float32",
166 "float64",
167 ),
168 "gpu": ("float16",),
169 },
170 },
171 "paddle",
172 )
173 @to_ivy_arrays_and_back
174 def rot90(x, k=1, axes=(0, 1), name=None):
175 return ivy.rot90(x, k=k, axes=axes)
176
177
178 @with_unsupported_dtypes(
179 {"2.6.0 and below": ("int16", "complex64", "complex128")},
180 "paddle",
181 )
182 @to_ivy_arrays_and_back
183 def split(x, num_or_sections, axis=0, name=None):
184 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
185
186
187 @with_unsupported_dtypes(
188 {"2.6.0 and below": ("float16", "bfloat16", "int8", "int16")},
189 "paddle",
190 )
191 @to_ivy_arrays_and_back
192 def squeeze(x, axis=None, name=None):
193 return ivy.squeeze(x, axis=axis)
194
195
196 @to_ivy_arrays_and_back
197 def stack(x, axis=0, name=None):
198 return ivy.stack(x, axis=axis)
199
200
201 @with_supported_dtypes(
202 {"2.6.0 and below": ("float32", "float64")},
203 "paddle",
204 )
205 @to_ivy_arrays_and_back
206 def take_along_axis(arr, indices, axis):
207 return ivy.take_along_axis(arr, indices, axis)
208
209
210 @with_unsupported_dtypes(
211 {"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
212 "paddle",
213 )
214 @to_ivy_arrays_and_back
215 def tile(x, repeat_times, name=None):
216 return ivy.tile(x, repeats=repeat_times)
217
218
219 @to_ivy_arrays_and_back
220 def tolist(x):
221 return ivy.to_list(x)
222
223
224 @with_supported_dtypes(
225 {"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
226 "paddle",
227 )
228 @to_ivy_arrays_and_back
229 def unbind(input, axis=0):
230 shape = list(input.shape)
231 num_splits = shape[axis]
232 shape.pop(axis)
233 return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))
234
235
236 @with_supported_dtypes(
237 {"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
238 "paddle",
239 )
240 @to_ivy_arrays_and_back
241 def unique_consecutive(x, axis=0):
242 return ivy.unique_consecutive(x, axis=axis)
243
244
245 @with_supported_dtypes(
246 {
247 "2.6.0 and below": (
248 "float32",
249 "float64",
250 "int32",
251 "int64",
252 )
253 },
254 "paddle",
255 )
256 @to_ivy_arrays_and_back
257 def unstack(x, axis=0, name=None):
258 return ivy.unstack(x, axis=axis)
259
260
261 absolute = abs
262
[end of ivy/functional/frontends/paddle/manipulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/manipulation.py b/ivy/functional/frontends/paddle/manipulation.py
--- a/ivy/functional/frontends/paddle/manipulation.py
+++ b/ivy/functional/frontends/paddle/manipulation.py
@@ -208,7 +208,7 @@
@with_unsupported_dtypes(
- {"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
+ {"2.6.0 and below": ("int8", "uint8", "int16", "float16", "bfloat16")},
"paddle",
)
@to_ivy_arrays_and_back
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/manipulation.py b/ivy/functional/frontends/paddle/manipulation.py\n--- a/ivy/functional/frontends/paddle/manipulation.py\n+++ b/ivy/functional/frontends/paddle/manipulation.py\n@@ -208,7 +208,7 @@\n \n \n @with_unsupported_dtypes(\n- {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n+ {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n @to_ivy_arrays_and_back\n", "issue": "Fix Frontend Failing Test: jax - manipulation.paddle.tile\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef broadcast_to(x, shape, name=None):\n return ivy.broadcast_to(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.6.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef flip(x, axis, name=None):\n return ivy.flip(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather(params, indices, axis=-1, batch_dims=0, name=None):\n return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"uint16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather_nd(x, index, name=None):\n return ivy.gather_nd(x, index)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef index_add(x, index, axis, value, *, name=None):\n x = ivy.swapaxes(x, axis, 0)\n value = ivy.swapaxes(value, axis, 0)\n _to_adds = []\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))\n while index:\n _curr_idx = index[0][0]\n while len(_to_adds) < _curr_idx:\n _to_adds.append(ivy.zeros_like(value[0]))\n _to_add_cum = ivy.get_item(value, index[0][1])\n while (len(index)) > 1 and (index[0][0] == index[1][0]):\n _to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])\n index.pop(0)\n _to_adds.append(_to_add_cum)\n while len(_to_adds) < x.shape[0]:\n _to_adds.append(ivy.zeros_like(value[0]))\n _to_adds = ivy.stack(_to_adds)\n if len(x.shape) < 2:\n # Added this line due to the paddle backend treating 
scalars as 1-d arrays\n _to_adds = ivy.flatten(_to_adds)\n\n ret = ivy.add(x, _to_adds)\n ret = ivy.swapaxes(ret, axis, 0)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef put_along_axis(arr, indices, values, axis, reduce=\"assign\"):\n result = ivy.put_along_axis(arr, indices, values, axis)\n return result\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef repeat_interleave(x, repeats, axis=None, name=None):\n return ivy.repeat(x, repeats, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape, name=None):\n return ivy.reshape(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef roll(x, shifts, axis=None, name=None):\n return ivy.roll(x, shifts, axis=axis)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.6.0 and above\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\",),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rot90(x, k=1, axes=(0, 1), name=None):\n return ivy.rot90(x, k=k, axes=axes)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@to_ivy_arrays_and_back\ndef tolist(x):\n return ivy.to_list(x)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unbind(input, axis=0):\n shape = list(input.shape)\n num_splits = shape[axis]\n shape.pop(axis)\n return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unique_consecutive(x, axis=0):\n return ivy.unique_consecutive(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.6.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unstack(x, axis=0, name=None):\n return ivy.unstack(x, axis=axis)\n\n\nabsolute = abs\n", "path": "ivy/functional/frontends/paddle/manipulation.py"}]} | 3,291 | 160 |
gh_patches_debug_12462 | rasdani/github-patches | git_diff | dask__distributed-6306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
parse_stdout.py produces malformed Junit reports
parse_stdout.py has recently stopped working.
This causes Windows timeouts to be reported as a white box instead of a red box in https://dask.org/distributed/test_report.html.
https://github.com/dask/distributed/actions/runs/2293197167
> Publish test results: artifacts/windows-latest-3.10-notci1/pytest.xml#L976: Error processing result file: not well-formed (invalid token): line 976, column 93
</issue>
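The "not well-formed (invalid token)" error is what an XML parser reports when characters such as `<`, `&`, or quotes end up unescaped inside an attribute, which can easily happen with parametrized test ids. The reference patch later in this entry escapes the class and test names with `html.escape` before interpolating them; a minimal illustration:

```python
import html

def testcase_open_tag(clsname: str, name: str) -> str:
    # html.escape handles &, <, > and (by default) both quote characters, so
    # parametrized ids like 'test_bar[a<b]' cannot break the attribute syntax.
    return f'<testcase classname="{html.escape(clsname)}" name="{html.escape(name)}" time="0.0">'

print(testcase_open_tag("distributed.tests.test_core", 'test_ports[listen_port2-expect2]'))
```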
<code>
[start of continuous_integration/scripts/parse_stdout.py]
1 """On Windows, pytest-timeout kills off the whole test suite, leaving no junit report
2 behind. Parse the stdout of pytest to generate one.
3 """
4 from __future__ import annotations
5
6 import re
7 import sys
8 from collections import Counter, defaultdict
9 from collections.abc import Iterable
10 from datetime import datetime
11
12 OUTCOMES = {
13 "PASSED",
14 "FAILED",
15 # Test timeout. Marked as a variant of FAILED in the junit report
16 None,
17 # Setup failed or teardown failed.
18 # In the latter case, if the test also failed, show both a FAILED and an ERROR line.
19 "ERROR",
20 # @pytest.mark.skip, @pytest.mark.skipif, or raise pytest.skip()
21 "SKIPPED",
22 # Reported as a variant of SKIPPED in the junit report
23 "XFAIL",
24 # These appear respectively before and after another status. Ignore.
25 "RERUN",
26 "LEAKED",
27 }
28
29
30 def parse_rows(rows: Iterable[str]) -> list[tuple[str, str, set[str | None]]]:
31 match = re.compile(
32 r"(distributed/.*test.*)::([^ ]*)"
33 r"( (.*)(PASSED|FAILED|ERROR|SKIPPED|XFAIL|RERUN|LEAKED).*| )$"
34 )
35
36 out: defaultdict[tuple[str, str], set[str | None]] = defaultdict(set)
37
38 for row in rows:
39 m = match.match(row)
40 if not m:
41 continue
42
43 fname = m.group(1)
44 clsname = fname.replace("/", ".").replace(".py", "").replace("::", ".")
45
46 tname = m.group(2).strip()
47 if m.group(4) and "]" in m.group(4):
48 tname += " " + m.group(4).split("]")[0] + "]"
49
50 outcome = m.group(5)
51 assert outcome in OUTCOMES
52 if outcome not in {"RERUN", "LEAKED"}:
53 out[clsname, tname].add(outcome)
54
55 return [(clsname, tname, outcomes) for (clsname, tname), outcomes in out.items()]
56
57
58 def build_xml(rows: list[tuple[str, str, set[str | None]]]) -> None:
59 cnt = Counter(outcome for _, _, outcomes in rows for outcome in outcomes)
60 timestamp = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
61
62 # We could have used ElementTree but it feels like overkill here
63 print('<?xml version="1.0" encoding="utf-8"?>')
64 print("<testsuites>")
65 print(
66 '<testsuite name="distributed" '
67 f'errors="{cnt["ERROR"]}" failures="{cnt["FAILED"] + cnt[None]}" '
68 f'skipped="{cnt["SKIPPED"] + cnt["XFAIL"]}" tests="{sum(cnt.values())}" '
69 f'time="0.0" timestamp="{timestamp}" hostname="">'
70 )
71
72 for clsname, tname, outcomes in rows:
73 print(f'<testcase classname="{clsname}" name="{tname}" time="0.0"', end="")
74 if outcomes == {"PASSED"}:
75 print(" />")
76 elif outcomes == {"FAILED"}:
77 print('><failure message=""></failure></testcase>')
78 elif outcomes == {None}:
79 print('><failure message="pytest-timeout exceeded"></failure></testcase>')
80 elif outcomes == {"ERROR"}:
81 print('><error message="failed on setup"></error></testcase>')
82 elif outcomes == {"PASSED", "ERROR"}:
83 print('><error message="failed on teardown"></error></testcase>')
84 elif outcomes == {"FAILED", "ERROR"}:
85 print(
86 '><failure message=""></failure></testcase>\n'
87 f'<testcase classname="{clsname}" name="{tname}" time="0.0">'
88 '<error message="failed on teardown"></error></testcase>'
89 )
90 elif outcomes == {"SKIPPED"}:
91 print('><skipped type="pytest.skip" message="skip"></skipped></testcase>')
92 elif outcomes == {"XFAIL"}:
93 print('><skipped type="pytest.xfail" message="xfail"></skipped></testcase>')
94 else: # pragma: nocover
95 # This should be unreachable. We would normally raise ValueError, except
96 # that a crash in this script would be pretty much invisible.
97 print(
98 f' />\n<testcase classname="parse_stdout" name="build_xml" time="0.0">'
99 f'><failure message="Unexpected {outcomes=}"></failure></testcase>'
100 )
101
102 print("</testsuite>")
103 print("</testsuites>")
104
105
106 def main() -> None: # pragma: nocover
107 build_xml(parse_rows(sys.stdin))
108
109
110 if __name__ == "__main__":
111 main() # pragma: nocover
112
[end of continuous_integration/scripts/parse_stdout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/continuous_integration/scripts/parse_stdout.py b/continuous_integration/scripts/parse_stdout.py
--- a/continuous_integration/scripts/parse_stdout.py
+++ b/continuous_integration/scripts/parse_stdout.py
@@ -3,6 +3,7 @@
"""
from __future__ import annotations
+import html
import re
import sys
from collections import Counter, defaultdict
@@ -70,6 +71,8 @@
)
for clsname, tname, outcomes in rows:
+ clsname = html.escape(clsname)
+ tname = html.escape(tname)
print(f'<testcase classname="{clsname}" name="{tname}" time="0.0"', end="")
if outcomes == {"PASSED"}:
print(" />")
| {"golden_diff": "diff --git a/continuous_integration/scripts/parse_stdout.py b/continuous_integration/scripts/parse_stdout.py\n--- a/continuous_integration/scripts/parse_stdout.py\n+++ b/continuous_integration/scripts/parse_stdout.py\n@@ -3,6 +3,7 @@\n \"\"\"\n from __future__ import annotations\n \n+import html\n import re\n import sys\n from collections import Counter, defaultdict\n@@ -70,6 +71,8 @@\n )\n \n for clsname, tname, outcomes in rows:\n+ clsname = html.escape(clsname)\n+ tname = html.escape(tname)\n print(f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\"', end=\"\")\n if outcomes == {\"PASSED\"}:\n print(\" />\")\n", "issue": "parse_stdout.py produces malformed Junit reports\nparse_stdout.py has recently stopped working.\r\nThis causes Windows timeouts to be reported as a white box instead of a red box in https://dask.org/distributed/test_report.html.\r\n\r\nhttps://github.com/dask/distributed/actions/runs/2293197167\r\n> Publish test results:\u00a0artifacts/windows-latest-3.10-notci1/pytest.xml#L976Error processing result file: not well-formed (invalid token): line 976, column 93\r\n\n", "before_files": [{"content": "\"\"\"On Windows, pytest-timeout kills off the whole test suite, leaving no junit report\nbehind. Parse the stdout of pytest to generate one.\n\"\"\"\nfrom __future__ import annotations\n\nimport re\nimport sys\nfrom collections import Counter, defaultdict\nfrom collections.abc import Iterable\nfrom datetime import datetime\n\nOUTCOMES = {\n \"PASSED\",\n \"FAILED\",\n # Test timeout. Marked as a variant of FAILED in the junit report\n None,\n # Setup failed or teardown failed.\n # In the latter case, if the test also failed, show both a FAILED and an ERROR line.\n \"ERROR\",\n # @pytest.mark.skip, @pytest.mark.skipif, or raise pytest.skip()\n \"SKIPPED\",\n # Reported as a variant of SKIPPED in the junit report\n \"XFAIL\",\n # These appear respectively before and after another status. 
Ignore.\n \"RERUN\",\n \"LEAKED\",\n}\n\n\ndef parse_rows(rows: Iterable[str]) -> list[tuple[str, str, set[str | None]]]:\n match = re.compile(\n r\"(distributed/.*test.*)::([^ ]*)\"\n r\"( (.*)(PASSED|FAILED|ERROR|SKIPPED|XFAIL|RERUN|LEAKED).*| )$\"\n )\n\n out: defaultdict[tuple[str, str], set[str | None]] = defaultdict(set)\n\n for row in rows:\n m = match.match(row)\n if not m:\n continue\n\n fname = m.group(1)\n clsname = fname.replace(\"/\", \".\").replace(\".py\", \"\").replace(\"::\", \".\")\n\n tname = m.group(2).strip()\n if m.group(4) and \"]\" in m.group(4):\n tname += \" \" + m.group(4).split(\"]\")[0] + \"]\"\n\n outcome = m.group(5)\n assert outcome in OUTCOMES\n if outcome not in {\"RERUN\", \"LEAKED\"}:\n out[clsname, tname].add(outcome)\n\n return [(clsname, tname, outcomes) for (clsname, tname), outcomes in out.items()]\n\n\ndef build_xml(rows: list[tuple[str, str, set[str | None]]]) -> None:\n cnt = Counter(outcome for _, _, outcomes in rows for outcome in outcomes)\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n # We could have used ElementTree but it feels like overkill here\n print('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n print(\"<testsuites>\")\n print(\n '<testsuite name=\"distributed\" '\n f'errors=\"{cnt[\"ERROR\"]}\" failures=\"{cnt[\"FAILED\"] + cnt[None]}\" '\n f'skipped=\"{cnt[\"SKIPPED\"] + cnt[\"XFAIL\"]}\" tests=\"{sum(cnt.values())}\" '\n f'time=\"0.0\" timestamp=\"{timestamp}\" hostname=\"\">'\n )\n\n for clsname, tname, outcomes in rows:\n print(f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\"', end=\"\")\n if outcomes == {\"PASSED\"}:\n print(\" />\")\n elif outcomes == {\"FAILED\"}:\n print('><failure message=\"\"></failure></testcase>')\n elif outcomes == {None}:\n print('><failure message=\"pytest-timeout exceeded\"></failure></testcase>')\n elif outcomes == {\"ERROR\"}:\n print('><error message=\"failed on setup\"></error></testcase>')\n elif outcomes == {\"PASSED\", \"ERROR\"}:\n print('><error message=\"failed on teardown\"></error></testcase>')\n elif outcomes == {\"FAILED\", \"ERROR\"}:\n print(\n '><failure message=\"\"></failure></testcase>\\n'\n f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\">'\n '<error message=\"failed on teardown\"></error></testcase>'\n )\n elif outcomes == {\"SKIPPED\"}:\n print('><skipped type=\"pytest.skip\" message=\"skip\"></skipped></testcase>')\n elif outcomes == {\"XFAIL\"}:\n print('><skipped type=\"pytest.xfail\" message=\"xfail\"></skipped></testcase>')\n else: # pragma: nocover\n # This should be unreachable. We would normally raise ValueError, except\n # that a crash in this script would be pretty much invisible.\n print(\n f' />\\n<testcase classname=\"parse_stdout\" name=\"build_xml\" time=\"0.0\">'\n f'><failure message=\"Unexpected {outcomes=}\"></failure></testcase>'\n )\n\n print(\"</testsuite>\")\n print(\"</testsuites>\")\n\n\ndef main() -> None: # pragma: nocover\n build_xml(parse_rows(sys.stdin))\n\n\nif __name__ == \"__main__\":\n main() # pragma: nocover\n", "path": "continuous_integration/scripts/parse_stdout.py"}]} | 1,958 | 165 |
gh_patches_debug_34685 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1501 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
✨ enable SQLAlchemy logging
### Thanks for stopping by to let us know something could be better!
**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_
**Describe the solution you'd like** _A clear and concise description of what you want to happen._
**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._
**Additional context** _Add any other context or screenshots about the feature request here._
</issue>
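A minimal sketch of what enabling SQLAlchemy engine logging could look like, assuming only the standard `create_engine(echo=...)` flag and the stock `logging` module; the `OPTERYX_DEBUG` environment-variable gate and the SQLite URL below are placeholders for illustration, not existing Opteryx behaviour.

```python
# Sketch: two common ways to surface SQLAlchemy's statement logging.
# The OPTERYX_DEBUG gate and the in-memory SQLite URL are illustrative only.
import logging
import os

from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool

DEBUG_ENABLED = os.environ.get("OPTERYX_DEBUG") is not None

# Option 1: ask the engine itself to echo every statement it emits.
engine = create_engine("sqlite:///:memory:", poolclass=NullPool, echo=DEBUG_ENABLED)

# Option 2: route the same output through the standard logging module.
if DEBUG_ENABLED:
    logging.basicConfig()
    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)

with engine.connect() as conn:
    conn.exec_driver_sql("SELECT 1")  # logged when debugging is enabled
```

Either route prints the SQL that the connector sends to the remote server, which is the visibility this request asks for.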
<code>
[start of opteryx/connectors/sql_connector.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 """
14 The SQL Connector downloads data from remote servers and converts them
15 to pyarrow tables so they can be processed as per any other data source.
16 """
17 import time
18 from decimal import Decimal
19 from typing import Any
20 from typing import Dict
21 from typing import Generator
22 from typing import Tuple
23
24 import pyarrow
25 from orso import DataFrame
26 from orso.schema import ConstantColumn
27 from orso.schema import FlatColumn
28 from orso.schema import RelationSchema
29 from orso.tools import random_string
30 from orso.types import PYTHON_TO_ORSO_MAP
31
32 from opteryx.connectors.base.base_connector import DEFAULT_MORSEL_SIZE
33 from opteryx.connectors.base.base_connector import INITIAL_CHUNK_SIZE
34 from opteryx.connectors.base.base_connector import MIN_CHUNK_SIZE
35 from opteryx.connectors.base.base_connector import BaseConnector
36 from opteryx.connectors.capabilities import PredicatePushable
37 from opteryx.exceptions import MissingDependencyError
38 from opteryx.exceptions import UnmetRequirementError
39 from opteryx.managers.expression import Node
40 from opteryx.managers.expression import NodeType
41 from opteryx.third_party.query_builder import Query
42
43
44 def _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:
45 if operand.node_type == NodeType.IDENTIFIER:
46 return operand.source_column, parameters
47
48 literal = operand.value
49 if hasattr(literal, "item"):
50 literal = literal.item()
51
52 name = random_string(8)
53 parameters[name] = literal
54 return f":{name}", parameters
55
56
57 class SqlConnector(BaseConnector, PredicatePushable):
58 __mode__ = "Sql"
59
60 PUSHABLE_OPS: Dict[str, bool] = {
61 "Eq": True,
62 "NotEq": True,
63 "Gt": True,
64 "GtEq": True,
65 "Lt": True,
66 "LtEq": True,
67 "Like": True,
68 "NotLike": True,
69 }
70
71 OPS_XLAT: Dict[str, str] = {
72 "Eq": "=",
73 "NotEq": "!=",
74 "Gt": ">",
75 "GtEq": ">=",
76 "Lt": "<",
77 "LtEq": "<=",
78 "Like": "LIKE",
79 "NotLike": "NOT LIKE",
80 "IsTrue": "IS TRUE",
81 "IsNotTrue": "IS NOT TRUE",
82 "IsFalse": "IS FALSE",
83 "IsNotFalse": "IS NOT FALSE",
84 "IsNull": "IS NULL",
85 "IsNotNull": "IS NOT NULL",
86 }
87
88 def __init__(self, *args, connection: str = None, engine=None, **kwargs):
89 BaseConnector.__init__(self, **kwargs)
90 PredicatePushable.__init__(self, **kwargs)
91
92 try:
93 from sqlalchemy import MetaData
94 from sqlalchemy import create_engine
95 from sqlalchemy.pool import NullPool
96 except ImportError as err: # pragma: nocover
97 raise MissingDependencyError(err.name) from err
98
99 if engine is None and connection is None: # pragma: no cover
100 raise UnmetRequirementError(
101 "SQL Connections require either a SQL Alchemy connection string in the 'connection' parameter, or a SQL Alchemy Engine in the 'engine' parameter."
102 )
103
104 # create the SqlAlchemy engine
105 if engine is None:
106 self._engine = create_engine(connection, poolclass=NullPool)
107 else:
108 self._engine = engine
109
110 self.schema = None # type: ignore
111 self.metadata = MetaData()
112
113 def can_push(self, operator: Node, types: set = None) -> bool:
114 if super().can_push(operator, types):
115 return True
116 return operator.condition.node_type == NodeType.UNARY_OPERATOR
117
118 def read_dataset( # type:ignore
119 self,
120 *,
121 columns: list = None,
122 predicates: list = None,
123 chunk_size: int = INITIAL_CHUNK_SIZE, # type:ignore
124 ) -> Generator[pyarrow.Table, None, None]: # type:ignore
125 from sqlalchemy.sql import text
126
127 self.chunk_size = chunk_size
128 result_schema = self.schema
129
130 query_builder = Query().FROM(self.dataset)
131
132 # Update the SQL and the target morsel schema if we've pushed a projection
133 if columns:
134 column_names = [col.name for col in columns]
135 query_builder.add("SELECT", *column_names)
136 result_schema.columns = [ # type:ignore
137 col for col in self.schema.columns if col.name in column_names # type:ignore
138 ]
139 elif self.schema.columns: # type:ignore
140 query_builder.add("SELECT", "*")
141 else:
142 query_builder.add("SELECT", "1")
143 self.schema.columns.append(ConstantColumn(name="1", value=1)) # type:ignore
144
145 # Update SQL if we've pushed predicates
146 parameters: dict = {}
147 for predicate in predicates:
148 if predicate.node_type == NodeType.UNARY_OPERATOR:
149 operand = predicate.centre.current_name
150 operator = self.OPS_XLAT[predicate.value]
151
152 query_builder.WHERE(f"{operand} {operator}")
153 else:
154 left_operand = predicate.left
155 right_operand = predicate.right
156 operator = self.OPS_XLAT[predicate.value]
157
158 left_value, parameters = _handle_operand(left_operand, parameters)
159 right_value, parameters = _handle_operand(right_operand, parameters)
160
161 query_builder.WHERE(f"{left_value} {operator} {right_value}")
162
163 at_least_once = False
164
165 convert_time = 0.0
166
167 with self._engine.connect() as conn:
168 # DEBUG: log ("READ DATASET\n", str(query_builder))
169 # DEBUG: log ("PARAMETERS\n", parameters)
170 # Execution Options allows us to handle datasets larger than memory
171 result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(
172 text(str(query_builder)), parameters=parameters
173 )
174
175 while True:
176 batch_rows = result.fetchmany(self.chunk_size)
177 if not batch_rows:
178 break
179
180 # convert the SqlAlchemy Results to Arrow using Orso
181 b = time.monotonic_ns()
182 morsel = DataFrame(schema=result_schema, rows=batch_rows).arrow()
183 convert_time += time.monotonic_ns() - b
184 yield morsel
185 at_least_once = True
186
187 # Dynamically adjust chunk size based on the data size, we start by downloading
188 # 500 records to get an idea of the row size, assuming these 500 are
189 # representative, we work out how many rows fit into 16Mb (check setting).
190 # Don't keep recalculating, this is not a cheap operation and it's predicting
191 # the future so isn't going to ever be 100% correct
192 if self.chunk_size == INITIAL_CHUNK_SIZE and morsel.nbytes > 0:
193 self.chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE)) + 1
194 self.chunk_size = (self.chunk_size // MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE
195 self.chunk_size = max(self.chunk_size, MIN_CHUNK_SIZE)
196 # DEBUG: log (f"CHANGING CHUNK SIZE TO {self.chunk_size} was {INITIAL_CHUNK_SIZE}.")
197
198 if not at_least_once:
199 yield DataFrame(schema=result_schema).arrow()
200
201 # DEBUG: log (f"time spent converting: {convert_time/1e9}s")
202
203 def get_dataset_schema(self) -> RelationSchema:
204 from sqlalchemy import Table
205
206 if self.schema:
207 return self.schema
208
209 # Try to read the schema from the metastore
210 self.schema = self.read_schema_from_metastore()
211 if self.schema:
212 return self.schema
213
214 # get the schema from the dataset
215 # DEBUG: log ("GET SQL SCHEMA:", self.dataset)
216 try:
217 table = Table(self.dataset, self.metadata, autoload_with=self._engine)
218
219 self.schema = RelationSchema(
220 name=table.name,
221 columns=[
222 FlatColumn(
223 name=column.name,
224 type=PYTHON_TO_ORSO_MAP[column.type.python_type],
225 precision=(
226 None
227 if column.type.python_type != Decimal
228 else column.type.precision # type:ignore
229 ),
230 scale=(
231 None
232 if column.type.python_type != Decimal
233 else column.type.scale # type:ignore
234 ),
235 nullable=column.nullable,
236 )
237 for column in table.columns
238 ],
239 )
240 except Exception as err:
241 # Fall back to getting the schema from the first row, this is the column names, and where
242 # possible, column types.
243 # DEBUG: log (f"APPROXIMATING SCHEMA OF {self.dataset} BECAUSE OF {type(err).__name__}({err})")
244 from sqlalchemy.sql import text
245
246 with self._engine.connect() as conn:
247 query = Query().SELECT("*").FROM(self.dataset).LIMIT("1")
248 # DEBUG: log ("READ ROW\n", str(query))
249 row = conn.execute(text(str(query))).fetchone()._asdict()
250 # DEBUG: log ("ROW:", row)
251 self.schema = RelationSchema(
252 name=self.dataset,
253 columns=[
254 FlatColumn(
255 name=column,
256 type=0 if value is None else PYTHON_TO_ORSO_MAP[type(value)],
257 )
258 for column, value in row.items()
259 ],
260 )
261
262 return self.schema
263
[end of opteryx/connectors/sql_connector.py]
[start of opteryx/__version__.py]
1 __build__ = 340
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 13
31 _revision = 4
32 _status = VersionStatus.ALPHA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
[end of opteryx/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 340
+__build__ = 341
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/opteryx/connectors/sql_connector.py b/opteryx/connectors/sql_connector.py
--- a/opteryx/connectors/sql_connector.py
+++ b/opteryx/connectors/sql_connector.py
@@ -14,6 +14,7 @@
The SQL Connector downloads data from remote servers and converts them
to pyarrow tables so they can be processed as per any other data source.
"""
+import os
import time
from decimal import Decimal
from typing import Any
@@ -40,6 +41,8 @@
from opteryx.managers.expression import NodeType
from opteryx.third_party.query_builder import Query
+DEBUG_ENABLED = os.environ.get("OPTERYX_DEBUG") is not None
+
def _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:
if operand.node_type == NodeType.IDENTIFIER:
@@ -103,7 +106,7 @@
# create the SqlAlchemy engine
if engine is None:
- self._engine = create_engine(connection, poolclass=NullPool)
+ self._engine = create_engine(connection, poolclass=NullPool, echo=DEBUG_ENABLED)
else:
self._engine = engine
@@ -168,7 +171,7 @@
# DEBUG: log ("READ DATASET\n", str(query_builder))
# DEBUG: log ("PARAMETERS\n", parameters)
# Execution Options allows us to handle datasets larger than memory
- result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(
+ result = conn.execution_options(stream_results=True, max_row_buffer=25000).execute(
text(str(query_builder)), parameters=parameters
)
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 340\n+__build__ = 341\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/connectors/sql_connector.py b/opteryx/connectors/sql_connector.py\n--- a/opteryx/connectors/sql_connector.py\n+++ b/opteryx/connectors/sql_connector.py\n@@ -14,6 +14,7 @@\n The SQL Connector downloads data from remote servers and converts them\n to pyarrow tables so they can be processed as per any other data source.\n \"\"\"\n+import os\n import time\n from decimal import Decimal\n from typing import Any\n@@ -40,6 +41,8 @@\n from opteryx.managers.expression import NodeType\n from opteryx.third_party.query_builder import Query\n \n+DEBUG_ENABLED = os.environ.get(\"OPTERYX_DEBUG\") is not None\n+\n \n def _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:\n if operand.node_type == NodeType.IDENTIFIER:\n@@ -103,7 +106,7 @@\n \n # create the SqlAlchemy engine\n if engine is None:\n- self._engine = create_engine(connection, poolclass=NullPool)\n+ self._engine = create_engine(connection, poolclass=NullPool, echo=DEBUG_ENABLED)\n else:\n self._engine = engine\n \n@@ -168,7 +171,7 @@\n # DEBUG: log (\"READ DATASET\\n\", str(query_builder))\n # DEBUG: log (\"PARAMETERS\\n\", parameters)\n # Execution Options allows us to handle datasets larger than memory\n- result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(\n+ result = conn.execution_options(stream_results=True, max_row_buffer=25000).execute(\n text(str(query_builder)), parameters=parameters\n )\n", "issue": "\u2728 enable SQLAlchemy logging\n### Thanks for stopping by to let us know something could be better!\r\n\r\n**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. 
I'm always frustrated when [...]_\r\n\r\n**Describe the solution you'd like** _A clear and concise description of what you want to happen._\r\n\r\n**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._\r\n\r\n**Additional context** _Add any other context or screenshots about the feature request here._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe SQL Connector downloads data from remote servers and converts them\nto pyarrow tables so they can be processed as per any other data source.\n\"\"\"\nimport time\nfrom decimal import Decimal\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Tuple\n\nimport pyarrow\nfrom orso import DataFrame\nfrom orso.schema import ConstantColumn\nfrom orso.schema import FlatColumn\nfrom orso.schema import RelationSchema\nfrom orso.tools import random_string\nfrom orso.types import PYTHON_TO_ORSO_MAP\n\nfrom opteryx.connectors.base.base_connector import DEFAULT_MORSEL_SIZE\nfrom opteryx.connectors.base.base_connector import INITIAL_CHUNK_SIZE\nfrom opteryx.connectors.base.base_connector import MIN_CHUNK_SIZE\nfrom opteryx.connectors.base.base_connector import BaseConnector\nfrom opteryx.connectors.capabilities import PredicatePushable\nfrom opteryx.exceptions import MissingDependencyError\nfrom opteryx.exceptions import UnmetRequirementError\nfrom opteryx.managers.expression import Node\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.third_party.query_builder import Query\n\n\ndef _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:\n if operand.node_type == NodeType.IDENTIFIER:\n return operand.source_column, parameters\n\n literal = operand.value\n if hasattr(literal, \"item\"):\n literal = literal.item()\n\n name = random_string(8)\n parameters[name] = literal\n return f\":{name}\", parameters\n\n\nclass SqlConnector(BaseConnector, PredicatePushable):\n __mode__ = \"Sql\"\n\n PUSHABLE_OPS: Dict[str, bool] = {\n \"Eq\": True,\n \"NotEq\": True,\n \"Gt\": True,\n \"GtEq\": True,\n \"Lt\": True,\n \"LtEq\": True,\n \"Like\": True,\n \"NotLike\": True,\n }\n\n OPS_XLAT: Dict[str, str] = {\n \"Eq\": \"=\",\n \"NotEq\": \"!=\",\n \"Gt\": \">\",\n \"GtEq\": \">=\",\n \"Lt\": \"<\",\n \"LtEq\": \"<=\",\n \"Like\": \"LIKE\",\n \"NotLike\": \"NOT LIKE\",\n \"IsTrue\": \"IS TRUE\",\n \"IsNotTrue\": \"IS NOT TRUE\",\n \"IsFalse\": \"IS FALSE\",\n \"IsNotFalse\": \"IS NOT FALSE\",\n \"IsNull\": \"IS NULL\",\n \"IsNotNull\": \"IS NOT NULL\",\n }\n\n def __init__(self, *args, connection: str = None, engine=None, **kwargs):\n BaseConnector.__init__(self, **kwargs)\n PredicatePushable.__init__(self, **kwargs)\n\n try:\n from sqlalchemy import MetaData\n from sqlalchemy import create_engine\n from sqlalchemy.pool import NullPool\n except ImportError as err: # pragma: nocover\n raise MissingDependencyError(err.name) from err\n\n if engine is None and connection is 
None: # pragma: no cover\n raise UnmetRequirementError(\n \"SQL Connections require either a SQL Alchemy connection string in the 'connection' parameter, or a SQL Alchemy Engine in the 'engine' parameter.\"\n )\n\n # create the SqlAlchemy engine\n if engine is None:\n self._engine = create_engine(connection, poolclass=NullPool)\n else:\n self._engine = engine\n\n self.schema = None # type: ignore\n self.metadata = MetaData()\n\n def can_push(self, operator: Node, types: set = None) -> bool:\n if super().can_push(operator, types):\n return True\n return operator.condition.node_type == NodeType.UNARY_OPERATOR\n\n def read_dataset( # type:ignore\n self,\n *,\n columns: list = None,\n predicates: list = None,\n chunk_size: int = INITIAL_CHUNK_SIZE, # type:ignore\n ) -> Generator[pyarrow.Table, None, None]: # type:ignore\n from sqlalchemy.sql import text\n\n self.chunk_size = chunk_size\n result_schema = self.schema\n\n query_builder = Query().FROM(self.dataset)\n\n # Update the SQL and the target morsel schema if we've pushed a projection\n if columns:\n column_names = [col.name for col in columns]\n query_builder.add(\"SELECT\", *column_names)\n result_schema.columns = [ # type:ignore\n col for col in self.schema.columns if col.name in column_names # type:ignore\n ]\n elif self.schema.columns: # type:ignore\n query_builder.add(\"SELECT\", \"*\")\n else:\n query_builder.add(\"SELECT\", \"1\")\n self.schema.columns.append(ConstantColumn(name=\"1\", value=1)) # type:ignore\n\n # Update SQL if we've pushed predicates\n parameters: dict = {}\n for predicate in predicates:\n if predicate.node_type == NodeType.UNARY_OPERATOR:\n operand = predicate.centre.current_name\n operator = self.OPS_XLAT[predicate.value]\n\n query_builder.WHERE(f\"{operand} {operator}\")\n else:\n left_operand = predicate.left\n right_operand = predicate.right\n operator = self.OPS_XLAT[predicate.value]\n\n left_value, parameters = _handle_operand(left_operand, parameters)\n right_value, parameters = _handle_operand(right_operand, parameters)\n\n query_builder.WHERE(f\"{left_value} {operator} {right_value}\")\n\n at_least_once = False\n\n convert_time = 0.0\n\n with self._engine.connect() as conn:\n # DEBUG: log (\"READ DATASET\\n\", str(query_builder))\n # DEBUG: log (\"PARAMETERS\\n\", parameters)\n # Execution Options allows us to handle datasets larger than memory\n result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(\n text(str(query_builder)), parameters=parameters\n )\n\n while True:\n batch_rows = result.fetchmany(self.chunk_size)\n if not batch_rows:\n break\n\n # convert the SqlAlchemy Results to Arrow using Orso\n b = time.monotonic_ns()\n morsel = DataFrame(schema=result_schema, rows=batch_rows).arrow()\n convert_time += time.monotonic_ns() - b\n yield morsel\n at_least_once = True\n\n # Dynamically adjust chunk size based on the data size, we start by downloading\n # 500 records to get an idea of the row size, assuming these 500 are\n # representative, we work out how many rows fit into 16Mb (check setting).\n # Don't keep recalculating, this is not a cheap operation and it's predicting\n # the future so isn't going to ever be 100% correct\n if self.chunk_size == INITIAL_CHUNK_SIZE and morsel.nbytes > 0:\n self.chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE)) + 1\n self.chunk_size = (self.chunk_size // MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE\n self.chunk_size = max(self.chunk_size, MIN_CHUNK_SIZE)\n # DEBUG: log (f\"CHANGING CHUNK SIZE TO {self.chunk_size} was 
{INITIAL_CHUNK_SIZE}.\")\n\n if not at_least_once:\n yield DataFrame(schema=result_schema).arrow()\n\n # DEBUG: log (f\"time spent converting: {convert_time/1e9}s\")\n\n def get_dataset_schema(self) -> RelationSchema:\n from sqlalchemy import Table\n\n if self.schema:\n return self.schema\n\n # Try to read the schema from the metastore\n self.schema = self.read_schema_from_metastore()\n if self.schema:\n return self.schema\n\n # get the schema from the dataset\n # DEBUG: log (\"GET SQL SCHEMA:\", self.dataset)\n try:\n table = Table(self.dataset, self.metadata, autoload_with=self._engine)\n\n self.schema = RelationSchema(\n name=table.name,\n columns=[\n FlatColumn(\n name=column.name,\n type=PYTHON_TO_ORSO_MAP[column.type.python_type],\n precision=(\n None\n if column.type.python_type != Decimal\n else column.type.precision # type:ignore\n ),\n scale=(\n None\n if column.type.python_type != Decimal\n else column.type.scale # type:ignore\n ),\n nullable=column.nullable,\n )\n for column in table.columns\n ],\n )\n except Exception as err:\n # Fall back to getting the schema from the first row, this is the column names, and where\n # possible, column types.\n # DEBUG: log (f\"APPROXIMATING SCHEMA OF {self.dataset} BECAUSE OF {type(err).__name__}({err})\")\n from sqlalchemy.sql import text\n\n with self._engine.connect() as conn:\n query = Query().SELECT(\"*\").FROM(self.dataset).LIMIT(\"1\")\n # DEBUG: log (\"READ ROW\\n\", str(query))\n row = conn.execute(text(str(query))).fetchone()._asdict()\n # DEBUG: log (\"ROW:\", row)\n self.schema = RelationSchema(\n name=self.dataset,\n columns=[\n FlatColumn(\n name=column,\n type=0 if value is None else PYTHON_TO_ORSO_MAP[type(value)],\n )\n for column, value in row.items()\n ],\n )\n\n return self.schema\n", "path": "opteryx/connectors/sql_connector.py"}, {"content": "__build__ = 340\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 13\n_revision = 4\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]} | 3,891 | 474 |
gh_patches_debug_9502 | rasdani/github-patches | git_diff | redis__redis-py-2112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support CASESENSITIVE tag in Tag Field
link: https://oss.redis.com/redisearch/Commands/#ftcreate
</issue>
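A small usage sketch, assuming the feature lands as a `case_sensitive` keyword on `TagField`; the index name, key prefix and field names are invented for illustration.

```python
# Sketch: declaring a case-sensitive TAG field when creating a RediSearch index.
# Index/field names are placeholders; case_sensitive=True is the option requested here.
import redis
from redis.commands.search.field import TagField, TextField
from redis.commands.search.indexDefinition import IndexDefinition

r = redis.Redis()

r.ft("idx:docs").create_index(
    (
        TextField("title"),
        TagField("label", case_sensitive=True),
    ),
    definition=IndexDefinition(prefix=["doc:"]),
)
```

At the protocol level this corresponds to declaring the field as `label TAG SEPARATOR , CASESENSITIVE` in `FT.CREATE`, per the linked reference.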
<code>
[start of redis/commands/search/field.py]
1 from typing import List
2
3 from redis import DataError
4
5
6 class Field:
7
8 NUMERIC = "NUMERIC"
9 TEXT = "TEXT"
10 WEIGHT = "WEIGHT"
11 GEO = "GEO"
12 TAG = "TAG"
13 VECTOR = "VECTOR"
14 SORTABLE = "SORTABLE"
15 NOINDEX = "NOINDEX"
16 AS = "AS"
17
18 def __init__(
19 self,
20 name: str,
21 args: List[str] = None,
22 sortable: bool = False,
23 no_index: bool = False,
24 as_name: str = None,
25 ):
26 if args is None:
27 args = []
28 self.name = name
29 self.args = args
30 self.args_suffix = list()
31 self.as_name = as_name
32
33 if sortable:
34 self.args_suffix.append(Field.SORTABLE)
35 if no_index:
36 self.args_suffix.append(Field.NOINDEX)
37
38 if no_index and not sortable:
39 raise ValueError("Non-Sortable non-Indexable fields are ignored")
40
41 def append_arg(self, value):
42 self.args.append(value)
43
44 def redis_args(self):
45 args = [self.name]
46 if self.as_name:
47 args += [self.AS, self.as_name]
48 args += self.args
49 args += self.args_suffix
50 return args
51
52
53 class TextField(Field):
54 """
55 TextField is used to define a text field in a schema definition
56 """
57
58 NOSTEM = "NOSTEM"
59 PHONETIC = "PHONETIC"
60
61 def __init__(
62 self,
63 name: str,
64 weight: float = 1.0,
65 no_stem: bool = False,
66 phonetic_matcher: str = None,
67 **kwargs,
68 ):
69 Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
70
71 if no_stem:
72 Field.append_arg(self, self.NOSTEM)
73 if phonetic_matcher and phonetic_matcher in [
74 "dm:en",
75 "dm:fr",
76 "dm:pt",
77 "dm:es",
78 ]:
79 Field.append_arg(self, self.PHONETIC)
80 Field.append_arg(self, phonetic_matcher)
81
82
83 class NumericField(Field):
84 """
85 NumericField is used to define a numeric field in a schema definition
86 """
87
88 def __init__(self, name: str, **kwargs):
89 Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)
90
91
92 class GeoField(Field):
93 """
94 GeoField is used to define a geo-indexing field in a schema definition
95 """
96
97 def __init__(self, name: str, **kwargs):
98 Field.__init__(self, name, args=[Field.GEO], **kwargs)
99
100
101 class TagField(Field):
102 """
103 TagField is a tag-indexing field with simpler compression and tokenization.
104 See http://redisearch.io/Tags/
105 """
106
107 SEPARATOR = "SEPARATOR"
108
109 def __init__(self, name: str, separator: str = ",", **kwargs):
110 Field.__init__(
111 self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs
112 )
113
114
115 class VectorField(Field):
116 """
117 Allows vector similarity queries against the value in this attribute.
118 See https://oss.redis.com/redisearch/Vectors/#vector_fields.
119 """
120
121 def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
122 """
123 Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
124 although it's also a Field.
125
126 ``name`` is the name of the field.
127
128 ``algorithm`` can be "FLAT" or "HNSW".
129
130 ``attributes`` each algorithm can have specific attributes. Some of them
131 are mandatory and some of them are optional. See
132 https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
133 for more information.
134 """
135 sort = kwargs.get("sortable", False)
136 noindex = kwargs.get("no_index", False)
137
138 if sort or noindex:
139 raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
140
141 if algorithm.upper() not in ["FLAT", "HNSW"]:
142 raise DataError(
143 "Realtime vector indexing supporting 2 Indexing Methods:"
144 "'FLAT' and 'HNSW'."
145 )
146
147 attr_li = []
148
149 for key, value in attributes.items():
150 attr_li.extend([key, value])
151
152 Field.__init__(
153 self,
154 name,
155 args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],
156 **kwargs,
157 )
158
[end of redis/commands/search/field.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -105,11 +105,20 @@
"""
SEPARATOR = "SEPARATOR"
+ CASESENSITIVE = "CASESENSITIVE"
- def __init__(self, name: str, separator: str = ",", **kwargs):
- Field.__init__(
- self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs
- )
+ def __init__(
+ self,
+ name: str,
+ separator: str = ",",
+ case_sensitive: bool = False,
+ **kwargs,
+ ):
+ args = [Field.TAG, self.SEPARATOR, separator]
+ if case_sensitive:
+ args.append(self.CASESENSITIVE)
+
+ Field.__init__(self, name, args=args, **kwargs)
class VectorField(Field):
| {"golden_diff": "diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py\n--- a/redis/commands/search/field.py\n+++ b/redis/commands/search/field.py\n@@ -105,11 +105,20 @@\n \"\"\"\n \n SEPARATOR = \"SEPARATOR\"\n+ CASESENSITIVE = \"CASESENSITIVE\"\n \n- def __init__(self, name: str, separator: str = \",\", **kwargs):\n- Field.__init__(\n- self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs\n- )\n+ def __init__(\n+ self,\n+ name: str,\n+ separator: str = \",\",\n+ case_sensitive: bool = False,\n+ **kwargs,\n+ ):\n+ args = [Field.TAG, self.SEPARATOR, separator]\n+ if case_sensitive:\n+ args.append(self.CASESENSITIVE)\n+\n+ Field.__init__(self, name, args=args, **kwargs)\n \n \n class VectorField(Field):\n", "issue": "Support CASESENSITIVE tag in Tag Field \nlink: https://oss.redis.com/redisearch/Commands/#ftcreate\n", "before_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n\n def __init__(self, name: str, separator: str = \",\", **kwargs):\n Field.__init__(\n self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs\n )\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n 
Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self,\n name,\n args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],\n **kwargs,\n )\n", "path": "redis/commands/search/field.py"}]} | 1,958 | 230 |
gh_patches_debug_61829 | rasdani/github-patches | git_diff | pulp__pulpcore-4010 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
</issue>
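A stripped-down sketch of the drf-spectacular pattern involved, using a stand-in `ViewSet` rather than Pulp's real code: declaring `request=None` on the extra action tells the schema generator that the POST takes no request body, so the serializer's attributes are not repeated under the replicate operation in the generated document.

```python
# Sketch: a stand-in viewset showing how extend_schema controls the documented
# request body for an action that accepts no payload.
from drf_spectacular.utils import extend_schema
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response


class ExampleUpstreamViewSet(viewsets.ViewSet):
    @extend_schema(
        summary="Replicate",
        description="Trigger an asynchronous replication task group.",
        request=None,  # no request body -> no attributes documented as input
        responses={202: None},  # a task-group serializer would normally go here
    )
    @action(detail=True, methods=["post"])
    def replicate(self, request, pk=None):
        return Response(status=202)
```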
<code>
[start of pulpcore/app/viewsets/replica.py]
1 """
2 ViewSet for replicating repositories and distributions from an upstream Pulp
3 """
4 from django.conf import settings
5 from drf_spectacular.utils import extend_schema
6 from rest_framework import mixins
7 from rest_framework.decorators import action
8
9 from pulpcore.app.models import TaskGroup, UpstreamPulp
10 from pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer
11 from pulpcore.app.viewsets import NamedModelViewSet
12 from pulpcore.app.response import TaskGroupOperationResponse
13 from pulpcore.app.tasks import replicate_distributions
14 from pulpcore.tasking.tasks import dispatch
15
16
17 class UpstreamPulpViewSet(
18 NamedModelViewSet,
19 mixins.CreateModelMixin,
20 mixins.RetrieveModelMixin,
21 mixins.ListModelMixin,
22 mixins.DestroyModelMixin,
23 mixins.UpdateModelMixin,
24 ):
25 """API for configuring an upstream Pulp to replicate. This API is provided as a tech preview."""
26
27 queryset = UpstreamPulp.objects.all()
28 endpoint_name = "upstream-pulps"
29 serializer_class = UpstreamPulpSerializer
30 ordering = "-pulp_created"
31
32 @extend_schema(
33 summary="Replicate",
34 description="Trigger an asynchronous repository replication task group. This API is "
35 "provided as a tech preview.",
36 responses={202: AsyncOperationResponseSerializer},
37 )
38 @action(detail=True, methods=["post"])
39 def replicate(self, request, pk):
40 """
41 Triggers an asynchronous repository replication operation.
42 """
43 server = UpstreamPulp.objects.get(pk=pk)
44 task_group = TaskGroup.objects.create(description=f"Replication of {server.name}")
45
46 uri = "/api/v3/servers/"
47 if settings.DOMAIN_ENABLED:
48 uri = f"/{request.domain.name}{uri}"
49
50 dispatch(
51 replicate_distributions,
52 exclusive_resources=[uri],
53 kwargs={"server_pk": pk},
54 task_group=task_group,
55 )
56
57 return TaskGroupOperationResponse(task_group, request)
58
[end of pulpcore/app/viewsets/replica.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py\n--- a/pulpcore/app/viewsets/replica.py\n+++ b/pulpcore/app/viewsets/replica.py\n@@ -33,6 +33,7 @@\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n+ request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n", "issue": "RESTAPI document fix for Upstream Pulp Replication API\n**Version**\r\nPulp installed through the Python modules.\r\n\"core:3.28.0\"\r\n\"certguard:3.28.0\"\r\n\"file:3.28.0\"\r\n\"python:3.28.0\"\r\n\"rpm:3.28.0\"\r\n\r\n**Describe the bug**\r\nWhy the attributes of **upstream_pulps_create**/**update** is mentioned again in the **upstream_pulps_replicate\" document? Are those attributes (base_url, api_root, domain,...) used at time making an API request \"https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/\"?\r\n\r\n**To Reproduce**\r\nNone.\r\n\r\n**Expected behavior**\r\nA fix is required in the REST API document.\r\n\r\n**Additional context**\r\nCreate Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create\r\nUpstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}]} | 1,328 | 122 |
gh_patches_debug_44395 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mkdocs 1.2.x livereload no longer working
mkdocs==1.2 and the latest release no longer work
mkdocs==1.1.2 works
MacOS 11.4
Docker version 20.10.7, build f0df350
python:3.8-alpine (3.9.5 and 10-rc same behavior)
I checked inside the Docker container: the markdown source is updating, but the generated HTML is not. This seems to be associated with the 1.2 change to the file watcher.
Let me know what logs/config/diagnostics I can provide to help root-cause the issue.
</issue>
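A likely explanation for this symptom is that native filesystem events (inotify/FSEvents) do not propagate into the container for macOS bind mounts, so an event-based watcher never fires. Below is a small sketch of watchdog's polling observer, which re-scans the tree on a timer instead; the `docs` path and the 0.5 s interval are placeholders.

```python
# Sketch: polling-based file watching that keeps working on bind-mounted volumes.
# The watched path and the interval are illustrative only.
import time

import watchdog.events
import watchdog.observers.polling


def on_any_event(event):
    if not event.is_directory:
        print(f"change detected: {event.src_path}")  # a real server would rebuild here


handler = watchdog.events.FileSystemEventHandler()
handler.on_any_event = on_any_event

observer = watchdog.observers.polling.PollingObserver(timeout=0.5)  # re-scan every 0.5 s
observer.schedule(handler, "docs", recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
finally:
    observer.stop()
    observer.join()
```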
<code>
[start of mkdocs/livereload/__init__.py]
1 import functools
2 import io
3 import logging
4 import mimetypes
5 import os
6 import os.path
7 import pathlib
8 import re
9 import socketserver
10 import threading
11 import time
12 import warnings
13 import wsgiref.simple_server
14
15 import watchdog.events
16 import watchdog.observers
17
18
19 class _LoggerAdapter(logging.LoggerAdapter):
20 def process(self, msg, kwargs):
21 return time.strftime("[%H:%M:%S] ") + msg, kwargs
22
23
24 log = _LoggerAdapter(logging.getLogger(__name__), {})
25
26
27 class LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
28 daemon_threads = True
29 poll_response_timeout = 60
30
31 def __init__(
32 self,
33 builder,
34 host,
35 port,
36 root,
37 mount_path="/",
38 build_delay=0.25,
39 shutdown_delay=0.25,
40 **kwargs,
41 ):
42 self.builder = builder
43 self.server_name = host
44 self.server_port = port
45 self.root = os.path.abspath(root)
46 self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
47 self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
48 self.build_delay = build_delay
49 self.shutdown_delay = shutdown_delay
50 # To allow custom error pages.
51 self.error_handler = lambda code: None
52
53 super().__init__((host, port), _Handler, **kwargs)
54 self.set_app(self.serve_request)
55
56 self._wanted_epoch = _timestamp() # The version of the site that started building.
57 self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.
58 self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.
59
60 self._to_rebuild = {} # Used as an ordered set of functions to call.
61 self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.
62
63 self._shutdown = False
64 self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
65 self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
66
67 def watch(self, path, func=None, recursive=True):
68 """Add the 'path' to watched paths, call the function and reload when any file changes under it."""
69 path = os.path.abspath(path)
70 if func in (None, self.builder):
71 func = self.builder
72 else:
73 warnings.warn(
74 "Plugins should not pass the 'func' parameter of watch(). "
75 "The ability to execute custom callbacks will be removed soon.",
76 DeprecationWarning,
77 stacklevel=2,
78 )
79
80 def callback(event, allowed_path=None):
81 if isinstance(event, watchdog.events.DirCreatedEvent):
82 return
83 if allowed_path is not None and event.src_path != allowed_path:
84 return
85 # Text editors always cause a "file close" event in addition to "modified" when saving
86 # a file. Some editors also have "swap" functionality that keeps writing into another
87 # file that's never closed. Prevent such write events from causing a rebuild.
88 if isinstance(event, watchdog.events.FileModifiedEvent):
89 # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
90 if type(self.observer).__name__ == "InotifyObserver":
91 return
92 log.debug(str(event))
93 with self._rebuild_cond:
94 self._to_rebuild[func] = True
95 self._rebuild_cond.notify_all()
96
97 dir_handler = watchdog.events.FileSystemEventHandler()
98 dir_handler.on_any_event = callback
99
100 seen = set()
101
102 def schedule(path):
103 seen.add(path)
104 if path.is_file():
105 # Watchdog doesn't support watching files, so watch its directory and filter by path
106 handler = watchdog.events.FileSystemEventHandler()
107 handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))
108
109 parent = path.parent
110 log.debug(f"Watching file '{path}' through directory '{parent}'")
111 self.observer.schedule(handler, parent)
112 else:
113 log.debug(f"Watching directory '{path}'")
114 self.observer.schedule(dir_handler, path, recursive=recursive)
115
116 schedule(pathlib.Path(path).resolve())
117
118 def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
119 if path_obj.is_symlink():
120 path_obj = pathlib.Path(path_obj).resolve()
121 if path_obj in seen or not path_obj.exists():
122 return
123 schedule(path_obj)
124
125 if path_obj.is_dir() and recursive:
126 with os.scandir(os.fspath(path_obj)) as scan:
127 for entry in scan:
128 watch_symlink_targets(entry)
129
130 watch_symlink_targets(pathlib.Path(path))
131
132 def serve(self):
133 self.observer.start()
134
135 log.info(f"Serving on {self.url}")
136 self.serve_thread.start()
137
138 self._build_loop()
139
140 def _build_loop(self):
141 while True:
142 with self._rebuild_cond:
143 while not self._rebuild_cond.wait_for(
144 lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay
145 ):
146 # We could have used just one wait instead of a loop + timeout, but we need
147 # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
148 pass
149 if self._shutdown:
150 break
151 log.info("Detected file changes")
152 while self._rebuild_cond.wait(timeout=self.build_delay):
153 log.debug("Waiting for file changes to stop happening")
154
155 self._wanted_epoch = _timestamp()
156 funcs = list(self._to_rebuild)
157 self._to_rebuild.clear()
158
159 for func in funcs:
160 func()
161
162 with self._epoch_cond:
163 log.info("Reloading browsers")
164 self._visible_epoch = self._wanted_epoch
165 self._epoch_cond.notify_all()
166
167 def shutdown(self):
168 self.observer.stop()
169 with self._rebuild_cond:
170 self._shutdown = True
171 self._rebuild_cond.notify_all()
172
173 if self.serve_thread.is_alive():
174 super().shutdown()
175 self.serve_thread.join()
176 self.observer.join()
177
178 def serve_request(self, environ, start_response):
179 try:
180 result = self._serve_request(environ, start_response)
181 except Exception:
182 code = 500
183 msg = "500 Internal Server Error"
184 log.exception(msg)
185 else:
186 if result is not None:
187 return result
188 code = 404
189 msg = "404 Not Found"
190
191 error_content = None
192 try:
193 error_content = self.error_handler(code)
194 except Exception:
195 log.exception("Failed to render an error message!")
196 if error_content is None:
197 error_content = msg.encode()
198
199 start_response(msg, [("Content-Type", "text/html")])
200 return [error_content]
201
202 def _serve_request(self, environ, start_response):
203 path = environ["PATH_INFO"]
204
205 m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
206 if m:
207 epoch = int(m[1])
208 start_response("200 OK", [("Content-Type", "text/plain")])
209
210 def condition():
211 return self._visible_epoch > epoch
212
213 with self._epoch_cond:
214 if not condition():
215 # Stall the browser, respond as soon as there's something new.
216 # If there's not, respond anyway after a minute.
217 self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
218 self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
219 return [b"%d" % self._visible_epoch]
220
221 if path == "/js/livereload.js":
222 file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "livereload.js")
223 elif path.startswith(self.mount_path):
224 if path.endswith("/"):
225 path += "index.html"
226 path = path[len(self.mount_path):]
227 file_path = os.path.join(self.root, path.lstrip("/"))
228 elif path == "/":
229 start_response("302 Found", [("Location", self.mount_path)])
230 return []
231 else:
232 return None # Not found
233
234 # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
235 with self._epoch_cond:
236 self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
237 epoch = self._visible_epoch
238
239 try:
240 file = open(file_path, "rb")
241 except OSError:
242 return None # Not found
243
244 if path.endswith(".html"):
245 with file:
246 content = file.read()
247 content = self._inject_js_into_html(content, epoch)
248 file = io.BytesIO(content)
249 content_length = len(content)
250 else:
251 content_length = os.path.getsize(file_path)
252
253 content_type = self._guess_type(file_path)
254 start_response(
255 "200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
256 )
257 return wsgiref.util.FileWrapper(file)
258
259 @classmethod
260 def _inject_js_into_html(cls, content, epoch):
261 try:
262 body_end = content.rindex(b"</body>")
263 except ValueError:
264 body_end = len(content)
265 # The page will reload if the livereload poller returns a newer epoch than what it knows.
266 # The other timestamp becomes just a unique identifier for the initiating page.
267 return (
268 b'%b<script src="/js/livereload.js"></script><script>livereload(%d, %d);</script>%b'
269 % (content[:body_end], epoch, _timestamp(), content[body_end:])
270 )
271
272 @classmethod
273 @functools.lru_cache() # "Cache" to not repeat the same message for the same browser tab.
274 def _log_poll_request(cls, url, request_id):
275 log.info(f"Browser connected: {url}")
276
277 def _guess_type(cls, path):
278 # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
279 # Other uncommon types will not be accepted.
280 if path.endswith((".js", ".JS")):
281 return "application/javascript"
282 if path.endswith(".gz"):
283 return "application/gzip"
284
285 guess, _ = mimetypes.guess_type(path)
286 if guess:
287 return guess
288 return "application/octet-stream"
289
290
291 class _Handler(wsgiref.simple_server.WSGIRequestHandler):
292 def log_request(self, code="-", size="-"):
293 level = logging.DEBUG if str(code) == "200" else logging.WARNING
294 log.log(level, f'"{self.requestline}" code {code}')
295
296 def log_message(self, format, *args):
297 log.debug(format, *args)
298
299
300 def _timestamp():
301 return round(time.monotonic() * 1000)
302
[end of mkdocs/livereload/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py
--- a/mkdocs/livereload/__init__.py
+++ b/mkdocs/livereload/__init__.py
@@ -4,7 +4,6 @@
import mimetypes
import os
import os.path
-import pathlib
import re
import socketserver
import threading
@@ -13,7 +12,7 @@
import wsgiref.simple_server
import watchdog.events
-import watchdog.observers
+import watchdog.observers.polling
class _LoggerAdapter(logging.LoggerAdapter):
@@ -35,7 +34,7 @@
port,
root,
mount_path="/",
- build_delay=0.25,
+ polling_interval=0.5,
shutdown_delay=0.25,
**kwargs,
):
@@ -45,7 +44,7 @@
self.root = os.path.abspath(root)
self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
- self.build_delay = build_delay
+ self.build_delay = 0.1
self.shutdown_delay = shutdown_delay
# To allow custom error pages.
self.error_handler = lambda code: None
@@ -62,7 +61,7 @@
self._shutdown = False
self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
- self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
+ self.observer = watchdog.observers.polling.PollingObserver(timeout=polling_interval)
def watch(self, path, func=None, recursive=True):
"""Add the 'path' to watched paths, call the function and reload when any file changes under it."""
@@ -77,57 +76,18 @@
stacklevel=2,
)
- def callback(event, allowed_path=None):
- if isinstance(event, watchdog.events.DirCreatedEvent):
+ def callback(event):
+ if event.is_directory:
return
- if allowed_path is not None and event.src_path != allowed_path:
- return
- # Text editors always cause a "file close" event in addition to "modified" when saving
- # a file. Some editors also have "swap" functionality that keeps writing into another
- # file that's never closed. Prevent such write events from causing a rebuild.
- if isinstance(event, watchdog.events.FileModifiedEvent):
- # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
- if type(self.observer).__name__ == "InotifyObserver":
- return
log.debug(str(event))
with self._rebuild_cond:
self._to_rebuild[func] = True
self._rebuild_cond.notify_all()
- dir_handler = watchdog.events.FileSystemEventHandler()
- dir_handler.on_any_event = callback
-
- seen = set()
-
- def schedule(path):
- seen.add(path)
- if path.is_file():
- # Watchdog doesn't support watching files, so watch its directory and filter by path
- handler = watchdog.events.FileSystemEventHandler()
- handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))
-
- parent = path.parent
- log.debug(f"Watching file '{path}' through directory '{parent}'")
- self.observer.schedule(handler, parent)
- else:
- log.debug(f"Watching directory '{path}'")
- self.observer.schedule(dir_handler, path, recursive=recursive)
-
- schedule(pathlib.Path(path).resolve())
-
- def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
- if path_obj.is_symlink():
- path_obj = pathlib.Path(path_obj).resolve()
- if path_obj in seen or not path_obj.exists():
- return
- schedule(path_obj)
-
- if path_obj.is_dir() and recursive:
- with os.scandir(os.fspath(path_obj)) as scan:
- for entry in scan:
- watch_symlink_targets(entry)
-
- watch_symlink_targets(pathlib.Path(path))
+ handler = watchdog.events.FileSystemEventHandler()
+ handler.on_any_event = callback
+ log.debug(f"Watching '{path}'")
+ self.observer.schedule(handler, path, recursive=recursive)
def serve(self):
self.observer.start()
| {"golden_diff": "diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py\n--- a/mkdocs/livereload/__init__.py\n+++ b/mkdocs/livereload/__init__.py\n@@ -4,7 +4,6 @@\n import mimetypes\n import os\n import os.path\n-import pathlib\n import re\n import socketserver\n import threading\n@@ -13,7 +12,7 @@\n import wsgiref.simple_server\n \n import watchdog.events\n-import watchdog.observers\n+import watchdog.observers.polling\n \n \n class _LoggerAdapter(logging.LoggerAdapter):\n@@ -35,7 +34,7 @@\n port,\n root,\n mount_path=\"/\",\n- build_delay=0.25,\n+ polling_interval=0.5,\n shutdown_delay=0.25,\n **kwargs,\n ):\n@@ -45,7 +44,7 @@\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n- self.build_delay = build_delay\n+ self.build_delay = 0.1\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n@@ -62,7 +61,7 @@\n \n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n- self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n+ self.observer = watchdog.observers.polling.PollingObserver(timeout=polling_interval)\n \n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n@@ -77,57 +76,18 @@\n stacklevel=2,\n )\n \n- def callback(event, allowed_path=None):\n- if isinstance(event, watchdog.events.DirCreatedEvent):\n+ def callback(event):\n+ if event.is_directory:\n return\n- if allowed_path is not None and event.src_path != allowed_path:\n- return\n- # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n- # a file. Some editors also have \"swap\" functionality that keeps writing into another\n- # file that's never closed. 
Prevent such write events from causing a rebuild.\n- if isinstance(event, watchdog.events.FileModifiedEvent):\n- # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n- if type(self.observer).__name__ == \"InotifyObserver\":\n- return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n \n- dir_handler = watchdog.events.FileSystemEventHandler()\n- dir_handler.on_any_event = callback\n-\n- seen = set()\n-\n- def schedule(path):\n- seen.add(path)\n- if path.is_file():\n- # Watchdog doesn't support watching files, so watch its directory and filter by path\n- handler = watchdog.events.FileSystemEventHandler()\n- handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))\n-\n- parent = path.parent\n- log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n- self.observer.schedule(handler, parent)\n- else:\n- log.debug(f\"Watching directory '{path}'\")\n- self.observer.schedule(dir_handler, path, recursive=recursive)\n-\n- schedule(pathlib.Path(path).resolve())\n-\n- def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n- if path_obj.is_symlink():\n- path_obj = pathlib.Path(path_obj).resolve()\n- if path_obj in seen or not path_obj.exists():\n- return\n- schedule(path_obj)\n-\n- if path_obj.is_dir() and recursive:\n- with os.scandir(os.fspath(path_obj)) as scan:\n- for entry in scan:\n- watch_symlink_targets(entry)\n-\n- watch_symlink_targets(pathlib.Path(path))\n+ handler = watchdog.events.FileSystemEventHandler()\n+ handler.on_any_event = callback\n+ log.debug(f\"Watching '{path}'\")\n+ self.observer.schedule(handler, path, recursive=recursive)\n \n def serve(self):\n self.observer.start()\n", "issue": "mkdocs 1.2.x livereload no longer working\nmkdocs==1.2 and latest no longer works\r\nmkdocs==1.1.2 works\r\n\r\nMacOS 11.4\r\nDocker version 20.10.7, build f0df350\r\npython:3.8-alpine (3.9.5 and 10-rc same behavior)\r\n\r\nI checked the Docker container and the markdown is updating but the generated html is not. 
This seems to be associated with the 1.2 change with watcher.\r\n\r\nLMK what logs/config/diag I can provide to help RCA the issue.\n", "before_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport pathlib\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n build_delay=0.25,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = build_delay\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). \"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event, allowed_path=None):\n if isinstance(event, watchdog.events.DirCreatedEvent):\n return\n if allowed_path is not None and event.src_path != allowed_path:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n # file that's never closed. 
Prevent such write events from causing a rebuild.\n if isinstance(event, watchdog.events.FileModifiedEvent):\n # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n if type(self.observer).__name__ == \"InotifyObserver\":\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n dir_handler = watchdog.events.FileSystemEventHandler()\n dir_handler.on_any_event = callback\n\n seen = set()\n\n def schedule(path):\n seen.add(path)\n if path.is_file():\n # Watchdog doesn't support watching files, so watch its directory and filter by path\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))\n\n parent = path.parent\n log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n self.observer.schedule(handler, parent)\n else:\n log.debug(f\"Watching directory '{path}'\")\n self.observer.schedule(dir_handler, path, recursive=recursive)\n\n schedule(pathlib.Path(path).resolve())\n\n def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n if path_obj.is_symlink():\n path_obj = pathlib.Path(path_obj).resolve()\n if path_obj in seen or not path_obj.exists():\n return\n schedule(path_obj)\n\n if path_obj.is_dir() and recursive:\n with os.scandir(os.fspath(path_obj)) as scan:\n for entry in scan:\n watch_symlink_targets(entry)\n\n watch_symlink_targets(pathlib.Path(path))\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return 
self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, _timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef _timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}]} | 3,850 | 985 |
gh_patches_debug_1194 | rasdani/github-patches | git_diff | pytorch__TensorRT-371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🐛 [Bug] An error occurs in CompileGraph when gpu_id == 1
When I tried to compile on the second GPU in a multi-GPU environment, an error occurred. The code sample used is as follows.
```cpp
void load(const std::string& model_path, int64_t gpu_id, int64_t opt_batch_size) {
torch::jit::Module module = torch::jit::load(model_path);
torch::Device device = (torch::cuda::is_available() ? torch::Device(torch::kCUDA, gpu_id) : torch::Device(torch::kCPU));
module.to(device, torch::kHalf);
module.eval();
std::vector<int64_t> in_opt = { opt_batch_size, INPUT_CHANNEL_NUM, BOARD_WIDTH, BOARD_WIDTH };
trtorch::CompileSpec::InputRange range(in_opt);
trtorch::CompileSpec info({ range });
info.op_precision = torch::kHalf;
info.device.gpu_id = gpu_id;
module = trtorch::CompileGraph(module, info);
}
```
#### Error1
I called this function with gpu_id = 1. I got the following error:
```
terminate called after throwing an instance of 'trtorch::Error'
what(): [enforce fail at core/conversion/conversionctx/ConversionCtx.cpp:107] Expected cudaSetDevice(settings.device.gpu_id) to be true but got false
Unable to set gpu id: 1
```
I think this line is the cause.
https://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/conversion/conversionctx/ConversionCtx.cpp#L112
`cudaSetDevice` returns `cudaSuccess` (= 0) on success. However, `TRTORCH_CHECK` judges success or failure as a Boolean type.
I fixed it as follows and rebuilt it so that this error disappeared.
```diff
diff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp
index ff23692..bc5bf68 100644
--- a/core/conversion/conversionctx/ConversionCtx.cpp
+++ b/core/conversion/conversionctx/ConversionCtx.cpp
@@ -109,7 +109,7 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
cfg->setEngineCapability(settings.capability);
if (settings.device.gpu_id) {
- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), "Unable to set gpu id: " << settings.device.gpu_id);
+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, "Unable to set gpu id: " << settings.device.gpu_id);
}
if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {
```
You may also use `set_device`.
https://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/compiler.cpp#L176-L178
#### Error2
After making the above fix, I get the following error:
```
ERROR: [TRTorch Conversion Context] - Builder was created on device different than current device.
```
I moved the `cudaSetDevice` call to the beginning of the function and it worked fine.
```diff
diff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp
index ff23692..09a419c 100644
--- a/core/conversion/conversionctx/ConversionCtx.cpp
+++ b/core/conversion/conversionctx/ConversionCtx.cpp
@@ -47,6 +47,10 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
util::logging::get_logger().get_reportable_severity(),
util::logging::get_logger().get_is_colored_output_on()) {
// TODO: Support FP16 and FP32 from JIT information
+ if (settings.device.gpu_id) {
+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, "Unable to set gpu id: " << settings.device.gpu_id);
+ }
+
builder = nvinfer1::createInferBuilder(logger);
net = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
@@ -108,10 +112,6 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
cfg->setDefaultDeviceType(settings.device.device_type);
cfg->setEngineCapability(settings.capability);
- if (settings.device.gpu_id) {
- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), "Unable to set gpu id: " << settings.device.gpu_id);
- }
-
if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {
auto nbDLACores = builder->getNbDLACores();
TRTORCH_CHECK(
```
It's working, but I'm not sure if this is a good fix as there may be other side effects as well.
I would appreciate it if you could respond appropriately.
</issue>
<code>
[start of py/trtorch/_compiler.py]
1 from typing import List, Dict, Any
2 import torch
3 from torch import nn
4
5 import trtorch._C
6 from trtorch._compile_spec import _parse_compile_spec
7 from trtorch._version import __version__
8 from types import FunctionType
9
10
11 def compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:
12 """Compile a TorchScript module for NVIDIA GPUs using TensorRT
13
14     Takes an existing TorchScript module and a set of settings to configure the compiler
15 and will convert methods to JIT Graphs which call equivalent TensorRT engines
16
17 Converts specifically the forward method of a TorchScript Module
18
19 Args:
20 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
21 ``torch.nn.Module``
22 compile_spec (dict): Compilation settings including operating precision, target device, etc.
23 One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs
24 to the graph. All other keys are optional
25
26 .. code-block:: py
27
28 compile_spec = {
29 "input_shapes": [
30 (1, 3, 224, 224), # Static input shape for input #1
31 {
32 "min": (1, 3, 224, 224),
33 "opt": (1, 3, 512, 512),
34 "max": (1, 3, 1024, 1024)
35 } # Dynamic input shape for input #2
36 ],
37 "device": {
38 "device_type": torch.device("cuda"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)
39 "gpu_id": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)
40 "dla_core": 0, # (DLA only) Target dla core id to run engine
41 "allow_gpu_fallback": false, # (DLA only) Allow layers unsupported on DLA to run on GPU
42 },
43 "op_precision": torch.half, # Operating precision set to FP16
44 "refit": false, # enable refit
45 "debug": false, # enable debuggable engine
46 "strict_types": false, # kernels should strictly run in operating precision
47 "capability": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels
48 "num_min_timing_iters": 2, # Number of minimization timing iterations used to select kernels
49 "num_avg_timing_iters": 1, # Number of averaging timing iterations used to select kernels
50 "workspace_size": 0, # Maximum size of workspace given to TensorRT
51 "max_batch_size": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)
52 }
53
54 Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using
55 torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum
56 to select device type.
57
58 Returns:
59 torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT
60 """
61
62 if isinstance(module, torch.jit.ScriptFunction):
63 raise TypeError(
64 "torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile")
65
66 compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))
67 compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)
68 return compiled_module
69
70
71 def convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:
72 """Convert a TorchScript module method to a serialized TensorRT engine
73
74 Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings
75
76 Args:
77 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
78 ``torch.nn.Module``
79 method_name (str): Name of method to convert
80 compile_spec (dict): Compilation settings including operating precision, target device, etc.
81 One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs
82 to the graph. All other keys are optional
83
84 .. code-block:: py
85
86 CompileSpec = {
87 "input_shapes": [
88 (1, 3, 224, 224), # Static input shape for input #1
89 {
90 "min": (1, 3, 224, 224),
91 "opt": (1, 3, 512, 512),
92 "max": (1, 3, 1024, 1024)
93 } # Dynamic input shape for input #2
94 ],
95 "device": {
96 "device_type": torch.device("cuda"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)
97 "gpu_id": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)
98 "dla_core": 0, # (DLA only) Target dla core id to run engine
99 "allow_gpu_fallback": false, # (DLA only) Allow layers unsupported on DLA to run on GPU
100 },
101 "op_precision": torch.half, # Operating precision set to FP16
102                 "disable_tf32": False, # Force FP32 layers to use traditional FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulating the sum using 23-bit mantissas
103 "refit": false, # enable refit
104 "debug": false, # enable debuggable engine
105 "strict_types": false, # kernels should strictly run in operating precision
106 "capability": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels
107 "num_min_timing_iters": 2, # Number of minimization timing iterations used to select kernels
108 "num_avg_timing_iters": 1, # Number of averaging timing iterations used to select kernels
109 "workspace_size": 0, # Maximum size of workspace given to TensorRT
110 "max_batch_size": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)
111 }
112
113 Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using
114 torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum
115 to select device type.
116
117 Returns:
118 bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs
119 """
120 if isinstance(module, torch.jit.ScriptFunction):
121 raise TypeError(
122 "torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile")
123
124 return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))
125
126
127 def check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:
128 """Checks to see if a method is fully supported by TRTorch
129
130 Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators
131 that are not supported are printed out and the function returns false, else true.
132
133 Args:
134 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
135 ``torch.nn.Module``
136 method_name (str): Name of method to check
137
138 Returns:
139 bool: True if supported Method
140 """
141 return trtorch._C.check_method_op_support(module._c, method_name)
142
143
144 def dump_build_info():
145 """Prints build information about the TRTorch distribution to stdout
146 """
147 print(get_build_info())
148
149
150 def get_build_info() -> str:
151 """Returns a string containing the build information of TRTorch distribution
152
153 Returns:
154 str: String containing the build information for TRTorch distribution
155 """
156 build_info = trtorch._C.get_build_info()
157 build_info = "TRTorch Version: " + str(__version__) + '\n' + build_info
158 return build_info
159
[end of py/trtorch/_compiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/trtorch/_compiler.py b/py/trtorch/_compiler.py
--- a/py/trtorch/_compiler.py
+++ b/py/trtorch/_compiler.py
@@ -156,3 +156,6 @@
build_info = trtorch._C.get_build_info()
build_info = "TRTorch Version: " + str(__version__) + '\n' + build_info
return build_info
+
+def set_device(gpu_id):
+ trtorch._C.set_device(gpu_id)
| {"golden_diff": "diff --git a/py/trtorch/_compiler.py b/py/trtorch/_compiler.py\n--- a/py/trtorch/_compiler.py\n+++ b/py/trtorch/_compiler.py\n@@ -156,3 +156,6 @@\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n+\n+def set_device(gpu_id):\n+ trtorch._C.set_device(gpu_id)\n", "issue": "\ud83d\udc1b [Bug] An error occurs in CompileGraph when gpu_id == 1\nWhen I tried to Complie on the second GPU in a multi-GPU environment, an error occurred. The code sample used is as follows.\r\n\r\n```cpp\r\nvoid load(const std::string& model_path, int64_t gpu_id, int64_t opt_batch_size) {\r\n torch::jit::Module module = torch::jit::load(model_path);\r\n torch::Device device = (torch::cuda::is_available() ? torch::Device(torch::kCUDA, gpu_id) : torch::Device(torch::kCPU));\r\n module.to(device, torch::kHalf);\r\n module.eval();\r\n\r\n std::vector<int64_t> in_opt = { opt_batch_size, INPUT_CHANNEL_NUM, BOARD_WIDTH, BOARD_WIDTH };\r\n\r\n trtorch::CompileSpec::InputRange range(in_opt);\r\n trtorch::CompileSpec info({ range });\r\n info.op_precision = torch::kHalf;\r\n info.device.gpu_id = gpu_id;\r\n module = trtorch::CompileGraph(module, info);\r\n}\r\n```\r\n\r\n#### Error1\r\nI called this function with gpu_id = 1. I got the following error:\r\n\r\n```\r\nterminate called after throwing an instance of 'trtorch::Error'\r\n what(): [enforce fail at core/conversion/conversionctx/ConversionCtx.cpp:107] Expected cudaSetDevice(settings.device.gpu_id) to be true but got false\r\nUnable to set gpu id: 1\r\n```\r\n\r\nI think this line is the cause.\r\nhttps://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/conversion/conversionctx/ConversionCtx.cpp#L112\r\n\r\n`cudaSetDevice` returns `cudaSuccess` (= 0) on success. 
However, `TRTORCH_CHECK` judges success or failure as a Boolean type.\r\n\r\nI fixed it as follows and rebuilt it so that this error disappeared.\r\n\r\n```diff\r\ndiff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp\r\nindex ff23692..bc5bf68 100644\r\n--- a/core/conversion/conversionctx/ConversionCtx.cpp\r\n+++ b/core/conversion/conversionctx/ConversionCtx.cpp\r\n@@ -109,7 +109,7 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)\r\n cfg->setEngineCapability(settings.capability);\r\n \r\n if (settings.device.gpu_id) {\r\n- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n }\r\n \r\n if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {\r\n```\r\n\r\nYou may also use `set_device`.\r\n\r\nhttps://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/compiler.cpp#L176-L178\r\n\r\n#### Error2\r\nAfter making the above fix, I get the following error:\r\n\r\n```\r\nERROR: [TRTorch Conversion Context] - Builder was created on device different than current device.\r\n```\r\n\r\nI changed `cudaSetDevice` to do it at the beginning of the function and it worked fine.\r\n\r\n```diff\r\ndiff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp\r\nindex ff23692..09a419c 100644\r\n--- a/core/conversion/conversionctx/ConversionCtx.cpp\r\n+++ b/core/conversion/conversionctx/ConversionCtx.cpp\r\n@@ -47,6 +47,10 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)\r\n util::logging::get_logger().get_reportable_severity(),\r\n util::logging::get_logger().get_is_colored_output_on()) {\r\n // TODO: Support FP16 and FP32 from JIT information\r\n+ if (settings.device.gpu_id) {\r\n+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n+ }\r\n+\r\n builder = nvinfer1::createInferBuilder(logger);\r\n net = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));\r\n \r\n@@ -108,10 +112,6 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)\r\n cfg->setDefaultDeviceType(settings.device.device_type);\r\n cfg->setEngineCapability(settings.capability);\r\n \r\n- if (settings.device.gpu_id) {\r\n- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n- }\r\n-\r\n if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {\r\n auto nbDLACores = builder->getNbDLACores();\r\n TRTORCH_CHECK(\r\n```\r\n\r\nIt's working, but I'm not sure if this is a good fix as there may be other side effects as well.\r\nI would appreciate it if you could respond appropriately.\n", "before_files": [{"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport trtorch._C\nfrom trtorch._compile_spec import _parse_compile_spec\nfrom trtorch._version import __version__\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward 
method of a TorchScript Module\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. code-block:: py\n\n compile_spec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. 
code-block:: py\n\n CompileSpec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"disable_tf32\": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by TRTorch\n\n Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return trtorch._C.check_method_op_support(module._c, method_name)\n\n\ndef dump_build_info():\n \"\"\"Prints build information about the TRTorch distribution to stdout\n \"\"\"\n print(get_build_info())\n\n\ndef get_build_info() -> str:\n \"\"\"Returns a string containing the build information of TRTorch distribution\n\n Returns:\n str: String containing the build information for TRTorch distribution\n \"\"\"\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n", "path": "py/trtorch/_compiler.py"}]} | 3,921 | 112 |
gh_patches_debug_10948 | rasdani/github-patches | git_diff | dmlc__dgl-1305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'dgl.nn' has no attribute 'pytorch'
## 🐛 Bug
When I try to use some of the predefined modules of dgl by the following code, I encounter an error: AttributeError: module 'dgl.nn' has no attribute 'pytorch'.
Similar problems also happen to other backends, including TensorFlow and MXNet.
## To Reproduce
Steps to reproduce the behavior:
```python
import dgl.nn
# or import dgl
c = dgl.nn.pytorch.conv.GraphConv(10,2)
```
## Expected behavior
The code should generate a GraphConv layer without any error.
## Environment
- DGL Version (e.g., 1.0): 0.4.2
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): Pytorch 1.4.0
 - OS (e.g., Linux): Irrelevant
- How you installed DGL (`conda`, `pip`, source): conda
- Build command you used (if compiling from source):
- Python version: 3.7
 - CUDA/cuDNN version (if applicable): Irrelevant
 - GPU models and configuration (e.g. V100): Irrelevant
- Any other relevant information:
## Additional context
I read the source code and **found the reason and solution** to this problem.
### Reason:
The `__init__.py` file of `dgl/nn` is empty. Therefore, if I import dgl or dgl.nn, Python cannot automatically find the files of its sub-directories.
I verified it by the following code:
```python
import dgl.nn.pytorch
c = dgl.nn.pytorch.conv.GraphConv(10,2)
```
It works fine.
### Solution:
Add 3 lines of code like `from . import pytorch` into file `dgl/nn/__init__.py` for PyTorch and the other 2 backends.
It is better to automatically detect the backend library and import the correct sub-directory.
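For illustration, a minimal sketch of such an auto-detecting `dgl/nn/__init__.py` could look like the following (assuming the active backend's name is importable from `dgl.backend` as `backend_name`):
```python
# Sketch only: load dgl.nn.<backend> for the configured backend and re-export
# its public attributes from dgl.nn itself.
import importlib
import sys

from ..backend import backend_name  # assumed to be "pytorch", "mxnet", or "tensorflow"


def _load_backend(mod_name):
    mod = importlib.import_module('.%s' % mod_name, __name__)
    thismod = sys.modules[__name__]
    for api, obj in mod.__dict__.items():
        setattr(thismod, api, obj)


_load_backend(backend_name)
```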
</issue>
<code>
[start of python/dgl/__init__.py]
1 """DGL root package."""
2 # Windows compatibility
3 # This initializes Winsock and performs cleanup at termination as required
4 import socket
5
6 # Need to ensure that the backend framework is imported before load dgl libs,
7 # otherwise weird cuda problem happens
8 from .backend import load_backend
9
10 from . import function
11 from . import nn
12 from . import contrib
13 from . import container
14 from . import random
15 from . import sampling
16
17 from ._ffi.runtime_ctypes import TypeCode
18 from ._ffi.function import register_func, get_global_func, list_global_func_names, extract_ext_funcs
19 from ._ffi.base import DGLError, __version__
20
21 from .base import ALL, NTYPE, NID, ETYPE, EID
22 from .readout import *
23 from .batched_heterograph import *
24 from .convert import *
25 from .graph import DGLGraph, batch, unbatch
26 from .generators import *
27 from .heterograph import DGLHeteroGraph
28 from .nodeflow import *
29 from .traversal import *
30 from .transform import *
31 from .propagate import *
32 from .udf import NodeBatch, EdgeBatch
33
[end of python/dgl/__init__.py]
[start of python/dgl/nn/__init__.py]
1 """Package for neural network common components."""
2
[end of python/dgl/nn/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/dgl/__init__.py b/python/dgl/__init__.py
--- a/python/dgl/__init__.py
+++ b/python/dgl/__init__.py
@@ -8,7 +8,6 @@
from .backend import load_backend
from . import function
-from . import nn
from . import contrib
from . import container
from . import random
diff --git a/python/dgl/nn/__init__.py b/python/dgl/nn/__init__.py
--- a/python/dgl/nn/__init__.py
+++ b/python/dgl/nn/__init__.py
@@ -1 +1,12 @@
"""Package for neural network common components."""
+import importlib
+import sys
+from ..backend import backend_name
+
+def _load_backend(mod_name):
+ mod = importlib.import_module('.%s' % mod_name, __name__)
+ thismod = sys.modules[__name__]
+ for api, obj in mod.__dict__.items():
+ setattr(thismod, api, obj)
+
+_load_backend(backend_name)
| {"golden_diff": "diff --git a/python/dgl/__init__.py b/python/dgl/__init__.py\n--- a/python/dgl/__init__.py\n+++ b/python/dgl/__init__.py\n@@ -8,7 +8,6 @@\n from .backend import load_backend\n \n from . import function\n-from . import nn\n from . import contrib\n from . import container\n from . import random\ndiff --git a/python/dgl/nn/__init__.py b/python/dgl/nn/__init__.py\n--- a/python/dgl/nn/__init__.py\n+++ b/python/dgl/nn/__init__.py\n@@ -1 +1,12 @@\n \"\"\"Package for neural network common components.\"\"\"\n+import importlib\n+import sys\n+from ..backend import backend_name\n+\n+def _load_backend(mod_name):\n+ mod = importlib.import_module('.%s' % mod_name, __name__)\n+ thismod = sys.modules[__name__]\n+ for api, obj in mod.__dict__.items():\n+ setattr(thismod, api, obj)\n+\n+_load_backend(backend_name)\n", "issue": "AttributeError: module 'dgl.nn' has no attribute 'pytorch'\n## \ud83d\udc1b Bug\r\n\r\nWhen I try to use some of the predefined modules of dgl by the following code, I encounter an error: AttributeError: module 'dgl.nn' has no attribute 'pytorch'.\r\n\r\nSimilar problems also happen to other backends, including TensorFlow and MXNet.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```python\r\nimport dgl.nn\r\n# or import dgl\r\nc = dgl.nn.pytorch.conv.GraphConv(10,2)\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe code should generate a GraphConv layer without any error.\r\n\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): 0.4.2\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): Pytorch 1.4.0\r\n - OS (e.g., Linux): Irrelelevent \r\n - How you installed DGL (`conda`, `pip`, source): conda\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version (if applicable): Irrelelevent \r\n - GPU models and configuration (e.g. V100): Irrelelevent \r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\nI read the source code and **found the reason and solution** to this problem.\r\n\r\n### Reason:\r\n\r\nThe `__init__.py` file of `dgl/nn` is empty. Therefore, if i import dgl or dgl.nn, python cannot automatically find files of its sub-directories.\r\n\r\nI verified it by the following code:\r\n```python\r\nimport dgl.nn.python\r\nc = dgl.nn.pytorch.conv.GraphConv(10,2)\r\n```\r\nIt works fine.\r\n\r\n### Solution:\r\n\r\nAdd 3 lines of code like `from . import pytorch` into file `dgl/nn/__init__.py` for PyTorch and the other 2 backends.\r\nIt is better to automatically detect the backend library and import the correct sub-directory.\n", "before_files": [{"content": "\"\"\"DGL root package.\"\"\"\n# Windows compatibility\n# This initializes Winsock and performs cleanup at termination as required\nimport socket\n\n# Need to ensure that the backend framework is imported before load dgl libs,\n# otherwise weird cuda problem happens\nfrom .backend import load_backend\n\nfrom . import function\nfrom . import nn\nfrom . import contrib\nfrom . import container\nfrom . import random\nfrom . 
import sampling\n\nfrom ._ffi.runtime_ctypes import TypeCode\nfrom ._ffi.function import register_func, get_global_func, list_global_func_names, extract_ext_funcs\nfrom ._ffi.base import DGLError, __version__\n\nfrom .base import ALL, NTYPE, NID, ETYPE, EID\nfrom .readout import *\nfrom .batched_heterograph import *\nfrom .convert import *\nfrom .graph import DGLGraph, batch, unbatch\nfrom .generators import *\nfrom .heterograph import DGLHeteroGraph\nfrom .nodeflow import *\nfrom .traversal import *\nfrom .transform import *\nfrom .propagate import *\nfrom .udf import NodeBatch, EdgeBatch\n", "path": "python/dgl/__init__.py"}, {"content": "\"\"\"Package for neural network common components.\"\"\"\n", "path": "python/dgl/nn/__init__.py"}]} | 1,317 | 234 |
gh_patches_debug_14394 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Annotation answers get parsed incorrectly in csv export
For annotation type answers, the csv export looks like this currently:

It appears the annotation JSON gets parsed as part of the export. We should probably add some escaping.
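One possible way to add that escaping (a sketch only, not the final implementation) is to override the renderer's `flatten_data` hook and serialize nested answers to JSON strings, so each answer stays in a single column:
```python
import json

from rest_framework_csv.renderers import CSVRenderer


class EscapingCSVRenderer(CSVRenderer):  # illustrative name
    def flatten_data(self, data):
        # Keep dict/list answers as single JSON-encoded cells instead of
        # letting the renderer expand them into extra columns.
        for row in data:
            yield {
                k: json.dumps(v) if isinstance(v, (dict, list)) else v
                for k, v in row.items()
            }
```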
</issue>
<code>
[start of app/grandchallenge/core/renderers.py]
1 from rest_framework_csv.renderers import CSVRenderer
2
3
4 class PaginatedCSVRenderer(CSVRenderer):
5 results_field = "results"
6
7 def render(self, data, *args, **kwargs):
8 if self.results_field in data:
9 data = data[self.results_field]
10
11 return super().render(data, *args, **kwargs)
12
[end of app/grandchallenge/core/renderers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py
--- a/app/grandchallenge/core/renderers.py
+++ b/app/grandchallenge/core/renderers.py
@@ -1,3 +1,5 @@
+import json
+
from rest_framework_csv.renderers import CSVRenderer
@@ -9,3 +11,19 @@
data = data[self.results_field]
return super().render(data, *args, **kwargs)
+
+ def flatten_data(self, data):
+ """
+ Create a dictionary that is 1 level deep, with nested values serialized
+ as json. This means that the header rows are now consistent.
+ """
+ for row in data:
+ flat_row = {k: self._flatten_value(v) for k, v in row.items()}
+ yield flat_row
+
+ @staticmethod
+ def _flatten_value(value):
+ if isinstance(value, (dict, list)):
+ return json.dumps(value)
+ else:
+ return value
| {"golden_diff": "diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py\n--- a/app/grandchallenge/core/renderers.py\n+++ b/app/grandchallenge/core/renderers.py\n@@ -1,3 +1,5 @@\n+import json\n+\n from rest_framework_csv.renderers import CSVRenderer\n \n \n@@ -9,3 +11,19 @@\n data = data[self.results_field]\n \n return super().render(data, *args, **kwargs)\n+\n+ def flatten_data(self, data):\n+ \"\"\"\n+ Create a dictionary that is 1 level deep, with nested values serialized\n+ as json. This means that the header rows are now consistent.\n+ \"\"\"\n+ for row in data:\n+ flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n+ yield flat_row\n+\n+ @staticmethod\n+ def _flatten_value(value):\n+ if isinstance(value, (dict, list)):\n+ return json.dumps(value)\n+ else:\n+ return value\n", "issue": "Annotation answers get parsed incorrectly in csv export\nFor annotation type answers, the csv export looks like this currently:\r\n\r\n\r\nIt appears the annotation json gets part as part of the export. We should probably add some escaping.\n", "before_files": [{"content": "from rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n", "path": "app/grandchallenge/core/renderers.py"}]} | 753 | 228 |
gh_patches_debug_6909 | rasdani/github-patches | git_diff | ResonantGeoData__ResonantGeoData-436 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
expose STAC post endpoint in Python client
for https://github.com/ResonantGeoData/issue-dashboard/issues/11
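For illustration only, the client-side addition might look roughly like the sketch below; the method name, the endpoint path (`geodata/imagery/raster/stac`), and the assumption that `RgdcSession` exposes `post` alongside `get` are placeholders that would need to match the actual server route:
```python
# Hypothetical sketch of a method to add to the Rgdc class (names and path assumed).
def create_raster_stac(self, item: dict) -> dict:
    """POST a STAC Item to the server and return the created entry as JSON."""
    r = self.session.post('geodata/imagery/raster/stac', json=item)
    return r.json()
```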
</issue>
<code>
[start of rgdc/rgdc/rgdc.py]
1 from base64 import b64encode
2 from dataclasses import dataclass
3 import getpass
4 from pathlib import Path
5 import tempfile
6 from typing import Dict, Iterator, List, Optional, Tuple, Union
7
8 from tqdm import tqdm
9
10 from .session import RgdcSession
11 from .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE
12 from .utils import (
13 DEFAULT_RGD_API,
14 download_checksum_file_to_path,
15 limit_offset_pager,
16 spatial_search_params,
17 spatial_subentry_id,
18 )
19
20
21 @dataclass
22 class RasterDownload:
23 path: Path
24 images: List[Path]
25 ancillary: List[Path]
26
27
28 class Rgdc:
29 def __init__(
30 self,
31 api_url: str = DEFAULT_RGD_API,
32 username: Optional[str] = None,
33 password: Optional[str] = None,
34 ):
35 """
36 Initialize a RGD Client.
37
38 Args:
39 api_url: The base url of the RGD API instance.
40 username: The username to authenticate to the instance with, if any.
41 password: The password associated with the provided username. If None, a prompt will be provided.
42
43 Returns:
44 A new Rgdc instance.
45 """
46 auth_header = None
47
48 # Prompt for password if not provided
49 if username is not None and password is None:
50 password = getpass.getpass()
51
52 if username and password:
53 encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()
54 auth_header = f'Basic {encoded_credentials}'
55
56 self.session = RgdcSession(base_url=api_url, auth_header=auth_header)
57
58 def list_image_tiles(self, image_id: Union[str, int]) -> Dict:
59 """List geodata imagery tiles."""
60 r = self.session.get(f'geoprocess/imagery/{image_id}/tiles')
61 return r.json()
62
63 def download_image_file(
64 self, image_id: Union[str, int], chunk_size: int = 1024 * 1024
65 ) -> Iterator[bytes]:
66 """
67 Download the associated ImageFile data for this ImageEntry directly from S3.
68
69 Args:
70 image_id: The ID of the ImageEntry to download.
71 chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).
72
73 Returns:
74 An iterator of byte chunks.
75 """
76 r = self.session.get(f'geodata/imagery/{image_id}/data', stream=True)
77 return r.iter_content(chunk_size=chunk_size)
78
79 def download_image_thumbnail(
80 self,
81 image_id: Union[str, int],
82 ) -> bytes:
83 """
84 Download the generated thumbnail for this ImageEntry.
85
86 Args:
87 image_id: The ID of the ImageEntry to download.
88
89 Returns:
90 Thumbnail bytes.
91 """
92 r = self.session.get(f'geoprocess/imagery/{image_id}/thumbnail')
93 return r.content
94
95 def download_raster_thumbnail(
96 self,
97 raster_meta_id: Union[str, int, dict],
98 band: int = 0,
99 ) -> bytes:
100 """
101         Download the generated thumbnail for an image in this raster's image set.
102
103 Args:
104 raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.
105 band: The index of the image in the raster's image set to produce thumbnail from.
106
107 Returns:
108 Thumbnail bytes.
109 """
110 if isinstance(raster_meta_id, dict):
111 raster_meta_id = spatial_subentry_id(raster_meta_id)
112
113 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')
114 parent_raster = r.json().get('parent_raster', {})
115 images = parent_raster.get('image_set', {}).get('images', [])
116 try:
117 return self.download_image_thumbnail(images[band]['id'])
118 except IndexError:
119 raise IndexError(f'Band index ({band}) out of range.')
120
121 def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:
122 """Get raster entry detail.
123
124 Args:
125 stac: Optionally return as STAC Item dictionary/JSON.
126
127 Returns:
128 Serialized object representation.
129 """
130 if isinstance(raster_meta_id, dict):
131 raster_meta_id = spatial_subentry_id(raster_meta_id)
132
133 if stac:
134 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}/stac')
135 else:
136 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')
137 return r.json()
138
139 def download_raster(
140 self,
141 raster_meta_id: Union[str, int, dict],
142 pathname: Optional[str] = None,
143 nest_with_name: bool = False,
144 keep_existing: bool = True,
145 ) -> RasterDownload:
146 """
147 Download the image set associated with a raster entry to disk.
148
149 Args:
150 raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.
151 pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.
152 nest_with_name: If True, nests the download within an additional directory, using the raster entry name.
153 keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.
154
155 Returns:
156 A dictionary of the paths to all files downloaded under the directory.
157 """
158 if isinstance(raster_meta_id, dict):
159 raster_meta_id = spatial_subentry_id(raster_meta_id)
160
161 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')
162 parent_raster = r.json().get('parent_raster', {})
163
164 # Create dirs after request to avoid empty dirs if failed
165 if pathname is None:
166 pathname = tempfile.mkdtemp()
167
168 # Handle optional nesting with raster entry name
169 path = Path(pathname)
170 parent_raster_name: Optional[str] = parent_raster.get('name')
171
172 if nest_with_name and parent_raster_name:
173 path = path / parent_raster_name
174
175 # Ensure base download directory exists
176 if not path.exists():
177 path.mkdir()
178
179 # Initialize dataclass
180 raster_download = RasterDownload(path, [], [])
181
182 # Download images
183 images = parent_raster.get('image_set', {}).get('images', [])
184 for image in tqdm(images, desc='Downloading image files'):
185 file = image.get('image_file', {}).get('file', {})
186 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
187 if file_path:
188 raster_download.images.append(file_path)
189
190 # Download ancillary files
191 ancillary = parent_raster.get('ancillary_files', [])
192 for file in tqdm(ancillary, desc='Downloading ancillary files'):
193 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
194 if file_path:
195 raster_download.ancillary.append(file_path)
196
197 return raster_download
198
199 def search(
200 self,
201 query: Optional[Union[Dict, str]] = None,
202 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
203 relates: Optional[str] = None,
204 distance: Optional[Tuple[float, float]] = None,
205 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
206 instrumentation: Optional[str] = None,
207 limit: Optional[int] = None,
208 offset: Optional[int] = None,
209 ) -> List[Dict]:
210 """
211 Search for geospatial entries based on various criteria.
212
213 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
214 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
215
216 Args:
217 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
218 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
219 be used to filter data such that predicate(a, b) where b is the queried geometry.
220 relates: Specify exactly how the queried geometry should relate to the data using a
221 DE-9IM string code.
222 distance: The min/max distance around the queried geometry in meters.
223 acquired: The min/max date and time (ISO 8601) when data was acquired.
224 instrumentation: The instrumentation used to acquire at least one of these data.
225 limit: The maximum number of results to return.
226 offset: The number of results to skip.
227
228 Returns:
229 A list of Spatial Entries.
230 """
231 params = spatial_search_params(
232 query=query,
233 predicate=predicate,
234 relates=relates,
235 distance=distance,
236 acquired=acquired,
237 instrumentation=instrumentation,
238 limit=limit,
239 offset=offset,
240 )
241 return list(limit_offset_pager(self.session, 'geosearch', params=params))
242
243 def search_raster_stac(
244 self,
245 query: Optional[Union[Dict, str]] = None,
246 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
247 relates: Optional[str] = None,
248 distance: Optional[Tuple[float, float]] = None,
249 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
250 instrumentation: Optional[str] = None,
251 num_bands: Optional[Tuple[int, int]] = None,
252 resolution: Optional[Tuple[int, int]] = None,
253 cloud_cover: Optional[Tuple[float, float]] = None,
254 limit: Optional[int] = None,
255 offset: Optional[int] = None,
256 ) -> List[Dict]:
257 """
258 Search for raster entries based on various criteria.
259
260 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
261 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
262
263 Args:
264 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
265 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
266 be used to filter data such that predicate(a, b) where b is the queried geometry.
267 relates: Specify exactly how the queried geometry should relate to the data using a
268 DE-9IM string code.
269 distance: The min/max distance around the queried geometry in meters.
270 acquired: The min/max date and time (ISO 8601) when data was acquired.
271 instrumentation: The instrumentation used to acquire at least one of these data.
272 num_bands: The min/max number of bands in the raster.
273 resolution: The min/max resolution of the raster.
274 cloud_cover: The min/max cloud coverage of the raster.
275 limit: The maximum number of results to return.
276 offset: The number of results to skip.
277
278 Returns:
279 A list of Spatial Entries in STAC Item format.
280 """
281 params = spatial_search_params(
282 query=query,
283 predicate=predicate,
284 relates=relates,
285 distance=distance,
286 acquired=acquired,
287 instrumentation=instrumentation,
288 limit=limit,
289 offset=offset,
290 )
291
292 if num_bands and len(num_bands) == 2:
293 nbmin, nbmax = num_bands
294 params['num_bands_min'] = nbmin
295 params['num_bands_max'] = nbmax
296
297 if resolution and len(resolution) == 2:
298 rmin, rmax = resolution
299 params['resolution_min'] = rmin
300 params['resolution_max'] = rmax
301
302 if cloud_cover and len(cloud_cover) == 2:
303 ccmin, ccmax = cloud_cover
304 params['cloud_cover_min'] = ccmin
305 params['cloud_cover_max'] = ccmax
306
307 return list(limit_offset_pager(self.session, 'geosearch/raster', params=params))
308
[end of rgdc/rgdc/rgdc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rgdc/rgdc/rgdc.py b/rgdc/rgdc/rgdc.py
--- a/rgdc/rgdc/rgdc.py
+++ b/rgdc/rgdc/rgdc.py
@@ -240,6 +240,13 @@
)
return list(limit_offset_pager(self.session, 'geosearch', params=params))
+ def create_raster_stac(self, raster: Dict) -> Dict:
+ """Create a raster entry using STAC format."""
+ r = self.session.post('geodata/imagery/raster/stac', json=raster)
+ r.raise_for_status()
+
+ return r.json()
+
def search_raster_stac(
self,
query: Optional[Union[Dict, str]] = None,
| {"golden_diff": "diff --git a/rgdc/rgdc/rgdc.py b/rgdc/rgdc/rgdc.py\n--- a/rgdc/rgdc/rgdc.py\n+++ b/rgdc/rgdc/rgdc.py\n@@ -240,6 +240,13 @@\n )\n return list(limit_offset_pager(self.session, 'geosearch', params=params))\n \n+ def create_raster_stac(self, raster: Dict) -> Dict:\n+ \"\"\"Create a raster entry using STAC format.\"\"\"\n+ r = self.session.post('geodata/imagery/raster/stac', json=raster)\n+ r.raise_for_status()\n+\n+ return r.json()\n+\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n", "issue": "expose STAC post endpoint in Python client\nfor https://github.com/ResonantGeoData/issue-dashboard/issues/11\n", "before_files": [{"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'geoprocess/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'geodata/imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'geoprocess/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.\n band: The index of the image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail 
bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('image_file', {}).get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for 
geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n return list(limit_offset_pager(self.session, 'geosearch', params=params))\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'geosearch/raster', params=params))\n", "path": "rgdc/rgdc/rgdc.py"}]} | 4,042 | 179 |
gh_patches_debug_20801 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Internal Server Error when searching with partial dates in Find a Record
## Description
<!-- A clear and concise description of what the bug is. -->
Sometimes, when a date is only partially entered in the Record Search field, the server randomly throws internal server errors.
Sometimes it throws the intended `InvalidDateFormatAPIException`, but other times it throws an `UnboundLocalError`.
Here is a recording of the behaviour:
https://github.com/mathesar-foundation/mathesar/assets/31622972/6e7885c2-9d19-473a-86e0-8c63c5e68c0f
Also, it would be good UI/UX to show the user that the date is invalid. Currently, when the user enters an incorrect date/time format and the server returns a 400 BAD REQUEST `InvalidDateFormatAPIException`, the user won't know, because the table still shows the loading state.
<img width="1700" alt="image" src="https://github.com/mathesar-foundation/mathesar/assets/31622972/78fac439-c326-47ae-8189-08dc61963d75">
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
- The server should not throw a 500 Internal Server Error and should always throw `InvalidDateFormatAPIException` when the date format is wrong.
- The user should be informed when their input date is incorrect.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
## Environment
- OS: macOS
- Browser: Chrome
- Other info:
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
</issue>
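The `UnboundLocalError` is consistent with the `except DataError` branch in the record list view: it only re-raises when `e.orig` is an `InvalidDatetimeFormat`, so any other `DataError` (for example a date field overflow, which the fix below handles explicitly) is swallowed and execution falls through to `records`, which was never assigned. A stripped-down illustration of that control flow (the classes below are simplified stand-ins, not the real psycopg2/SQLAlchemy types):

```python
class InvalidDatetimeFormat(Exception): ...
class DatetimeFieldOverflow(Exception): ...

class DataError(Exception):
    def __init__(self, orig):
        self.orig = orig

def run_query(orig_error):
    raise DataError(orig_error)          # stand-in for the failing paginate_queryset call

def list_records(orig_error):
    try:
        records = run_query(orig_error)
    except DataError as e:
        if isinstance(e.orig, InvalidDatetimeFormat):
            raise                        # intended 400 path
        # any other DataError is silently swallowed here...
    return records                       # ...so this line raises UnboundLocalError

list_records(DatetimeFieldOverflow())    # -> UnboundLocalError, surfaced as a 500
```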
<code>
[start of mathesar/api/db/viewsets/records.py]
1 from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat
2 from rest_access_policy import AccessViewSetMixin
3 from rest_framework import status, viewsets
4 from rest_framework.exceptions import NotFound, MethodNotAllowed
5 from rest_framework.renderers import BrowsableAPIRenderer
6 from rest_framework.response import Response
7 from rest_framework.permissions import IsAuthenticatedOrReadOnly
8 from sqlalchemy.exc import IntegrityError, DataError
9
10 from mathesar.api.db.permissions.records import RecordAccessPolicy
11 from mathesar.api.exceptions.error_codes import ErrorCodes
12 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
13 import mathesar.api.exceptions.generic_exceptions.base_exceptions as generic_api_exceptions
14 from db.functions.exceptions import (
15 BadDBFunctionFormat, ReferencedColumnsDontExist, UnknownDBFunctionID,
16 )
17 from db.records.exceptions import (
18 BadGroupFormat, GroupFieldNotFound, InvalidGroupType, UndefinedFunction,
19 BadSortFormat, SortFieldNotFound
20 )
21 from mathesar.api.pagination import TableLimitOffsetPagination
22 from mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer
23 from mathesar.api.utils import get_table_or_404
24 from mathesar.functions.operations.convert import rewrite_db_function_spec_column_ids_to_names
25 from mathesar.models.base import Table
26 from mathesar.utils.json import MathesarJSONRenderer
27
28
29 class RecordViewSet(AccessViewSetMixin, viewsets.ViewSet):
30 permission_classes = [IsAuthenticatedOrReadOnly]
31 access_policy = RecordAccessPolicy
32
33 # There is no 'update' method.
34 # We're not supporting PUT requests because there aren't a lot of use cases
35 # where the entire record needs to be replaced, PATCH suffices for updates.
36 def get_queryset(self):
37 return Table.objects.all().order_by('-created_at')
38
39 renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]
40
41 # For filter parameter formatting, see:
42 # db/functions/operations/deserialize.py::get_db_function_from_ma_function_spec function doc>
43 # For sorting parameter formatting, see:
44 # https://github.com/centerofci/sqlalchemy-filters#sort-format
45 def list(self, request, table_pk=None):
46 paginator = TableLimitOffsetPagination()
47
48 serializer = RecordListParameterSerializer(data=request.GET)
49 serializer.is_valid(raise_exception=True)
50 table = get_table_or_404(table_pk)
51
52 filter_unprocessed = serializer.validated_data['filter']
53 order_by = serializer.validated_data['order_by']
54 grouping = serializer.validated_data['grouping']
55 search_fuzzy = serializer.validated_data['search_fuzzy']
56 filter_processed = None
57 column_names_to_ids = table.get_column_name_id_bidirectional_map()
58 column_ids_to_names = column_names_to_ids.inverse
59 if filter_unprocessed:
60 filter_processed = rewrite_db_function_spec_column_ids_to_names(
61 column_ids_to_names=column_ids_to_names,
62 spec=filter_unprocessed,
63 )
64 # Replace column id value used in the `field` property with column name
65 name_converted_group_by = None
66 if grouping:
67 group_by_columns_names = [column_ids_to_names[column_id] for column_id in grouping['columns']]
68 name_converted_group_by = {**grouping, 'columns': group_by_columns_names}
69 name_converted_order_by = [{**column, 'field': column_ids_to_names[column['field']]} for column in order_by]
70 name_converted_search = [{**column, 'column': column_ids_to_names[column['field']]} for column in search_fuzzy]
71
72 try:
73 records = paginator.paginate_queryset(
74 self.get_queryset(), request, table, column_names_to_ids,
75 filters=filter_processed,
76 order_by=name_converted_order_by,
77 grouping=name_converted_group_by,
78 search=name_converted_search,
79 duplicate_only=serializer.validated_data['duplicate_only']
80 )
81 except (BadDBFunctionFormat, UnknownDBFunctionID, ReferencedColumnsDontExist) as e:
82 raise database_api_exceptions.BadFilterAPIException(
83 e,
84 field='filters',
85 status_code=status.HTTP_400_BAD_REQUEST
86 )
87 except (BadSortFormat, SortFieldNotFound) as e:
88 raise database_api_exceptions.BadSortAPIException(
89 e,
90 field='order_by',
91 status_code=status.HTTP_400_BAD_REQUEST
92 )
93 except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:
94 raise database_api_exceptions.BadGroupAPIException(
95 e,
96 field='grouping',
97 status_code=status.HTTP_400_BAD_REQUEST
98 )
99 except UndefinedFunction as e:
100 raise database_api_exceptions.UndefinedFunctionAPIException(
101 e,
102 details=e.args[0],
103 status_code=status.HTTP_400_BAD_REQUEST
104 )
105 except DataError as e:
106 if isinstance(e.orig, InvalidDatetimeFormat):
107 raise database_api_exceptions.InvalidDateFormatAPIException(
108 e,
109 status_code=status.HTTP_400_BAD_REQUEST,
110 )
111
112 serializer = RecordSerializer(
113 records,
114 many=True,
115 context=self.get_serializer_context(table)
116 )
117 return paginator.get_paginated_response(serializer.data)
118
119 def retrieve(self, request, pk=None, table_pk=None):
120 table = get_table_or_404(table_pk)
121 # TODO refactor to use serializer for more DRY response logic
122 paginator = TableLimitOffsetPagination()
123 record_filters = {
124 "equal": [
125 {"column_name": [table.primary_key_column_name]},
126 {"literal": [pk]}
127 ]
128 }
129 column_names_to_ids = table.get_column_name_id_bidirectional_map()
130 records = paginator.paginate_queryset(
131 table,
132 request,
133 table,
134 column_names_to_ids,
135 filters=record_filters
136 )
137 if not records:
138 raise NotFound
139 serializer = RecordSerializer(
140 records,
141 many=True,
142 context=self.get_serializer_context(table)
143 )
144 return paginator.get_paginated_response(serializer.data)
145
146 def create(self, request, table_pk=None):
147 table = get_table_or_404(table_pk)
148 primary_key_column_name = None
149 try:
150 primary_key_column_name = table.primary_key_column_name
151 except AssertionError:
152 raise generic_api_exceptions.MethodNotAllowedAPIException(
153 MethodNotAllowed,
154 error_code=ErrorCodes.MethodNotAllowed.value,
155 message="You cannot insert into tables without a primary key"
156 )
157 serializer = RecordSerializer(data=request.data, context=self.get_serializer_context(table))
158 serializer.is_valid(raise_exception=True)
159 serializer.save()
160 # TODO refactor to use serializer for more DRY response logic
161 column_name_id_map = table.get_column_name_id_bidirectional_map()
162 table_pk_column_id = column_name_id_map[primary_key_column_name]
163 pk_value = serializer.data[table_pk_column_id]
164 paginator = TableLimitOffsetPagination()
165 record_filters = {
166 "equal": [
167 {"column_name": [primary_key_column_name]},
168 {"literal": [pk_value]}
169 ]
170 }
171 column_names_to_ids = table.get_column_name_id_bidirectional_map()
172 records = paginator.paginate_queryset(
173 table,
174 request,
175 table,
176 column_names_to_ids,
177 filters=record_filters
178 )
179 serializer = RecordSerializer(
180 records,
181 many=True,
182 context=self.get_serializer_context(table)
183 )
184 response = paginator.get_paginated_response(serializer.data)
185 response.status_code = status.HTTP_201_CREATED
186 return response
187
188 def partial_update(self, request, pk=None, table_pk=None):
189 table = get_table_or_404(table_pk)
190 serializer = RecordSerializer(
191 {'id': pk},
192 data=request.data,
193 context=self.get_serializer_context(table),
194 partial=True
195 )
196 serializer.is_valid(raise_exception=True)
197 serializer.save()
198 # TODO refactor to use serializer for more DRY response logic
199 paginator = TableLimitOffsetPagination()
200 record_filters = {
201 "equal": [
202 {"column_name": [table.primary_key_column_name]},
203 {"literal": [pk]}
204 ]
205 }
206 column_names_to_ids = table.get_column_name_id_bidirectional_map()
207 records = paginator.paginate_queryset(
208 table,
209 request,
210 table,
211 column_names_to_ids,
212 filters=record_filters
213 )
214 serializer = RecordSerializer(
215 records,
216 many=True,
217 context=self.get_serializer_context(table)
218 )
219 return paginator.get_paginated_response(serializer.data)
220
221 def destroy(self, request, pk=None, table_pk=None):
222 table = get_table_or_404(table_pk)
223 if table.get_record(pk) is None:
224 raise generic_api_exceptions.NotFoundAPIException(
225 NotFound,
226 error_code=ErrorCodes.RecordNotFound.value,
227 message="Record doesn't exist"
228 )
229 try:
230 table.delete_record(pk)
231 except IntegrityError as e:
232 if isinstance(e.orig, ForeignKeyViolation):
233 raise database_api_exceptions.ForeignKeyViolationAPIException(
234 e,
235 status_code=status.HTTP_400_BAD_REQUEST,
236 referent_table=table,
237 )
238
239 return Response(status=status.HTTP_204_NO_CONTENT)
240
241 def get_serializer_context(self, table):
242 columns_map = table.get_column_name_id_bidirectional_map()
243 context = {'columns_map': columns_map, 'table': table}
244 return context
245
[end of mathesar/api/db/viewsets/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py
--- a/mathesar/api/db/viewsets/records.py
+++ b/mathesar/api/db/viewsets/records.py
@@ -1,4 +1,4 @@
-from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat
+from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat, DatetimeFieldOverflow
from rest_access_policy import AccessViewSetMixin
from rest_framework import status, viewsets
from rest_framework.exceptions import NotFound, MethodNotAllowed
@@ -108,6 +108,16 @@
e,
status_code=status.HTTP_400_BAD_REQUEST,
)
+ elif isinstance(e.orig, DatetimeFieldOverflow):
+ raise database_api_exceptions.InvalidDateAPIException(
+ e,
+ status_code=status.HTTP_400_BAD_REQUEST,
+ )
+ else:
+ raise database_api_exceptions.MathesarAPIException(
+ e,
+ status_code=status.HTTP_400_BAD_REQUEST
+ )
serializer = RecordSerializer(
records,
| {"golden_diff": "diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py\n--- a/mathesar/api/db/viewsets/records.py\n+++ b/mathesar/api/db/viewsets/records.py\n@@ -1,4 +1,4 @@\n-from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat\n+from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat, DatetimeFieldOverflow\n from rest_access_policy import AccessViewSetMixin\n from rest_framework import status, viewsets\n from rest_framework.exceptions import NotFound, MethodNotAllowed\n@@ -108,6 +108,16 @@\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n+ elif isinstance(e.orig, DatetimeFieldOverflow):\n+ raise database_api_exceptions.InvalidDateAPIException(\n+ e,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ )\n+ else:\n+ raise database_api_exceptions.MathesarAPIException(\n+ e,\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n \n serializer = RecordSerializer(\n records,\n", "issue": "Internal Server Error when searching with partial dates in Find a Record\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nSometimes when partially entering the date in the Record Search Field, it randomly throws internal server errors.\r\nSometimes it throws the intended `InvalidDateFormatAPIException` but other times it `UnboundLocalError`.\r\n\r\nHere is a recording of the behaviour:\r\n\r\nhttps://github.com/mathesar-foundation/mathesar/assets/31622972/6e7885c2-9d19-473a-86e0-8c63c5e68c0f\r\n\r\n\r\nAlso, it would be good UI/UX to show the user the date is invalid. In case when the user has just put an incorrect date time format and the server returns a 400 BAD REQUEST InvalidDateFormatAPIException. The user won't know as the table still shows the loading state.\r\n<img width=\"1700\" alt=\"image\" src=\"https://github.com/mathesar-foundation/mathesar/assets/31622972/78fac439-c326-47ae-8189-08dc61963d75\">\r\n\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n- Server should not throw 500 internal server error and always throw InvalidDateFormatAPIException in case of wrong date format.\r\n- User should be conveyed if their input date is incorrect.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\n## Environment\r\n - OS: macOS\r\n - Browser: Chrome\r\n - Other info:\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. 
-->\r\n\n", "before_files": [{"content": "from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound, MethodNotAllowed\nfrom rest_framework.renderers import BrowsableAPIRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom sqlalchemy.exc import IntegrityError, DataError\n\nfrom mathesar.api.db.permissions.records import RecordAccessPolicy\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nimport mathesar.api.exceptions.generic_exceptions.base_exceptions as generic_api_exceptions\nfrom db.functions.exceptions import (\n BadDBFunctionFormat, ReferencedColumnsDontExist, UnknownDBFunctionID,\n)\nfrom db.records.exceptions import (\n BadGroupFormat, GroupFieldNotFound, InvalidGroupType, UndefinedFunction,\n BadSortFormat, SortFieldNotFound\n)\nfrom mathesar.api.pagination import TableLimitOffsetPagination\nfrom mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.functions.operations.convert import rewrite_db_function_spec_column_ids_to_names\nfrom mathesar.models.base import Table\nfrom mathesar.utils.json import MathesarJSONRenderer\n\n\nclass RecordViewSet(AccessViewSetMixin, viewsets.ViewSet):\n permission_classes = [IsAuthenticatedOrReadOnly]\n access_policy = RecordAccessPolicy\n\n # There is no 'update' method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]\n\n # For filter parameter formatting, see:\n # db/functions/operations/deserialize.py::get_db_function_from_ma_function_spec function doc>\n # For sorting parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#sort-format\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n\n serializer = RecordListParameterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n table = get_table_or_404(table_pk)\n\n filter_unprocessed = serializer.validated_data['filter']\n order_by = serializer.validated_data['order_by']\n grouping = serializer.validated_data['grouping']\n search_fuzzy = serializer.validated_data['search_fuzzy']\n filter_processed = None\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n column_ids_to_names = column_names_to_ids.inverse\n if filter_unprocessed:\n filter_processed = rewrite_db_function_spec_column_ids_to_names(\n column_ids_to_names=column_ids_to_names,\n spec=filter_unprocessed,\n )\n # Replace column id value used in the `field` property with column name\n name_converted_group_by = None\n if grouping:\n group_by_columns_names = [column_ids_to_names[column_id] for column_id in grouping['columns']]\n name_converted_group_by = {**grouping, 'columns': group_by_columns_names}\n name_converted_order_by = [{**column, 'field': column_ids_to_names[column['field']]} for column in order_by]\n name_converted_search = [{**column, 'column': column_ids_to_names[column['field']]} for column in search_fuzzy]\n\n try:\n records = paginator.paginate_queryset(\n 
self.get_queryset(), request, table, column_names_to_ids,\n filters=filter_processed,\n order_by=name_converted_order_by,\n grouping=name_converted_group_by,\n search=name_converted_search,\n duplicate_only=serializer.validated_data['duplicate_only']\n )\n except (BadDBFunctionFormat, UnknownDBFunctionID, ReferencedColumnsDontExist) as e:\n raise database_api_exceptions.BadFilterAPIException(\n e,\n field='filters',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadSortFormat, SortFieldNotFound) as e:\n raise database_api_exceptions.BadSortAPIException(\n e,\n field='order_by',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n raise database_api_exceptions.BadGroupAPIException(\n e,\n field='grouping',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UndefinedFunction as e:\n raise database_api_exceptions.UndefinedFunctionAPIException(\n e,\n details=e.args[0],\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except DataError as e:\n if isinstance(e.orig, InvalidDatetimeFormat):\n raise database_api_exceptions.InvalidDateFormatAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n if not records:\n raise NotFound\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n primary_key_column_name = None\n try:\n primary_key_column_name = table.primary_key_column_name\n except AssertionError:\n raise generic_api_exceptions.MethodNotAllowedAPIException(\n MethodNotAllowed,\n error_code=ErrorCodes.MethodNotAllowed.value,\n message=\"You cannot insert into tables without a primary key\"\n )\n serializer = RecordSerializer(data=request.data, context=self.get_serializer_context(table))\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n column_name_id_map = table.get_column_name_id_bidirectional_map()\n table_pk_column_id = column_name_id_map[primary_key_column_name]\n pk_value = serializer.data[table_pk_column_id]\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [primary_key_column_name]},\n {\"literal\": [pk_value]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n response = paginator.get_paginated_response(serializer.data)\n response.status_code = status.HTTP_201_CREATED\n return response\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = 
get_table_or_404(table_pk)\n serializer = RecordSerializer(\n {'id': pk},\n data=request.data,\n context=self.get_serializer_context(table),\n partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n if table.get_record(pk) is None:\n raise generic_api_exceptions.NotFoundAPIException(\n NotFound,\n error_code=ErrorCodes.RecordNotFound.value,\n message=\"Record doesn't exist\"\n )\n try:\n table.delete_record(pk)\n except IntegrityError as e:\n if isinstance(e.orig, ForeignKeyViolation):\n raise database_api_exceptions.ForeignKeyViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n referent_table=table,\n )\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def get_serializer_context(self, table):\n columns_map = table.get_column_name_id_bidirectional_map()\n context = {'columns_map': columns_map, 'table': table}\n return context\n", "path": "mathesar/api/db/viewsets/records.py"}]} | 3,527 | 236 |
gh_patches_debug_54112 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1639 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some translation strings missing/not applied
**Describe the bug**
Some translations are not being applied.
**To Reproduce**
Change the Bookwyrm language to one other than English (tested with Lithuanian), then visit:
https://ziurkes.group.lt/user/athinkingmeat/books/read
https://ziurkes.group.lt/user/athinkingmeat/books/reading
https://ziurkes.group.lt/user/athinkingmeat/books/to-read
**Expected behavior**
All of these links should show the "read", "currently reading" and "to read" strings translated, but they are shown in English.
**Screenshots**



**Instance**
https://ziurkes.group.lt/
**Additional context**
This is probably a problem with other languages as well.
</issue>
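One common cause of untranslated strings in a Django app like Bookwyrm (an assumption here, not confirmed by this report) is a literal that was never marked for gettext, since only marked strings end up in the translation catalogue:

```python
from django.utils.translation import gettext_lazy as _

# Purely illustrative mapping; the actual shelf names in Bookwyrm are defined elsewhere.
SHELF_NAMES = {
    "read": _("Read"),                  # extracted by makemessages, shown translated
    "reading": _("Currently Reading"),  # extracted and translated
    "to-read": "To Read",               # plain literal: always rendered in English
}
```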
<code>
[start of bookwyrm/utils/isni.py]
1 """ISNI author checking utilities"""
2 import xml.etree.ElementTree as ET
3 import requests
4
5 from bookwyrm import activitypub, models
6
7
8 def request_isni_data(search_index, search_term, max_records=5):
9 """Request data from the ISNI API"""
10
11 search_string = f'{search_index}="{search_term}"'
12 query_params = {
13 "query": search_string,
14 "version": "1.1",
15 "operation": "searchRetrieve",
16 "recordSchema": "isni-b",
17 "maximumRecords": max_records,
18 "startRecord": "1",
19 "recordPacking": "xml",
20 "sortKeys": "RLV,pica,0,,",
21 }
22 result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=10)
23 # the OCLC ISNI server asserts the payload is encoded
24 # in latin1, but we know better
25 result.encoding = "utf-8"
26 return result.text
27
28
29 def make_name_string(element):
30 """create a string of form 'personal_name surname'"""
31
32 # NOTE: this will often be incorrect, many naming systems
33 # list "surname" before personal name
34 forename = element.find(".//forename")
35 surname = element.find(".//surname")
36 if forename is not None:
37 return "".join([forename.text, " ", surname.text])
38 return surname.text
39
40
41 def get_other_identifier(element, code):
42 """Get other identifiers associated with an author from their ISNI record"""
43
44 identifiers = element.findall(".//otherIdentifierOfIdentity")
45 for section_head in identifiers:
46 if (
47 section_head.find(".//type") is not None
48 and section_head.find(".//type").text == code
49 and section_head.find(".//identifier") is not None
50 ):
51 return section_head.find(".//identifier").text
52
53 # if we can't find it in otherIdentifierOfIdentity,
54 # try sources
55 for source in element.findall(".//sources"):
56 code_of_source = source.find(".//codeOfSource")
57 if code_of_source is not None and code_of_source.text.lower() == code.lower():
58 return source.find(".//sourceIdentifier").text
59
60 return ""
61
62
63 def get_external_information_uri(element, match_string):
64 """Get URLs associated with an author from their ISNI record"""
65
66 sources = element.findall(".//externalInformation")
67 for source in sources:
68 information = source.find(".//information")
69 uri = source.find(".//URI")
70 if (
71 uri is not None
72 and information is not None
73 and information.text.lower() == match_string.lower()
74 ):
75 return uri.text
76 return ""
77
78
79 def find_authors_by_name(name_string, description=False):
80 """Query the ISNI database for possible author matches by name"""
81
82 payload = request_isni_data("pica.na", name_string)
83 # parse xml
84 root = ET.fromstring(payload)
85 # build list of possible authors
86 possible_authors = []
87 for element in root.iter("responseRecord"):
88 personal_name = element.find(".//forename/..")
89 if not personal_name:
90 continue
91
92 author = get_author_from_isni(element.find(".//isniUnformatted").text)
93
94 if bool(description):
95
96 titles = []
97 # prefer title records from LoC+ coop, Australia, Ireland, or Singapore
98 # in that order
99 for source in ["LCNACO", "NLA", "N6I", "NLB"]:
100 for parent in element.findall(f'.//titleOfWork/[@source="{source}"]'):
101 titles.append(parent.find(".//title"))
102 for parent in element.findall(f'.//titleOfWork[@subsource="{source}"]'):
103 titles.append(parent.find(".//title"))
104 # otherwise just grab the first title listing
105 titles.append(element.find(".//title"))
106
107 if titles is not None:
108 # some of the "titles" in ISNI are a little ...iffy
109 # '@' is used by ISNI/OCLC to index the starting point ignoring stop words
110 # (e.g. "The @Government of no one")
111 title_elements = [
112 e for e in titles if not e.text.replace("@", "").isnumeric()
113 ]
114 if len(title_elements):
115 author.bio = title_elements[0].text.replace("@", "")
116 else:
117 author.bio = None
118
119 possible_authors.append(author)
120
121 return possible_authors
122
123
124 def get_author_from_isni(isni):
125 """Find data to populate a new author record from their ISNI"""
126
127 payload = request_isni_data("pica.isn", isni)
128 # parse xml
129 root = ET.fromstring(payload)
130 # there should only be a single responseRecord
131 # but let's use the first one just in case
132 element = root.find(".//responseRecord")
133 name = make_name_string(element.find(".//forename/.."))
134 viaf = get_other_identifier(element, "viaf")
135 # use a set to dedupe aliases in ISNI
136 aliases = set()
137 aliases_element = element.findall(".//personalNameVariant")
138 for entry in aliases_element:
139 aliases.add(make_name_string(entry))
140 # aliases needs to be list not set
141 aliases = list(aliases)
142 bio = element.find(".//nameTitle")
143 bio = bio.text if bio is not None else ""
144 wikipedia = get_external_information_uri(element, "Wikipedia")
145
146 author = activitypub.Author(
147 id=element.find(".//isniURI").text,
148 name=name,
149 isni=isni,
150 viafId=viaf,
151 aliases=aliases,
152 bio=bio,
153 wikipediaLink=wikipedia,
154 )
155
156 return author
157
158
159 def build_author_from_isni(match_value):
160 """Build basic author class object from ISNI URL"""
161
162 # if it is an isni value get the data
163 if match_value.startswith("https://isni.org/isni/"):
164 isni = match_value.replace("https://isni.org/isni/", "")
165 return {"author": get_author_from_isni(isni)}
166 # otherwise it's a name string
167 return {}
168
169
170 def augment_author_metadata(author, isni):
171 """Update any missing author fields from ISNI data"""
172
173 isni_author = get_author_from_isni(isni)
174 isni_author.to_model(model=models.Author, instance=author, overwrite=False)
175
176 # we DO want to overwrite aliases because we're adding them to the
177 # existing aliases and ISNI will usually have more.
178 # We need to dedupe because ISNI records often have lots of dupe aliases
179 aliases = set(isni_author.aliases)
180 for alias in author.aliases:
181 aliases.add(alias)
182 author.aliases = list(aliases)
183 author.save()
184
[end of bookwyrm/utils/isni.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/utils/isni.py b/bookwyrm/utils/isni.py
--- a/bookwyrm/utils/isni.py
+++ b/bookwyrm/utils/isni.py
@@ -19,7 +19,7 @@
"recordPacking": "xml",
"sortKeys": "RLV,pica,0,,",
}
- result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=10)
+ result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=15)
# the OCLC ISNI server asserts the payload is encoded
# in latin1, but we know better
result.encoding = "utf-8"
| {"golden_diff": "diff --git a/bookwyrm/utils/isni.py b/bookwyrm/utils/isni.py\n--- a/bookwyrm/utils/isni.py\n+++ b/bookwyrm/utils/isni.py\n@@ -19,7 +19,7 @@\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n- result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=10)\n+ result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=15)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n", "issue": "Some translation strings missing/not applied\n**Describe the bug**\r\nSome translations are not being applied.\r\n\r\n**To Reproduce**\r\n\r\nchange Bookwyrm language to other than English (tested with Lithuanian)\r\n\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/read\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/reading\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/to-read\r\n\r\n**Expected behavior**\r\nAll these links should have \"read\", \"currently reading\" and \"to read\" strings translated, but they are shown in English\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n**Instance**\r\n\r\nhttps://ziurkes.group.lt/\r\n\r\n**Additional context**\r\nProbably is a problem with other languages as well\r\n\n", "before_files": [{"content": "\"\"\"ISNI author checking utilities\"\"\"\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom bookwyrm import activitypub, models\n\n\ndef request_isni_data(search_index, search_term, max_records=5):\n \"\"\"Request data from the ISNI API\"\"\"\n\n search_string = f'{search_index}=\"{search_term}\"'\n query_params = {\n \"query\": search_string,\n \"version\": \"1.1\",\n \"operation\": \"searchRetrieve\",\n \"recordSchema\": \"isni-b\",\n \"maximumRecords\": max_records,\n \"startRecord\": \"1\",\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=10)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n return result.text\n\n\ndef make_name_string(element):\n \"\"\"create a string of form 'personal_name surname'\"\"\"\n\n # NOTE: this will often be incorrect, many naming systems\n # list \"surname\" before personal name\n forename = element.find(\".//forename\")\n surname = element.find(\".//surname\")\n if forename is not None:\n return \"\".join([forename.text, \" \", surname.text])\n return surname.text\n\n\ndef get_other_identifier(element, code):\n \"\"\"Get other identifiers associated with an author from their ISNI record\"\"\"\n\n identifiers = element.findall(\".//otherIdentifierOfIdentity\")\n for section_head in identifiers:\n if (\n section_head.find(\".//type\") is not None\n and section_head.find(\".//type\").text == code\n and section_head.find(\".//identifier\") is not None\n ):\n return section_head.find(\".//identifier\").text\n\n # if we can't find it in otherIdentifierOfIdentity,\n # try sources\n for source in element.findall(\".//sources\"):\n code_of_source = source.find(\".//codeOfSource\")\n if code_of_source is not None and code_of_source.text.lower() == code.lower():\n return source.find(\".//sourceIdentifier\").text\n\n return \"\"\n\n\ndef get_external_information_uri(element, match_string):\n \"\"\"Get URLs associated with an author from their ISNI record\"\"\"\n\n sources = element.findall(\".//externalInformation\")\n for source in sources:\n information = 
source.find(\".//information\")\n uri = source.find(\".//URI\")\n if (\n uri is not None\n and information is not None\n and information.text.lower() == match_string.lower()\n ):\n return uri.text\n return \"\"\n\n\ndef find_authors_by_name(name_string, description=False):\n \"\"\"Query the ISNI database for possible author matches by name\"\"\"\n\n payload = request_isni_data(\"pica.na\", name_string)\n # parse xml\n root = ET.fromstring(payload)\n # build list of possible authors\n possible_authors = []\n for element in root.iter(\"responseRecord\"):\n personal_name = element.find(\".//forename/..\")\n if not personal_name:\n continue\n\n author = get_author_from_isni(element.find(\".//isniUnformatted\").text)\n\n if bool(description):\n\n titles = []\n # prefer title records from LoC+ coop, Australia, Ireland, or Singapore\n # in that order\n for source in [\"LCNACO\", \"NLA\", \"N6I\", \"NLB\"]:\n for parent in element.findall(f'.//titleOfWork/[@source=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n for parent in element.findall(f'.//titleOfWork[@subsource=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n # otherwise just grab the first title listing\n titles.append(element.find(\".//title\"))\n\n if titles is not None:\n # some of the \"titles\" in ISNI are a little ...iffy\n # '@' is used by ISNI/OCLC to index the starting point ignoring stop words\n # (e.g. \"The @Government of no one\")\n title_elements = [\n e for e in titles if not e.text.replace(\"@\", \"\").isnumeric()\n ]\n if len(title_elements):\n author.bio = title_elements[0].text.replace(\"@\", \"\")\n else:\n author.bio = None\n\n possible_authors.append(author)\n\n return possible_authors\n\n\ndef get_author_from_isni(isni):\n \"\"\"Find data to populate a new author record from their ISNI\"\"\"\n\n payload = request_isni_data(\"pica.isn\", isni)\n # parse xml\n root = ET.fromstring(payload)\n # there should only be a single responseRecord\n # but let's use the first one just in case\n element = root.find(\".//responseRecord\")\n name = make_name_string(element.find(\".//forename/..\"))\n viaf = get_other_identifier(element, \"viaf\")\n # use a set to dedupe aliases in ISNI\n aliases = set()\n aliases_element = element.findall(\".//personalNameVariant\")\n for entry in aliases_element:\n aliases.add(make_name_string(entry))\n # aliases needs to be list not set\n aliases = list(aliases)\n bio = element.find(\".//nameTitle\")\n bio = bio.text if bio is not None else \"\"\n wikipedia = get_external_information_uri(element, \"Wikipedia\")\n\n author = activitypub.Author(\n id=element.find(\".//isniURI\").text,\n name=name,\n isni=isni,\n viafId=viaf,\n aliases=aliases,\n bio=bio,\n wikipediaLink=wikipedia,\n )\n\n return author\n\n\ndef build_author_from_isni(match_value):\n \"\"\"Build basic author class object from ISNI URL\"\"\"\n\n # if it is an isni value get the data\n if match_value.startswith(\"https://isni.org/isni/\"):\n isni = match_value.replace(\"https://isni.org/isni/\", \"\")\n return {\"author\": get_author_from_isni(isni)}\n # otherwise it's a name string\n return {}\n\n\ndef augment_author_metadata(author, isni):\n \"\"\"Update any missing author fields from ISNI data\"\"\"\n\n isni_author = get_author_from_isni(isni)\n isni_author.to_model(model=models.Author, instance=author, overwrite=False)\n\n # we DO want to overwrite aliases because we're adding them to the\n # existing aliases and ISNI will usually have more.\n # We need to dedupe because ISNI records often have lots of 
dupe aliases\n aliases = set(isni_author.aliases)\n for alias in author.aliases:\n aliases.add(alias)\n author.aliases = list(aliases)\n author.save()\n", "path": "bookwyrm/utils/isni.py"}]} | 2,798 | 164 |
gh_patches_debug_11968 | rasdani/github-patches | git_diff | getredash__redash-1484 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong sort for Date column
It happens if sort by date column with UI.
Screenshot shows everything.
<img width="544" alt="2016-12-06 14 37 07" src="https://cloud.githubusercontent.com/assets/7091907/20924299/dea511b4-bbc1-11e6-8ffd-2bdda4bcbbd8.png">
</issue>
<code>
[start of redash/handlers/queries.py]
1 from itertools import chain
2
3 import sqlparse
4 from flask import jsonify, request
5 from flask_login import login_required
6 from flask_restful import abort
7 from funcy import distinct, take
8 from sqlalchemy.orm.exc import StaleDataError
9
10 from redash import models
11 from redash.handlers.base import (BaseResource, get_object_or_404,
12 org_scoped_rule, paginate, routes)
13 from redash.handlers.query_results import run_query
14 from redash.permissions import (can_modify, not_view_only, require_access,
15 require_admin_or_owner,
16 require_object_modify_permission,
17 require_permission, view_only)
18 from redash.utils import collect_parameters_from_request
19
20
21 @routes.route(org_scoped_rule('/api/queries/format'), methods=['POST'])
22 @login_required
23 def format_sql_query(org_slug=None):
24 arguments = request.get_json(force=True)
25 query = arguments.get("query", "")
26
27 return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})
28
29
30 class QuerySearchResource(BaseResource):
31 @require_permission('view_query')
32 def get(self):
33 term = request.args.get('q', '')
34
35 return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids)]
36
37
38 class QueryRecentResource(BaseResource):
39 @require_permission('view_query')
40 def get(self):
41 queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)
42 recent = [d.to_dict(with_last_modified_by=False) for d in queries]
43
44 global_recent = []
45 if len(recent) < 10:
46 global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.group_ids)]
47
48 return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
49
50
51 class QueryListResource(BaseResource):
52 @require_permission('create_query')
53 def post(self):
54 query_def = request.get_json(force=True)
55 data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)
56 require_access(data_source.groups, self.current_user, not_view_only)
57
58 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
59 query_def.pop(field, None)
60
61 # If we already executed this query, save the query result reference
62 if 'latest_query_data_id' in query_def:
63 query_def['latest_query_data'] = query_def.pop('latest_query_data_id')
64
65 query_def['query_text'] = query_def.pop('query')
66 query_def['user'] = self.current_user
67 query_def['data_source'] = data_source
68 query_def['org'] = self.current_org
69 query_def['is_draft'] = True
70 query = models.Query.create(**query_def)
71 models.db.session.add(query)
72 models.db.session.commit()
73
74 self.record_event({
75 'action': 'create',
76 'object_id': query.id,
77 'object_type': 'query'
78 })
79
80 return query.to_dict()
81
82 @require_permission('view_query')
83 def get(self):
84 results = models.Query.all_queries(self.current_user.group_ids)
85 page = request.args.get('page', 1, type=int)
86 page_size = request.args.get('page_size', 25, type=int)
87 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
88
89
90 class MyQueriesResource(BaseResource):
91 @require_permission('view_query')
92 def get(self):
93 drafts = request.args.get('drafts') is not None
94 results = models.Query.by_user(self.current_user, drafts)
95 page = request.args.get('page', 1, type=int)
96 page_size = request.args.get('page_size', 25, type=int)
97 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
98
99
100 class QueryResource(BaseResource):
101 @require_permission('edit_query')
102 def post(self, query_id):
103 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
104 query_def = request.get_json(force=True)
105
106 require_object_modify_permission(query, self.current_user)
107
108 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:
109 query_def.pop(field, None)
110
111 if 'query' in query_def:
112 query_def['query_text'] = query_def.pop('query')
113
114 query_def['last_modified_by'] = self.current_user
115 query_def['changed_by'] = self.current_user
116 # SQLAlchemy handles the case where a concurrent transaction beats us
117 # to the update. But we still have to make sure that we're not starting
118 # out behind.
119 if 'version' in query_def and query_def['version'] != query.version:
120 abort(409)
121
122 try:
123 self.update_model(query, query_def)
124 models.db.session.commit()
125 except StaleDataError:
126 abort(409)
127
128 result = query.to_dict(with_visualizations=True)
129 return result
130
131 @require_permission('view_query')
132 def get(self, query_id):
133 q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
134 require_access(q.groups, self.current_user, view_only)
135
136 result = q.to_dict(with_visualizations=True)
137 result['can_edit'] = can_modify(q, self.current_user)
138 return result
139
140 # TODO: move to resource of its own? (POST /queries/{id}/archive)
141 def delete(self, query_id):
142 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
143 require_admin_or_owner(query.user_id)
144 query.archive(self.current_user)
145
146
147 class QueryForkResource(BaseResource):
148 @require_permission('edit_query')
149 def post(self, query_id):
150 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
151 forked_query = query.fork(self.current_user)
152 return forked_query.to_dict(with_visualizations=True)
153
154
155 class QueryRefreshResource(BaseResource):
156 def post(self, query_id):
157 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
158 require_access(query.groups, self.current_user, not_view_only)
159
160 parameter_values = collect_parameters_from_request(request.args)
161
162 return run_query(query.data_source, parameter_values, query.query_text, query.id)
163
[end of redash/handlers/queries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -58,10 +58,6 @@
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
query_def.pop(field, None)
- # If we already executed this query, save the query result reference
- if 'latest_query_data_id' in query_def:
- query_def['latest_query_data'] = query_def.pop('latest_query_data_id')
-
query_def['query_text'] = query_def.pop('query')
query_def['user'] = self.current_user
query_def['data_source'] = data_source
| {"golden_diff": "diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py\n--- a/redash/handlers/queries.py\n+++ b/redash/handlers/queries.py\n@@ -58,10 +58,6 @@\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n \n- # If we already executed this query, save the query result reference\n- if 'latest_query_data_id' in query_def:\n- query_def['latest_query_data'] = query_def.pop('latest_query_data_id')\n-\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n", "issue": "Wrong sort for Date column\nIt happens if sort by date column with UI.\r\n\r\nScreenshot shows everything. \r\n\r\n<img width=\"544\" alt=\"2016-12-06 14 37 07\" src=\"https://cloud.githubusercontent.com/assets/7091907/20924299/dea511b4-bbc1-11e6-8ffd-2bdda4bcbbd8.png\">\r\n\n", "before_files": [{"content": "from itertools import chain\n\nimport sqlparse\nfrom flask import jsonify, request\nfrom flask_login import login_required\nfrom flask_restful import abort\nfrom funcy import distinct, take\nfrom sqlalchemy.orm.exc import StaleDataError\n\nfrom redash import models\nfrom redash.handlers.base import (BaseResource, get_object_or_404,\n org_scoped_rule, paginate, routes)\nfrom redash.handlers.query_results import run_query\nfrom redash.permissions import (can_modify, not_view_only, require_access,\n require_admin_or_owner,\n require_object_modify_permission,\n require_permission, view_only)\nfrom redash.utils import collect_parameters_from_request\n\n\[email protected](org_scoped_rule('/api/queries/format'), methods=['POST'])\n@login_required\ndef format_sql_query(org_slug=None):\n arguments = request.get_json(force=True)\n query = arguments.get(\"query\", \"\")\n\n return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})\n\n\nclass QuerySearchResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n term = request.args.get('q', '')\n\n return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids)]\n\n\nclass QueryRecentResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)\n recent = [d.to_dict(with_last_modified_by=False) for d in queries]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.group_ids)]\n\n return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n\nclass QueryListResource(BaseResource):\n @require_permission('create_query')\n def post(self):\n query_def = request.get_json(force=True)\n data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)\n require_access(data_source.groups, self.current_user, not_view_only)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n\n # If we already executed this query, save the query result reference\n if 'latest_query_data_id' in query_def:\n query_def['latest_query_data'] = query_def.pop('latest_query_data_id')\n\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n query_def['org'] = self.current_org\n query_def['is_draft'] = True\n query = 
models.Query.create(**query_def)\n models.db.session.add(query)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'create',\n 'object_id': query.id,\n 'object_type': 'query'\n })\n\n return query.to_dict()\n\n @require_permission('view_query')\n def get(self):\n results = models.Query.all_queries(self.current_user.group_ids)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass MyQueriesResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n drafts = request.args.get('drafts') is not None\n results = models.Query.by_user(self.current_user, drafts)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass QueryResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n query_def = request.get_json(force=True)\n\n require_object_modify_permission(query, self.current_user)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:\n query_def.pop(field, None)\n\n if 'query' in query_def:\n query_def['query_text'] = query_def.pop('query')\n\n query_def['last_modified_by'] = self.current_user\n query_def['changed_by'] = self.current_user\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. But we still have to make sure that we're not starting\n # out behind.\n if 'version' in query_def and query_def['version'] != query.version:\n abort(409)\n\n try:\n self.update_model(query, query_def)\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n result = query.to_dict(with_visualizations=True)\n return result\n\n @require_permission('view_query')\n def get(self, query_id):\n q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(q.groups, self.current_user, view_only)\n\n result = q.to_dict(with_visualizations=True)\n result['can_edit'] = can_modify(q, self.current_user)\n return result\n\n # TODO: move to resource of its own? (POST /queries/{id}/archive)\n def delete(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_admin_or_owner(query.user_id)\n query.archive(self.current_user)\n\n\nclass QueryForkResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n forked_query = query.fork(self.current_user)\n return forked_query.to_dict(with_visualizations=True)\n\n\nclass QueryRefreshResource(BaseResource):\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.groups, self.current_user, not_view_only)\n\n parameter_values = collect_parameters_from_request(request.args)\n\n return run_query(query.data_source, parameter_values, query.query_text, query.id)\n", "path": "redash/handlers/queries.py"}]} | 2,484 | 180 |
gh_patches_debug_27380 | rasdani/github-patches | git_diff | deis__deis-2991 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deis blocks domain creation under improper circumstances
`$ deis domains:add django.paas-sandbox`
`Adding django.paas-sandbox to rubber-undertow... 400 BAD REQUEST`
`{u'domain': [u'Hostname does not look like a valid hostname. Only lowercase characters are allowed.']}`
deis will not let me create a domain where the top of the domain is "paas-sandbox", as displayed above, however this is not a top level domain nor is it intended as one. Our corporation uses an internal domain that is set as the dns search on all corporate computers. For example, if someone types `http://www/` in their browser, it will come up because their dns searches `corp.example.com` for non qualifying domains. They are really going to the site `www.corp.example.com` but all they type and all the host header on the server sees is "www". deis requires the domain to exist how the user will display it in the browser in order to render the page. Our DNS system is almost identical to Active Directory so anyone using Active Directory can relate but this is not a MS / Active Directory specific scenario. This can be done using only dhcp and most ISP's will do this from their dhcp as well.
Anyways, please fix. Thank you.
</issue>
<code>
[start of controller/api/serializers.py]
1 """
2 Classes to serialize the RESTful representation of Deis API models.
3 """
4
5 from __future__ import unicode_literals
6
7 import json
8 import re
9
10 from django.conf import settings
11 from django.contrib.auth.models import User
12 from django.utils import timezone
13 from rest_framework import serializers
14 from rest_framework.validators import UniqueTogetherValidator
15
16 from api import models
17
18
19 PROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')
20 MEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+[BbKkMmGg])$')
21 CPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')
22 TAGKEY_MATCH = re.compile(r'^[a-z]+$')
23 TAGVAL_MATCH = re.compile(r'^\w+$')
24
25
26 class JSONFieldSerializer(serializers.Field):
27 def to_representation(self, obj):
28 return obj
29
30 def to_internal_value(self, data):
31 try:
32 val = json.loads(data)
33 except TypeError:
34 val = data
35 return val
36
37
38 class ModelSerializer(serializers.ModelSerializer):
39
40 uuid = serializers.ReadOnlyField()
41
42 def get_validators(self):
43 """
44 Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.
45
46 See https://github.com/deis/deis/pull/2898#discussion_r23105147
47 """
48 validators = super(ModelSerializer, self).get_validators()
49 for v in validators:
50 if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:
51 validators.remove(v)
52 return validators
53
54
55 class UserSerializer(serializers.ModelSerializer):
56 class Meta:
57 model = User
58 fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',
59 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',
60 'is_active']
61 read_only_fields = ['is_superuser', 'is_staff', 'groups',
62 'user_permissions', 'last_login', 'date_joined', 'is_active']
63 extra_kwargs = {'password': {'write_only': True}}
64
65 def create(self, validated_data):
66 now = timezone.now()
67 user = User(
68 email=validated_data.get('email'),
69 username=validated_data.get('username'),
70 last_login=now,
71 date_joined=now,
72 is_active=True
73 )
74 if validated_data.get('first_name'):
75 user.first_name = validated_data['first_name']
76 if validated_data.get('last_name'):
77 user.last_name = validated_data['last_name']
78 user.set_password(validated_data['password'])
79 # Make the first signup an admin / superuser
80 if not User.objects.filter(is_superuser=True).exists():
81 user.is_superuser = user.is_staff = True
82 user.save()
83 return user
84
85
86 class AdminUserSerializer(serializers.ModelSerializer):
87 """Serialize admin status for a User model."""
88
89 class Meta:
90 model = User
91 fields = ['username', 'is_superuser']
92 read_only_fields = ['username']
93
94
95 class AppSerializer(ModelSerializer):
96 """Serialize a :class:`~api.models.App` model."""
97
98 owner = serializers.ReadOnlyField(source='owner.username')
99 structure = JSONFieldSerializer(required=False)
100 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
101 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
102
103 class Meta:
104 """Metadata options for a :class:`AppSerializer`."""
105 model = models.App
106 fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']
107 read_only_fields = ['uuid']
108
109
110 class BuildSerializer(ModelSerializer):
111 """Serialize a :class:`~api.models.Build` model."""
112
113 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
114 owner = serializers.ReadOnlyField(source='owner.username')
115 procfile = JSONFieldSerializer(required=False)
116 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
117 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
118
119 class Meta:
120 """Metadata options for a :class:`BuildSerializer`."""
121 model = models.Build
122 fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',
123 'updated', 'uuid']
124 read_only_fields = ['uuid']
125
126
127 class ConfigSerializer(ModelSerializer):
128 """Serialize a :class:`~api.models.Config` model."""
129
130 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
131 owner = serializers.ReadOnlyField(source='owner.username')
132 values = JSONFieldSerializer(required=False)
133 memory = JSONFieldSerializer(required=False)
134 cpu = JSONFieldSerializer(required=False)
135 tags = JSONFieldSerializer(required=False)
136 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
137 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
138
139 class Meta:
140 """Metadata options for a :class:`ConfigSerializer`."""
141 model = models.Config
142
143 def validate_memory(self, value):
144 for k, v in value.items():
145 if v is None: # use NoneType to unset a value
146 continue
147 if not re.match(PROCTYPE_MATCH, k):
148 raise serializers.ValidationError("Process types can only contain [a-z]")
149 if not re.match(MEMLIMIT_MATCH, str(v)):
150 raise serializers.ValidationError(
151 "Limit format: <number><unit>, where unit = B, K, M or G")
152 return value
153
154 def validate_cpu(self, value):
155 for k, v in value.items():
156 if v is None: # use NoneType to unset a value
157 continue
158 if not re.match(PROCTYPE_MATCH, k):
159 raise serializers.ValidationError("Process types can only contain [a-z]")
160 shares = re.match(CPUSHARE_MATCH, str(v))
161 if not shares:
162 raise serializers.ValidationError("CPU shares must be an integer")
163 for v in shares.groupdict().values():
164 try:
165 i = int(v)
166 except ValueError:
167 raise serializers.ValidationError("CPU shares must be an integer")
168 if i > 1024 or i < 0:
169 raise serializers.ValidationError("CPU shares must be between 0 and 1024")
170 return value
171
172 def validate_tags(self, value):
173 for k, v in value.items():
174 if v is None: # use NoneType to unset a value
175 continue
176 if not re.match(TAGKEY_MATCH, k):
177 raise serializers.ValidationError("Tag keys can only contain [a-z]")
178 if not re.match(TAGVAL_MATCH, str(v)):
179 raise serializers.ValidationError("Invalid tag value")
180 return value
181
182
183 class ReleaseSerializer(ModelSerializer):
184 """Serialize a :class:`~api.models.Release` model."""
185
186 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
187 owner = serializers.ReadOnlyField(source='owner.username')
188 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
189 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
190
191 class Meta:
192 """Metadata options for a :class:`ReleaseSerializer`."""
193 model = models.Release
194
195
196 class ContainerSerializer(ModelSerializer):
197 """Serialize a :class:`~api.models.Container` model."""
198
199 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
200 owner = serializers.ReadOnlyField(source='owner.username')
201 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
202 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
203 release = serializers.SerializerMethodField()
204
205 class Meta:
206 """Metadata options for a :class:`ContainerSerializer`."""
207 model = models.Container
208
209 def get_release(self, obj):
210 return "v{}".format(obj.release.version)
211
212
213 class KeySerializer(ModelSerializer):
214 """Serialize a :class:`~api.models.Key` model."""
215
216 owner = serializers.ReadOnlyField(source='owner.username')
217 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
218 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
219
220 class Meta:
221 """Metadata options for a KeySerializer."""
222 model = models.Key
223
224
225 class DomainSerializer(ModelSerializer):
226 """Serialize a :class:`~api.models.Domain` model."""
227
228 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
229 owner = serializers.ReadOnlyField(source='owner.username')
230 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
231 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
232
233 class Meta:
234 """Metadata options for a :class:`DomainSerializer`."""
235 model = models.Domain
236 fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']
237
238 def validate_domain(self, value):
239 """
240 Check that the hostname is valid
241 """
242 match = re.match(
243 r'^(\*\.)?(' + settings.APP_URL_REGEX + r'\.)*([a-z0-9-]+)\.([a-z0-9]{2,})$',
244 value)
245 if not match:
246 raise serializers.ValidationError(
247 "Hostname does not look like a valid hostname. "
248 "Only lowercase characters are allowed.")
249
250 if models.Domain.objects.filter(domain=value).exists():
251 raise serializers.ValidationError(
252 "The domain {} is already in use by another app".format(value))
253
254 domain_parts = value.split('.')
255 if domain_parts[0] == '*':
256 raise serializers.ValidationError(
257 "Adding a wildcard subdomain is currently not supported".format(value))
258
259 return value
260
261
262 class PushSerializer(ModelSerializer):
263 """Serialize a :class:`~api.models.Push` model."""
264
265 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
266 owner = serializers.ReadOnlyField(source='owner.username')
267 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
268 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
269
270 class Meta:
271 """Metadata options for a :class:`PushSerializer`."""
272 model = models.Push
273 fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',
274 'ssh_connection', 'ssh_original_command', 'created', 'updated']
275
[end of controller/api/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/controller/api/serializers.py b/controller/api/serializers.py
--- a/controller/api/serializers.py
+++ b/controller/api/serializers.py
@@ -239,23 +239,22 @@
"""
Check that the hostname is valid
"""
- match = re.match(
- r'^(\*\.)?(' + settings.APP_URL_REGEX + r'\.)*([a-z0-9-]+)\.([a-z0-9]{2,})$',
- value)
- if not match:
+ if len(value) > 255:
+ raise serializers.ValidationError('Hostname must be 255 characters or less.')
+ if value[-1:] == ".":
+ value = value[:-1] # strip exactly one dot from the right, if present
+ labels = value.split('.')
+ if labels[0] == '*':
raise serializers.ValidationError(
- "Hostname does not look like a valid hostname. "
- "Only lowercase characters are allowed.")
-
+ 'Adding a wildcard subdomain is currently not supported.')
+ allowed = re.compile("^(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
+ for label in labels:
+ match = allowed.match(label)
+ if not match or '--' in label or label[-1].isdigit() or label.isdigit():
+ raise serializers.ValidationError('Hostname does not look valid.')
if models.Domain.objects.filter(domain=value).exists():
raise serializers.ValidationError(
"The domain {} is already in use by another app".format(value))
-
- domain_parts = value.split('.')
- if domain_parts[0] == '*':
- raise serializers.ValidationError(
- "Adding a wildcard subdomain is currently not supported".format(value))
-
return value
| {"golden_diff": "diff --git a/controller/api/serializers.py b/controller/api/serializers.py\n--- a/controller/api/serializers.py\n+++ b/controller/api/serializers.py\n@@ -239,23 +239,22 @@\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n- match = re.match(\n- r'^(\\*\\.)?(' + settings.APP_URL_REGEX + r'\\.)*([a-z0-9-]+)\\.([a-z0-9]{2,})$',\n- value)\n- if not match:\n+ if len(value) > 255:\n+ raise serializers.ValidationError('Hostname must be 255 characters or less.')\n+ if value[-1:] == \".\":\n+ value = value[:-1] # strip exactly one dot from the right, if present\n+ labels = value.split('.')\n+ if labels[0] == '*':\n raise serializers.ValidationError(\n- \"Hostname does not look like a valid hostname. \"\n- \"Only lowercase characters are allowed.\")\n-\n+ 'Adding a wildcard subdomain is currently not supported.')\n+ allowed = re.compile(\"^(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n+ for label in labels:\n+ match = allowed.match(label)\n+ if not match or '--' in label or label[-1].isdigit() or label.isdigit():\n+ raise serializers.ValidationError('Hostname does not look valid.')\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n-\n- domain_parts = value.split('.')\n- if domain_parts[0] == '*':\n- raise serializers.ValidationError(\n- \"Adding a wildcard subdomain is currently not supported\".format(value))\n-\n return value\n", "issue": "deis blocks domain creation under improper circumstances\n`$ deis domains:add django.paas-sandbox`\n`Adding django.paas-sandbox to rubber-undertow... 400 BAD REQUEST`\n`{u'domain': [u'Hostname does not look like a valid hostname. Only lowercase characters are allowed.']}`\n\ndeis will not let me create a domain where the top of the domain is \"paas-sandbox\", as displayed above, however this is not a top level domain nor is it intended as one. Our corporation uses an internal domain that is set as the dns search on all corporate computers. For example, if someone types `http://www/` in their browser, it will come up because their dns searches `corp.example.com` for non qualifying domains. They are really going to the site `www.corp.example.com` but all they type and all the host header on the server sees is \"www\". deis requires the domain to exist how the user will display it in the browser in order to render the page. Our DNS system is almost identical to Active Directory so anyone using Active Directory can relate but this is not a MS / Active Directory specific scenario. This can be done using only dhcp and most ISP's will do this from their dhcp as well.\n\nAnyways, please fix. 
Thank you.\n\n", "before_files": [{"content": "\"\"\"\nClasses to serialize the RESTful representation of Deis API models.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom api import models\n\n\nPROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')\nMEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+[BbKkMmGg])$')\nCPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')\nTAGKEY_MATCH = re.compile(r'^[a-z]+$')\nTAGVAL_MATCH = re.compile(r'^\\w+$')\n\n\nclass JSONFieldSerializer(serializers.Field):\n def to_representation(self, obj):\n return obj\n\n def to_internal_value(self, data):\n try:\n val = json.loads(data)\n except TypeError:\n val = data\n return val\n\n\nclass ModelSerializer(serializers.ModelSerializer):\n\n uuid = serializers.ReadOnlyField()\n\n def get_validators(self):\n \"\"\"\n Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.\n\n See https://github.com/deis/deis/pull/2898#discussion_r23105147\n \"\"\"\n validators = super(ModelSerializer, self).get_validators()\n for v in validators:\n if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:\n validators.remove(v)\n return validators\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',\n 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',\n 'is_active']\n read_only_fields = ['is_superuser', 'is_staff', 'groups',\n 'user_permissions', 'last_login', 'date_joined', 'is_active']\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n now = timezone.now()\n user = User(\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n last_login=now,\n date_joined=now,\n is_active=True\n )\n if validated_data.get('first_name'):\n user.first_name = validated_data['first_name']\n if validated_data.get('last_name'):\n user.last_name = validated_data['last_name']\n user.set_password(validated_data['password'])\n # Make the first signup an admin / superuser\n if not User.objects.filter(is_superuser=True).exists():\n user.is_superuser = user.is_staff = True\n user.save()\n return user\n\n\nclass AdminUserSerializer(serializers.ModelSerializer):\n \"\"\"Serialize admin status for a User model.\"\"\"\n\n class Meta:\n model = User\n fields = ['username', 'is_superuser']\n read_only_fields = ['username']\n\n\nclass AppSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.App` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n structure = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`AppSerializer`.\"\"\"\n model = models.App\n fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']\n read_only_fields = ['uuid']\n\n\nclass BuildSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Build` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n procfile = 
JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`BuildSerializer`.\"\"\"\n model = models.Build\n fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',\n 'updated', 'uuid']\n read_only_fields = ['uuid']\n\n\nclass ConfigSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Config` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n values = JSONFieldSerializer(required=False)\n memory = JSONFieldSerializer(required=False)\n cpu = JSONFieldSerializer(required=False)\n tags = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ConfigSerializer`.\"\"\"\n model = models.Config\n\n def validate_memory(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n if not re.match(MEMLIMIT_MATCH, str(v)):\n raise serializers.ValidationError(\n \"Limit format: <number><unit>, where unit = B, K, M or G\")\n return value\n\n def validate_cpu(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n shares = re.match(CPUSHARE_MATCH, str(v))\n if not shares:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n for v in shares.groupdict().values():\n try:\n i = int(v)\n except ValueError:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n if i > 1024 or i < 0:\n raise serializers.ValidationError(\"CPU shares must be between 0 and 1024\")\n return value\n\n def validate_tags(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(TAGKEY_MATCH, k):\n raise serializers.ValidationError(\"Tag keys can only contain [a-z]\")\n if not re.match(TAGVAL_MATCH, str(v)):\n raise serializers.ValidationError(\"Invalid tag value\")\n return value\n\n\nclass ReleaseSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Release` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ReleaseSerializer`.\"\"\"\n model = models.Release\n\n\nclass ContainerSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Container` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = 
serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n release = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Metadata options for a :class:`ContainerSerializer`.\"\"\"\n model = models.Container\n\n def get_release(self, obj):\n return \"v{}\".format(obj.release.version)\n\n\nclass KeySerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Key` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a KeySerializer.\"\"\"\n model = models.Key\n\n\nclass DomainSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Domain` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`DomainSerializer`.\"\"\"\n model = models.Domain\n fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']\n\n def validate_domain(self, value):\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n match = re.match(\n r'^(\\*\\.)?(' + settings.APP_URL_REGEX + r'\\.)*([a-z0-9-]+)\\.([a-z0-9]{2,})$',\n value)\n if not match:\n raise serializers.ValidationError(\n \"Hostname does not look like a valid hostname. \"\n \"Only lowercase characters are allowed.\")\n\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n\n domain_parts = value.split('.')\n if domain_parts[0] == '*':\n raise serializers.ValidationError(\n \"Adding a wildcard subdomain is currently not supported\".format(value))\n\n return value\n\n\nclass PushSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Push` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`PushSerializer`.\"\"\"\n model = models.Push\n fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',\n 'ssh_connection', 'ssh_original_command', 'created', 'updated']\n", "path": "controller/api/serializers.py"}]} | 3,772 | 398 |
gh_patches_debug_7074 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-2969 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ddp_backend in 0.9.0rc12 fails if no CUDA_VISIBLE_DEVICES found
## 🐛 Bug
In ddp_backend, training immediately fails if the environment variable CUDA_VISIBLE_DEVICES isn't set. This line should handle the None case gracefully: https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/accelerators/ddp_backend.py#L90
### To Reproduce
Start a run using ddp on CPU. This was discovered using torchelastic to launch
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
This shouldn't crash if the environment variable isn't set. We could default to `num_gpus = 0` in this case.
Replacing the line above with something like this could work:
`num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()
`
</issue>
<code>
[start of pytorch_lightning/accelerators/ddp_backend.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License
14
15 import os
16 import subprocess
17 import sys
18 from os.path import abspath
19 from time import sleep
20 from typing import Optional
21
22 import numpy as np
23 import torch
24
25 from pytorch_lightning import _logger as log
26 from pytorch_lightning.utilities import AMPType
27 from pytorch_lightning.utilities.distributed import rank_zero_only
28
29 try:
30 from hydra.utils import to_absolute_path, get_original_cwd
31 from hydra.core.hydra_config import HydraConfig
32 except ImportError:
33 HYDRA_AVAILABLE = False
34 else:
35 HYDRA_AVAILABLE = True
36
37 try:
38 from apex import amp
39 except ImportError:
40 amp = None
41
42
43 class DDPBackend(object):
44
45 def __init__(self, trainer):
46 self.trainer = trainer
47 self.task_idx = None
48
49 def slurm_setup(self):
50 self.task_idx = int(os.environ['SLURM_LOCALID'])
51
52 def torchelastic_setup(self):
53 self.task_idx = int(os.environ['LOCAL_RANK'])
54
55 def train(self, model):
56 self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)
57
58 def spawn_ddp_children(self, model):
59 port = os.environ['MASTER_PORT']
60
61 master_address = '127.0.0.1' if 'MASTER_ADDR' not in os.environ else os.environ['MASTER_ADDR']
62 os.environ['MASTER_PORT'] = f'{port}'
63 os.environ['MASTER_ADDR'] = f'{master_address}'
64
65 # allow the user to pass the node rank
66 node_rank = '0'
67 if 'NODE_RANK' in os.environ:
68 node_rank = os.environ['NODE_RANK']
69 if 'GROUP_RANK' in os.environ:
70 node_rank = os.environ['GROUP_RANK']
71
72 os.environ['NODE_RANK'] = node_rank
73 os.environ['LOCAL_RANK'] = '0'
74
75 # when user is using hydra find the absolute path
76 path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path
77
78 # pull out the commands used to run the script and resolve the abs file path
79 command = sys.argv
80 try:
81 full_path = path_lib(command[0])
82 except Exception as e:
83 full_path = abspath(command[0])
84
85 command[0] = full_path
86 # use the same python interpreter and actually running
87 command = [sys.executable] + command
88
89 # since this script sets the visible devices we replace the gpus flag with a number
90 num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()
91
92 if '--gpus' in command:
93 gpu_flag_idx = command.index('--gpus')
94 command[gpu_flag_idx + 1] = f'{num_gpus}'
95
96 os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'
97
98 self.trainer.interactive_ddp_procs = []
99 for local_rank in range(1, self.trainer.num_processes):
100 env_copy = os.environ.copy()
101 env_copy['LOCAL_RANK'] = f'{local_rank}'
102
103 # start process
104 # if hydra is available and initialized, make sure to set the cwd correctly
105 cwd: Optional[str] = None
106 if HYDRA_AVAILABLE:
107 if HydraConfig.initialized():
108 cwd = get_original_cwd()
109 proc = subprocess.Popen(command, env=env_copy, cwd=cwd)
110 self.trainer.interactive_ddp_procs.append(proc)
111
112 # starting all processes at once can cause issues
113 # with dataloaders delay between 1-10 seconds
114 delay = np.random.uniform(1, 5, 1)[0]
115 sleep(delay)
116
117 local_rank = 0
118 results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)
119 del os.environ['WORLD_SIZE']
120
121 return results
122
123 def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):
124 """
125 Entry point for ddp
126
127 Args:
128 process_idx:
129 mp_queue: multiprocessing queue
130 model:
131 is_master:
132 proc_offset:
133
134 Returns:
135
136 """
137 # offset the process id if requested
138 process_idx = process_idx + proc_offset
139
140 # show progressbar only on progress_rank 0
141 if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
142 self.trainer.progress_bar_callback.disable()
143
144 # determine which process we are and world size
145 self.trainer.local_rank = process_idx
146 self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx
147 self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
148
149 # set warning rank
150 rank_zero_only.rank = self.trainer.global_rank
151
152 # set up server using proc 0's ip address
153 # try to init for 20 times at max in case ports are taken
154 # where to store ip_table
155 model.trainer = self.trainer
156 model.init_ddp_connection(
157 self.trainer.global_rank,
158 self.trainer.world_size,
159 self.trainer.is_slurm_managing_tasks
160 )
161
162 # call setup after the ddp process has connected
163 self.trainer.call_setup_hook(model)
164
165 # on world_size=0 let everyone know training is starting
166 if self.trainer.is_global_zero:
167 log.info('-' * 100)
168 log.info(f'distributed_backend={self.trainer.distributed_backend}')
169 log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
170 log.info('-' * 100)
171
172 # call sync_bn before .cuda(), configure_apex and configure_ddp
173 if self.trainer.sync_batchnorm:
174 model = model.configure_sync_batchnorm(model)
175
176 # MODEL
177 # copy model to each gpu
178 if self.trainer.on_gpu:
179 gpu_idx = process_idx
180
181 # when using ddp, the master process (proc 0) continues running as the main one
182 # this means that the local rank will always be 0
183 # (even if cuda visible devices has other visible gpus)
184 # this means that the master process needs to pull the 0th visible index as the device number
185 if is_master:
186 available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
187 gpu_idx = int(available_gpus[self.trainer.local_rank])
188
189 self.trainer.root_gpu = gpu_idx
190 torch.cuda.set_device(self.trainer.root_gpu)
191 model.cuda(self.trainer.root_gpu)
192
193 # CHOOSE OPTIMIZER
194 # allow for lr schedulers as well
195 optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
196 self.trainer.optimizers = optimizers
197 self.trainer.lr_schedulers = lr_schedulers
198 self.trainer.optimizer_frequencies = optimizer_frequencies
199
200 # set model properties before going into wrapper
201 self.trainer.copy_trainer_model_properties(model)
202
203 # AMP - run through amp wrapper before going to distributed DP
204 if self.trainer.amp_backend == AMPType.APEX:
205 model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
206 self.trainer.optimizers = optimizers
207 self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
208
209 # DDP2 uses all GPUs on the machine
210 if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':
211 device_ids = [self.trainer.root_gpu]
212 else: # includes ddp_cpu
213 device_ids = None
214
215 # allow user to configure ddp
216 model = model.configure_ddp(model, device_ids)
217
218 # continue training routine
219 results = self.trainer.run_pretrain_routine(model)
220
221 # get original model
222 model = self.trainer.get_model()
223
224 # persist info in ddp_spawn
225 self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)
226
227 # clean up memory
228 torch.cuda.empty_cache()
229
230 if self.trainer.global_rank == 0 and self.trainer.distributed_backend not in ['ddp_spawn', 'ddp_cpu']:
231 return results
232
[end of pytorch_lightning/accelerators/ddp_backend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py
--- a/pytorch_lightning/accelerators/ddp_backend.py
+++ b/pytorch_lightning/accelerators/ddp_backend.py
@@ -87,7 +87,7 @@
command = [sys.executable] + command
# since this script sets the visible devices we replace the gpus flag with a number
- num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()
+ num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()
if '--gpus' in command:
gpu_flag_idx = command.index('--gpus')
| {"golden_diff": "diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py\n--- a/pytorch_lightning/accelerators/ddp_backend.py\n+++ b/pytorch_lightning/accelerators/ddp_backend.py\n@@ -87,7 +87,7 @@\n command = [sys.executable] + command\n \n # since this script sets the visible devices we replace the gpus flag with a number\n- num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()\n+ num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()\n \n if '--gpus' in command:\n gpu_flag_idx = command.index('--gpus')\n", "issue": "ddp_backend in 0.9.0rc12 fails if no CUDA_VISIBLE_DEVICES found\n## \ud83d\udc1b Bug\r\nIn ddp_backend, training immediately fails if the environment variable CUDA_VISIBLE_DEVICES isn't set. This line should handle the None case gracefully: https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/accelerators/ddp_backend.py#L90\r\n\r\n### To Reproduce\r\nStart a run using ddp on CPU. This was discovered using torchelastic to launch\r\n\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\nThis shouldn't crash if the environment variable isn't set. We could default to `num_gpus = 0` in this case. \r\nReplacing the line above with something like this could work:\r\n\r\n`num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()\r\n`\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport os\nimport subprocess\nimport sys\nfrom os.path import abspath\nfrom time import sleep\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.distributed import rank_zero_only\n\ntry:\n from hydra.utils import to_absolute_path, get_original_cwd\n from hydra.core.hydra_config import HydraConfig\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n HYDRA_AVAILABLE = True\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\n\nclass DDPBackend(object):\n\n def __init__(self, trainer):\n self.trainer = trainer\n self.task_idx = None\n\n def slurm_setup(self):\n self.task_idx = int(os.environ['SLURM_LOCALID'])\n\n def torchelastic_setup(self):\n self.task_idx = int(os.environ['LOCAL_RANK'])\n\n def train(self, model):\n self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)\n\n def spawn_ddp_children(self, model):\n port = os.environ['MASTER_PORT']\n\n master_address = '127.0.0.1' if 'MASTER_ADDR' not in os.environ else os.environ['MASTER_ADDR']\n os.environ['MASTER_PORT'] = f'{port}'\n os.environ['MASTER_ADDR'] = f'{master_address}'\n\n # allow the user to pass the node rank\n node_rank = '0'\n if 'NODE_RANK' in os.environ:\n node_rank = os.environ['NODE_RANK']\n if 
'GROUP_RANK' in os.environ:\n node_rank = os.environ['GROUP_RANK']\n\n os.environ['NODE_RANK'] = node_rank\n os.environ['LOCAL_RANK'] = '0'\n\n # when user is using hydra find the absolute path\n path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path\n\n # pull out the commands used to run the script and resolve the abs file path\n command = sys.argv\n try:\n full_path = path_lib(command[0])\n except Exception as e:\n full_path = abspath(command[0])\n\n command[0] = full_path\n # use the same python interpreter and actually running\n command = [sys.executable] + command\n\n # since this script sets the visible devices we replace the gpus flag with a number\n num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()\n\n if '--gpus' in command:\n gpu_flag_idx = command.index('--gpus')\n command[gpu_flag_idx + 1] = f'{num_gpus}'\n\n os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'\n\n self.trainer.interactive_ddp_procs = []\n for local_rank in range(1, self.trainer.num_processes):\n env_copy = os.environ.copy()\n env_copy['LOCAL_RANK'] = f'{local_rank}'\n\n # start process\n # if hydra is available and initialized, make sure to set the cwd correctly\n cwd: Optional[str] = None\n if HYDRA_AVAILABLE:\n if HydraConfig.initialized():\n cwd = get_original_cwd()\n proc = subprocess.Popen(command, env=env_copy, cwd=cwd)\n self.trainer.interactive_ddp_procs.append(proc)\n\n # starting all processes at once can cause issues\n # with dataloaders delay between 1-10 seconds\n delay = np.random.uniform(1, 5, 1)[0]\n sleep(delay)\n\n local_rank = 0\n results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)\n del os.environ['WORLD_SIZE']\n\n return results\n\n def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n is_master:\n proc_offset:\n\n Returns:\n\n \"\"\"\n # offset the process id if requested\n process_idx = process_idx + proc_offset\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.trainer.local_rank = process_idx\n self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx\n self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes\n\n # set warning rank\n rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n model.trainer = self.trainer\n model.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero:\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. 
Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # MODEL\n # copy model to each gpu\n if self.trainer.on_gpu:\n gpu_idx = process_idx\n\n # when using ddp, the master process (proc 0) continues running as the main one\n # this means that the local rank will always be 0\n # (even if cuda visible devices has other visible gpus)\n # this means that the master process needs to pull the 0th visible index as the device number\n if is_master:\n available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n gpu_idx = int(available_gpus[self.trainer.local_rank])\n\n self.trainer.root_gpu = gpu_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n model.cuda(self.trainer.root_gpu)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)\n self.trainer.optimizers = optimizers\n self.trainer.lr_schedulers = lr_schedulers\n self.trainer.optimizer_frequencies = optimizer_frequencies\n\n # set model properties before going into wrapper\n self.trainer.copy_trainer_model_properties(model)\n\n # AMP - run through amp wrapper before going to distributed DP\n if self.trainer.amp_backend == AMPType.APEX:\n model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)\n self.trainer.optimizers = optimizers\n self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)\n\n # DDP2 uses all GPUs on the machine\n if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':\n device_ids = [self.trainer.root_gpu]\n else: # includes ddp_cpu\n device_ids = None\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # continue training routine\n results = self.trainer.run_pretrain_routine(model)\n\n # get original model\n model = self.trainer.get_model()\n\n # persist info in ddp_spawn\n self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n # clean up memory\n torch.cuda.empty_cache()\n\n if self.trainer.global_rank == 0 and self.trainer.distributed_backend not in ['ddp_spawn', 'ddp_cpu']:\n return results\n", "path": "pytorch_lightning/accelerators/ddp_backend.py"}]} | 3,311 | 163 |
gh_patches_debug_3563 | rasdani/github-patches | git_diff | kivy__kivy-335 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Regression when upgrading from 1.0.9-stable to 1.0.10-dev?
Hello,
I just upgraded Kivy to the latest version via the "git pull" command. I was running 1.0.9 stable and updated Kivy to 1.0.10-dev.
When rerunning my program (on 1.0.9 everything works just fine), this error / stacktrace pops up: http://dpaste.com/hold/676134/.
</issue>
<code>
[start of kivy/uix/image.py]
1 '''
2 Image
3 =====
4
5 The :class:`Image` widget is used to display an image. ::
6
7 wimg = Image(source='mylogo.png')
8
9 Asynchronous loading
10 --------------------
11
12 To load an image asynchronously (for example from an external webserver), use
13 the :class:`AsyncImage` subclass ::
14
15 aimg = AsyncImage(source='http://mywebsite.com/logo.png')
16
17 Alignement
18 ----------
19
20 By default, the image is centered and fitted inside the widget bounding box.
21 If you don't want that, you can inherit from Image and create your own style.
22
23 For example, if you want your image to take the same size of your widget, you
24 can do ::
25
26 class FullImage(Image):
27 pass
28
29 And in your kivy language file, you can do ::
30
31 <FullImage>:
32 canvas:
33 Color:
34 rgb: (1, 1, 1)
35 Rectangle:
36 texture: self.texture
37 size: self.size
38 pos: self.pos
39
40 '''
41
42 __all__ = ('Image', 'AsyncImage')
43
44 from kivy.uix.widget import Widget
45 from kivy.core.image import Image as CoreImage
46 from kivy.resources import resource_find
47 from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
48 AliasProperty, BooleanProperty, NumericProperty
49 from kivy.loader import Loader
50
51
52 class Image(Widget):
53 '''Image class, see module documentation for more information.
54 '''
55
56 source = StringProperty(None)
57 '''Filename / source of your image.
58
59 :data:`source` a :class:`~kivy.properties.StringProperty`, default to None.
60 '''
61
62 texture = ObjectProperty(None, allownone=True)
63 '''Texture object of the image.
64
65 Depending of the texture creation, the value will be a
66 :class:`~kivy.graphics.texture.Texture` or
67 :class:`~kivy.graphics.texture.TextureRegion` object.
68
69 :data:`texture` is a :class:`~kivy.properties.ObjectProperty`, default to
70 None.
71 '''
72
73 texture_size = ListProperty([0, 0])
74 '''Texture size of the image.
75
76 .. warning::
77
78 The texture size is set after the texture property. So if you listen on
79 the change to :data:`texture`, the property texture_size will be not yet
80 updated. Use self.texture.size instead.
81 '''
82
83 def get_image_ratio(self):
84 if self.texture:
85 return self.texture.width / float(self.texture.height)
86 return 1.
87
88 mipmap = BooleanProperty(False)
89 '''Indicate if you want OpenGL mipmapping to be apply on the texture or not.
90 Read :ref:`mipmap` for more information.
91
92 .. versionadded:: 1.0.7
93
94 :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty`, default to
95 False.
96 '''
97
98 image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))
99 '''Ratio of the image (width / float(height)
100
101 :data:`image_ratio` is a :class:`~kivy.properties.AliasProperty`, and is
102 read-only.
103 '''
104
105 color = ListProperty([1, 1, 1, 1])
106 '''Image color, in the format (r, g, b, a). This attribute can be used for
107 'tint' an image. Be careful, if the source image is not gray/white, the
108 color will not really work as expected.
109
110 .. versionadded:: 1.0.6
111
112 :data:`color` is a :class:`~kivy.properties.ListProperty`, default to [1, 1,
113 1, 1].
114 '''
115
116 allow_stretch = BooleanProperty(False)
117 '''If True, the normalized image size will be maximized to fit in the image
118 box. Otherwise, if the box is too tall, the image will not be streched more
119 than 1:1 pixels
120
121 .. versionadded:: 1.0.7
122
123 :data:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty`,
124 default to False
125 '''
126
127 keep_ratio = BooleanProperty(True)
128 '''If False along with allow_stretch being True, the normalized image
129 size will be maximized to fit in the image box disregarding the aspect
130 ratio of the image.
131 Otherwise, if the box is too tall, the image will not be streched more
132 than 1:1 pixels
133
134 .. versionadded:: 1.0.8
135
136 :data:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty`,
137 default to True
138 '''
139
140 anim_delay = NumericProperty(.25)
141 '''Delay of animation if the image is sequenced (like animated gif).
142 If the anim_delay is set to -1, the animation will be stopped.
143
144 .. versionadded:: 1.0.8
145
146 :data:`anim_delay` is a :class:`~kivy.properties.NumericProperty`, default
147 to .25 (4 FPS)
148 '''
149
150 def get_norm_image_size(self):
151 if not self.texture:
152 return self.size
153 ratio = self.image_ratio
154 w, h = self.size
155 tw, th = self.texture.size
156
157 # ensure that the width is always maximized to the containter width
158 if self.allow_stretch:
159 if not self.keep_ratio:
160 return w, h
161 iw = w
162 else:
163 iw = min(w, tw)
164 # calculate the appropriate height
165 ih = iw / ratio
166 # if the height is too higher, take the height of the container
167 # and calculate appropriate width. no need to test further. :)
168 if ih > h:
169 if self.allow_stretch:
170 ih = h
171 else:
172 ih = min(h, th)
173 iw = ih * ratio
174
175 return iw, ih
176
177
178 norm_image_size = AliasProperty(get_norm_image_size, None, bind=(
179 'texture', 'size', 'image_ratio', 'allow_stretch'))
180 '''Normalized image size withing the widget box.
181
182 This size will be always fitted to the widget size, and preserve the image
183 ratio.
184
185 :data:`norm_image_size` is a :class:`~kivy.properties.AliasProperty`, and is
186 read-only.
187 '''
188
189 def __init__(self, **kwargs):
190 self._coreimage = None
191 super(Image, self).__init__(**kwargs)
192 self.bind(source=self.texture_update,
193 mipmap=self.texture_update)
194 if self.source is not None:
195 self.texture_update()
196
197 def texture_update(self, *largs):
198 if not self.source:
199 self.texture = None
200 else:
201 filename = resource_find(self.source)
202 if filename is None:
203 return
204 mipmap = self.mipmap
205 if self._coreimage is not None:
206 self._coreimage.unbind(on_texture=self._on_tex_change)
207 self._coreimage = ci = CoreImage(filename, mipmap=mipmap,
208 anim_delay=self.anim_delay)
209 ci.bind(on_texture=self._on_tex_change)
210 self.texture = ci.texture
211
212 def on_anim_delay(self, instance, value):
213 if self._coreimage is None:
214 return
215 self._coreimage.anim_delay = value
216 if value < 0:
217 self._coreimage.anim_reset(False)
218
219 def on_texture(self, instance, value):
220 if value is not None:
221 self.texture_size = list(value.size)
222
223 def _on_tex_change(self, *largs):
224 # update texture from core image
225 self.texture = self._coreimage.texture
226
227
228 class AsyncImage(Image):
229 '''Asynchronous Image class, see module documentation for more information.
230 '''
231
232 def __init__(self, **kwargs):
233 self._coreimage = None
234 super(AsyncImage, self).__init__(**kwargs)
235 self.unbind(source=self.texture_update,
236 mipmap=self.texture_update)
237
238 def on_source(self, instance, value):
239 if not value:
240 self._coreimage.unbind(on_texture=self._on_tex_change)
241 self.texture = None
242 self._coreimage = None
243 else:
244 if not self.is_uri(value):
245 value = resource_find(value)
246 self._coreimage = image = Loader.image(value)
247 image.bind(on_load=self.on_source_load)
248 image.bind(on_texture=self._on_tex_change)
249 self.texture = image.texture
250
251 def on_source_load(self, value):
252 image = self._coreimage.image
253 if not image:
254 return
255 self.texture = image.texture
256
257 def is_uri(self, filename):
258 proto = filename.split('://', 1)[0]
259 return proto in ('http', 'https', 'ftp')
260
261 def _on_tex_change(self, *largs):
262 if self._coreimage:
263 self.texture = self._coreimage.texture
264
[end of kivy/uix/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/uix/image.py b/kivy/uix/image.py
--- a/kivy/uix/image.py
+++ b/kivy/uix/image.py
@@ -237,7 +237,8 @@
def on_source(self, instance, value):
if not value:
- self._coreimage.unbind(on_texture=self._on_tex_change)
+ if self._coreimage is not None:
+ self._coreimage.unbind(on_texture=self._on_tex_change)
self.texture = None
self._coreimage = None
else:
| {"golden_diff": "diff --git a/kivy/uix/image.py b/kivy/uix/image.py\n--- a/kivy/uix/image.py\n+++ b/kivy/uix/image.py\n@@ -237,7 +237,8 @@\n \n def on_source(self, instance, value):\n if not value:\n- self._coreimage.unbind(on_texture=self._on_tex_change)\n+ if self._coreimage is not None:\n+ self._coreimage.unbind(on_texture=self._on_tex_change)\n self.texture = None\n self._coreimage = None\n else:\n", "issue": "Regression when upgrading from 1.0.9-stable to 1.0.10-dev?\nHello,\n\nI just upgraded Kivy to the latest version via the \"git pull\" command. I was running 1.0.9 stable and updated Kivy to 1.0.10-dev.\n\nWhen rerunning my program (on 1.0.9 everything works just fine), this error / stacktrace pops up: http://dpaste.com/hold/676134/.\n\n", "before_files": [{"content": "'''\nImage\n=====\n\nThe :class:`Image` widget is used to display an image. ::\n\n wimg = Image(source='mylogo.png')\n\nAsynchronous loading\n--------------------\n\nTo load an image asynchronously (for example from an external webserver), use\nthe :class:`AsyncImage` subclass ::\n\n aimg = AsyncImage(source='http://mywebsite.com/logo.png')\n\nAlignement\n----------\n\nBy default, the image is centered and fitted inside the widget bounding box.\nIf you don't want that, you can inherit from Image and create your own style.\n\nFor example, if you want your image to take the same size of your widget, you\ncan do ::\n\n class FullImage(Image):\n pass\n\nAnd in your kivy language file, you can do ::\n\n <FullImage>:\n canvas:\n Color:\n rgb: (1, 1, 1)\n Rectangle:\n texture: self.texture\n size: self.size\n pos: self.pos\n\n'''\n\n__all__ = ('Image', 'AsyncImage')\n\nfrom kivy.uix.widget import Widget\nfrom kivy.core.image import Image as CoreImage\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, ObjectProperty, ListProperty, \\\n AliasProperty, BooleanProperty, NumericProperty\nfrom kivy.loader import Loader\n\n\nclass Image(Widget):\n '''Image class, see module documentation for more information.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your image.\n\n :data:`source` a :class:`~kivy.properties.StringProperty`, default to None.\n '''\n\n texture = ObjectProperty(None, allownone=True)\n '''Texture object of the image.\n\n Depending of the texture creation, the value will be a\n :class:`~kivy.graphics.texture.Texture` or\n :class:`~kivy.graphics.texture.TextureRegion` object.\n\n :data:`texture` is a :class:`~kivy.properties.ObjectProperty`, default to\n None.\n '''\n\n texture_size = ListProperty([0, 0])\n '''Texture size of the image.\n\n .. warning::\n\n The texture size is set after the texture property. So if you listen on\n the change to :data:`texture`, the property texture_size will be not yet\n updated. Use self.texture.size instead.\n '''\n\n def get_image_ratio(self):\n if self.texture:\n return self.texture.width / float(self.texture.height)\n return 1.\n\n mipmap = BooleanProperty(False)\n '''Indicate if you want OpenGL mipmapping to be apply on the texture or not.\n Read :ref:`mipmap` for more information.\n\n .. versionadded:: 1.0.7\n\n :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty`, default to\n False.\n '''\n\n image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))\n '''Ratio of the image (width / float(height)\n\n :data:`image_ratio` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n color = ListProperty([1, 1, 1, 1])\n '''Image color, in the format (r, g, b, a). 
This attribute can be used for\n 'tint' an image. Be careful, if the source image is not gray/white, the\n color will not really work as expected.\n\n .. versionadded:: 1.0.6\n\n :data:`color` is a :class:`~kivy.properties.ListProperty`, default to [1, 1,\n 1, 1].\n '''\n\n allow_stretch = BooleanProperty(False)\n '''If True, the normalized image size will be maximized to fit in the image\n box. Otherwise, if the box is too tall, the image will not be streched more\n than 1:1 pixels\n\n .. versionadded:: 1.0.7\n\n :data:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty`,\n default to False\n '''\n\n keep_ratio = BooleanProperty(True)\n '''If False along with allow_stretch being True, the normalized image\n size will be maximized to fit in the image box disregarding the aspect\n ratio of the image.\n Otherwise, if the box is too tall, the image will not be streched more\n than 1:1 pixels\n\n .. versionadded:: 1.0.8\n\n :data:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty`,\n default to True\n '''\n\n anim_delay = NumericProperty(.25)\n '''Delay of animation if the image is sequenced (like animated gif).\n If the anim_delay is set to -1, the animation will be stopped.\n\n .. versionadded:: 1.0.8\n\n :data:`anim_delay` is a :class:`~kivy.properties.NumericProperty`, default\n to .25 (4 FPS)\n '''\n\n def get_norm_image_size(self):\n if not self.texture:\n return self.size\n ratio = self.image_ratio\n w, h = self.size\n tw, th = self.texture.size\n\n # ensure that the width is always maximized to the containter width\n if self.allow_stretch:\n if not self.keep_ratio:\n return w, h\n iw = w\n else:\n iw = min(w, tw)\n # calculate the appropriate height\n ih = iw / ratio\n # if the height is too higher, take the height of the container\n # and calculate appropriate width. no need to test further. 
:)\n if ih > h:\n if self.allow_stretch:\n ih = h\n else:\n ih = min(h, th)\n iw = ih * ratio\n\n return iw, ih\n\n\n norm_image_size = AliasProperty(get_norm_image_size, None, bind=(\n 'texture', 'size', 'image_ratio', 'allow_stretch'))\n '''Normalized image size withing the widget box.\n\n This size will be always fitted to the widget size, and preserve the image\n ratio.\n\n :data:`norm_image_size` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(Image, self).__init__(**kwargs)\n self.bind(source=self.texture_update,\n mipmap=self.texture_update)\n if self.source is not None:\n self.texture_update()\n\n def texture_update(self, *largs):\n if not self.source:\n self.texture = None\n else:\n filename = resource_find(self.source)\n if filename is None:\n return\n mipmap = self.mipmap\n if self._coreimage is not None:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self._coreimage = ci = CoreImage(filename, mipmap=mipmap,\n anim_delay=self.anim_delay)\n ci.bind(on_texture=self._on_tex_change)\n self.texture = ci.texture\n\n def on_anim_delay(self, instance, value):\n if self._coreimage is None:\n return\n self._coreimage.anim_delay = value\n if value < 0:\n self._coreimage.anim_reset(False)\n\n def on_texture(self, instance, value):\n if value is not None:\n self.texture_size = list(value.size)\n\n def _on_tex_change(self, *largs):\n # update texture from core image\n self.texture = self._coreimage.texture\n\n\nclass AsyncImage(Image):\n '''Asynchronous Image class, see module documentation for more information.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(AsyncImage, self).__init__(**kwargs)\n self.unbind(source=self.texture_update,\n mipmap=self.texture_update)\n\n def on_source(self, instance, value):\n if not value:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self.texture = None\n self._coreimage = None\n else:\n if not self.is_uri(value):\n value = resource_find(value)\n self._coreimage = image = Loader.image(value)\n image.bind(on_load=self.on_source_load)\n image.bind(on_texture=self._on_tex_change)\n self.texture = image.texture\n\n def on_source_load(self, value):\n image = self._coreimage.image\n if not image:\n return\n self.texture = image.texture\n\n def is_uri(self, filename):\n proto = filename.split('://', 1)[0]\n return proto in ('http', 'https', 'ftp')\n\n def _on_tex_change(self, *largs):\n if self._coreimage:\n self.texture = self._coreimage.texture\n", "path": "kivy/uix/image.py"}]} | 3,262 | 124 |
gh_patches_debug_14311 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-6264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Monitoring: where is CallOptions on monitoring API example?
[OS] macOS Sierra 10.12.6
[Versions]
- Python 3.6.1
```
google-api-core==1.2.1
google-api-python-client==1.7.3
google-auth==1.5.0
google-auth-httplib2==0.0.3
google-cloud-monitoring==0.30.0
googleapis-common-protos==1.5.3
```
----
## CallOptions class was not found!
Hi. I'm new to GCP and Stackdriver. I wanted to use Google Kubernetes Engine with its autoscaling on custom metrics, which requires exporting the metrics to Stackdriver Monitoring, so I am trying to set that up.
But after installing the above-mentioned libraries, the example code in the monitoring API README failed. The pitfall is that `CallOptions` was not found, so I searched for it in this repository and some other repositories.
And in the end, I couldn't find it...
`CallOptions` is defined in gax.python, but that package is deprecated and has been moved to google-api-core. So I guess the dependency is currently broken, or some of the examples are out of date.
Please tell me how to handle this problem.
_Thank you for the great package and platform._
</issue>
<code>
[start of vision/google/cloud/vision_helpers/__init__.py]
1 # Copyright 2017, Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 import io
17
18 from google.api_core import protobuf_helpers as protobuf
19
20
21 class VisionHelpers(object):
22 """A set of convenience methods to make the Vision GAPIC easier to use.
23
24 This class should be considered abstract; it is used as a superclass
25 in a multiple-inheritance construction alongside the applicable GAPIC.
26 See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.
27 """
28 def annotate_image(self, request, retry=None, timeout=None):
29 """Run image detection and annotation for an image.
30
31 Example:
32 >>> from google.cloud.vision_v1 import ImageAnnotatorClient
33 >>> client = ImageAnnotatorClient()
34 >>> request = {
35 ... 'image': {
36 ... 'source': {'image_uri': 'https://foo.com/image.jpg'},
37 ... },
38 ... }
39 >>> response = client.annotate_image(request)
40
41 Args:
42 request (:class:`~.vision_v1.types.AnnotateImageRequest`)
43 options (:class:`google.gax.CallOptions`): Overrides the default
44 settings for this call, e.g, timeout, retries, etc.
45
46 Returns:
47 :class:`~.vision_v1.types.AnnotateImageResponse` The API response.
48 """
49 # If the image is a file handler, set the content.
50 image = protobuf.get(request, 'image')
51 if hasattr(image, 'read'):
52 img_bytes = image.read()
53 protobuf.set(request, 'image', {})
54 protobuf.set(request, 'image.content', img_bytes)
55 image = protobuf.get(request, 'image')
56
57 # If a filename is provided, read the file.
58 filename = protobuf.get(image, 'source.filename', default=None)
59 if filename:
60 with io.open(filename, 'rb') as img_file:
61 protobuf.set(request, 'image.content', img_file.read())
62 protobuf.set(request, 'image.source', None)
63
64 # This method allows features not to be specified, and you get all
65 # of them.
66 protobuf.setdefault(request, 'features', self._get_all_features())
67 r = self.batch_annotate_images([request], retry=retry, timeout=timeout)
68 return r.responses[0]
69
70 def _get_all_features(self):
71 """Return a list of all features.
72
73 Returns:
74 list: A list of all available features.
75 """
76 return [
77 {'type': feature}
78 for feature in self.enums.Feature.Type if feature != 0]
79
[end of vision/google/cloud/vision_helpers/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vision/google/cloud/vision_helpers/__init__.py b/vision/google/cloud/vision_helpers/__init__.py
--- a/vision/google/cloud/vision_helpers/__init__.py
+++ b/vision/google/cloud/vision_helpers/__init__.py
@@ -40,8 +40,12 @@
Args:
request (:class:`~.vision_v1.types.AnnotateImageRequest`)
- options (:class:`google.gax.CallOptions`): Overrides the default
- settings for this call, e.g, timeout, retries, etc.
+ retry (Optional[google.api_core.retry.Retry]): A retry object used
+ to retry requests. If ``None`` is specified, requests will not
+ be retried.
+ timeout (Optional[float]): The amount of time, in seconds, to wait
+ for the request to complete. Note that if ``retry`` is
+ specified, the timeout applies to each individual attempt.
Returns:
:class:`~.vision_v1.types.AnnotateImageResponse` The API response.
| {"golden_diff": "diff --git a/vision/google/cloud/vision_helpers/__init__.py b/vision/google/cloud/vision_helpers/__init__.py\n--- a/vision/google/cloud/vision_helpers/__init__.py\n+++ b/vision/google/cloud/vision_helpers/__init__.py\n@@ -40,8 +40,12 @@\n \n Args:\n request (:class:`~.vision_v1.types.AnnotateImageRequest`)\n- options (:class:`google.gax.CallOptions`): Overrides the default\n- settings for this call, e.g, timeout, retries, etc.\n+ retry (Optional[google.api_core.retry.Retry]): A retry object used\n+ to retry requests. If ``None`` is specified, requests will not\n+ be retried.\n+ timeout (Optional[float]): The amount of time, in seconds, to wait\n+ for the request to complete. Note that if ``retry`` is\n+ specified, the timeout applies to each individual attempt.\n \n Returns:\n :class:`~.vision_v1.types.AnnotateImageResponse` The API response.\n", "issue": "Monitoring: where is CallOptions on monitoring API example?\n[OS] macOS Sierra 10.12.6\r\n[Versions]\r\n\r\n- Python 3.6.1\r\n\r\n```\r\ngoogle-api-core==1.2.1\r\ngoogle-api-python-client==1.7.3\r\ngoogle-auth==1.5.0\r\ngoogle-auth-httplib2==0.0.3\r\ngoogle-cloud-monitoring==0.30.0\r\ngoogleapis-common-protos==1.5.3\r\n```\r\n\r\n----\r\n\r\n## CallOptions class was not found!\r\n\r\nHi. I'm new to GCP and Stackdriver. I wanted to use Google Kubernetes Engine and its auto scaling by custom metrics. Then, it is required to export the metrics to Stackdriver Monitoring, so I am trying to do it.\r\n\r\nBut, After installing above-mentioned libraries, the example code on monitoring API README document failed. The pit hole is that `CallOptions` was not found, thus I've searched it in this repository and some other repositories.\r\n\r\nAnd finally, I couldn't find it...\r\n\r\n`CallOptions` is defined in gax.python, but the package is currently deprecated and moved to google-api-core. So I guess that also the dependency is currently corrupted or the some examples are out-of-date.\r\n\r\nPlease tell me how handle this problem.\r\n\r\n_Thank you for the great package and platform._\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport io\n\nfrom google.api_core import protobuf_helpers as protobuf\n\n\nclass VisionHelpers(object):\n \"\"\"A set of convenience methods to make the Vision GAPIC easier to use.\n\n This class should be considered abstract; it is used as a superclass\n in a multiple-inheritance construction alongside the applicable GAPIC.\n See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.\n \"\"\"\n def annotate_image(self, request, retry=None, timeout=None):\n \"\"\"Run image detection and annotation for an image.\n\n Example:\n >>> from google.cloud.vision_v1 import ImageAnnotatorClient\n >>> client = ImageAnnotatorClient()\n >>> request = {\n ... 'image': {\n ... 'source': {'image_uri': 'https://foo.com/image.jpg'},\n ... },\n ... 
}\n >>> response = client.annotate_image(request)\n\n Args:\n request (:class:`~.vision_v1.types.AnnotateImageRequest`)\n options (:class:`google.gax.CallOptions`): Overrides the default\n settings for this call, e.g, timeout, retries, etc.\n\n Returns:\n :class:`~.vision_v1.types.AnnotateImageResponse` The API response.\n \"\"\"\n # If the image is a file handler, set the content.\n image = protobuf.get(request, 'image')\n if hasattr(image, 'read'):\n img_bytes = image.read()\n protobuf.set(request, 'image', {})\n protobuf.set(request, 'image.content', img_bytes)\n image = protobuf.get(request, 'image')\n\n # If a filename is provided, read the file.\n filename = protobuf.get(image, 'source.filename', default=None)\n if filename:\n with io.open(filename, 'rb') as img_file:\n protobuf.set(request, 'image.content', img_file.read())\n protobuf.set(request, 'image.source', None)\n\n # This method allows features not to be specified, and you get all\n # of them.\n protobuf.setdefault(request, 'features', self._get_all_features())\n r = self.batch_annotate_images([request], retry=retry, timeout=timeout)\n return r.responses[0]\n\n def _get_all_features(self):\n \"\"\"Return a list of all features.\n\n Returns:\n list: A list of all available features.\n \"\"\"\n return [\n {'type': feature}\n for feature in self.enums.Feature.Type if feature != 0]\n", "path": "vision/google/cloud/vision_helpers/__init__.py"}]} | 1,643 | 233 |
gh_patches_debug_34678 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-2903 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
meson/ninja scipy install not producing egg / dist info
And that means it does not appear in the result of `pip list` and causes anything requiring `scipy` to fail the sanity check.
</issue>
<code>
[start of easybuild/easyblocks/s/scipy.py]
1 ##
2 # Copyright 2009-2023 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for building and installing scipy, implemented as an easyblock
27
28 @author: Stijn De Weirdt (Ghent University)
29 @author: Dries Verdegem (Ghent University)
30 @author: Kenneth Hoste (Ghent University)
31 @author: Pieter De Baets (Ghent University)
32 @author: Jens Timmerman (Ghent University)
33 @author: Jasper Grimm (University of York)
34 """
35 import os
36 import tempfile
37 from distutils.version import LooseVersion
38
39 import easybuild.tools.environment as env
40 import easybuild.tools.toolchain as toolchain
41 from easybuild.easyblocks.generic.fortranpythonpackage import FortranPythonPackage
42 from easybuild.easyblocks.generic.mesonninja import MesonNinja
43 from easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir
44 from easybuild.framework.easyconfig import CUSTOM
45 from easybuild.tools.build_log import EasyBuildError
46 from easybuild.tools.filetools import change_dir, copy_dir
47
48
49 class EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):
50 """Support for installing the scipy Python package as part of a Python installation."""
51
52 @staticmethod
53 def extra_options():
54 """Easyconfig parameters specific to scipy."""
55 extra_vars = ({
56 'enable_slow_tests': [False, "Run scipy test suite, including tests marked as slow", CUSTOM],
57 'ignore_test_result': [None, "Run scipy test suite, but ignore test failures (True/False/None). Default "
58 "(None) implies True for scipy < 1.9, and False for scipy >= 1.9", CUSTOM],
59 })
60
61 return PythonPackage.extra_options(extra_vars=extra_vars)
62
63 def __init__(self, *args, **kwargs):
64 """Set scipy-specific test command."""
65 # calling PythonPackage __init__ also lets MesonNinja work in an extension
66 PythonPackage.__init__(self, *args, **kwargs)
67 self.testinstall = True
68
69 if LooseVersion(self.version) >= LooseVersion('1.9'):
70 self.use_meson = True
71
72 # enforce scipy test suite results if not explicitly disabled for scipy >= 1.9
73 # strip inherited PythonPackage installopts
74 installopts = self.cfg['installopts']
75 pythonpackage_installopts = ['--no-deps', '--ignore-installed', '--no-index', '--egg',
76 '--zip-ok', '--no-index']
77 self.log.info("Stripping inherited PythonPackage installopts %s from installopts %s",
78 pythonpackage_installopts, installopts)
79 for i in pythonpackage_installopts:
80 installopts = installopts.replace(i, '')
81 self.cfg['installopts'] = installopts
82
83 else:
84 self.use_meson = False
85
86 if self.cfg['ignore_test_result'] is None:
87 # automatically ignore scipy test suite results for scipy < 1.9, as we did in older easyconfigs
88 self.cfg['ignore_test_result'] = LooseVersion(self.version) < '1.9'
89 self.log.info("ignore_test_result not specified, so automatically set to %s for scipy %s",
90 self.cfg['ignore_test_result'], self.version)
91
92 if self.cfg['ignore_test_result']:
93 # used to maintain compatibility with easyconfigs predating scipy 1.9;
94 # runs tests (serially) in a way that exits with code 0 regardless of test results,
95 # see https://github.com/easybuilders/easybuild-easyblocks/issues/2237
96 self.testcmd = "cd .. && %(python)s -c 'import numpy; import scipy; scipy.test(verbose=2)'"
97 else:
98 self.testcmd = " && ".join([
99 "cd ..",
100 "touch %(srcdir)s/.coveragerc",
101 "%(python)s %(srcdir)s/runtests.py -v --no-build --parallel %(parallel)s",
102 ])
103 if self.cfg['enable_slow_tests']:
104 self.testcmd += " -m full "
105
106 def configure_step(self):
107 """Custom configure step for scipy: set extra installation options when needed."""
108
109 # scipy >= 1.9.0 uses Meson/Ninja
110 if self.use_meson:
111 # configure BLAS/LAPACK library to use with Meson for scipy >= 1.9.0
112 lapack_lib = self.toolchain.lapack_family()
113 if lapack_lib == toolchain.FLEXIBLAS:
114 blas_lapack = 'flexiblas'
115 elif lapack_lib == toolchain.INTELMKL:
116 blas_lapack = 'mkl'
117 elif lapack_lib == toolchain.OPENBLAS:
118 blas_lapack = 'openblas'
119 else:
120 raise EasyBuildError("Unknown BLAS/LAPACK library used: %s", lapack_lib)
121
122 for opt in ('blas', 'lapack'):
123 self.cfg.update('configopts', '-D%(opt)s=%(blas_lapack)s' % {'opt': opt, 'blas_lapack': blas_lapack})
124
125 # need to have already installed extensions in PATH, PYTHONPATH for configure/build/install steps
126 pythonpath = os.getenv('PYTHONPATH')
127 pylibdir = det_pylibdir()
128 env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, pylibdir), pythonpath]))
129
130 path = os.getenv('PATH')
131 env.setvar('PATH', os.pathsep.join([os.path.join(self.installdir, 'bin'), path]))
132
133 MesonNinja.configure_step(self)
134
135 else:
136 # scipy < 1.9.0 uses install procedure using setup.py
137 FortranPythonPackage.configure_step(self)
138
139 if LooseVersion(self.version) >= LooseVersion('0.13'):
140 # in recent scipy versions, additional compilation is done in the install step,
141 # which requires unsetting $LDFLAGS
142 if self.toolchain.comp_family() in [toolchain.GCC, toolchain.CLANGGCC]: # @UndefinedVariable
143 self.cfg.update('preinstallopts', "unset LDFLAGS && ")
144
145 def build_step(self):
146 """Custom build step for scipy: use ninja for scipy >= 1.9.0"""
147 if self.use_meson:
148 MesonNinja.build_step(self)
149 else:
150 FortranPythonPackage.build_step(self)
151
152 def test_step(self):
153 """Run available scipy unit tests. Adapted from numpy easyblock"""
154
155 if self.use_meson:
156 # temporarily install scipy so we can run the test suite
157 tmpdir = tempfile.mkdtemp()
158 cwd = os.getcwd()
159
160 tmp_builddir = os.path.join(tmpdir, 'build')
161 tmp_installdir = os.path.join(tmpdir, 'install')
162
163 # create a copy of the builddir
164 copy_dir(cwd, tmp_builddir)
165 change_dir(tmp_builddir)
166
167 # reconfigure (to update prefix), and install to tmpdir
168 MesonNinja.configure_step(self, cmd_prefix=tmp_installdir)
169 MesonNinja.install_step(self)
170
171 tmp_pylibdir = [os.path.join(tmp_installdir, det_pylibdir())]
172 self.prepare_python()
173
174 self.cfg['pretestopts'] = " && ".join([
175 # LDFLAGS should not be set when testing numpy/scipy, because it overwrites whatever numpy/scipy sets
176 # see http://projects.scipy.org/numpy/ticket/182
177 "unset LDFLAGS",
178 "export PYTHONPATH=%s:$PYTHONPATH" % tmp_pylibdir,
179 "",
180 ])
181 self.cfg['runtest'] = self.testcmd % {
182 'python': self.python_cmd,
183 'srcdir': self.cfg['start_dir'],
184 'parallel': self.cfg['parallel'],
185 }
186
187 MesonNinja.test_step(self)
188
189 else:
190 self.testcmd = self.testcmd % {
191 'python': '%(python)s',
192 'srcdir': self.cfg['start_dir'],
193 'parallel': self.cfg['parallel'],
194 }
195 FortranPythonPackage.test_step(self)
196
197 def install_step(self):
198 """Custom install step for scipy: use ninja for scipy >= 1.9.0"""
199 if self.use_meson:
200 MesonNinja.install_step(self)
201 else:
202 FortranPythonPackage.install_step(self)
203
204 def sanity_check_step(self, *args, **kwargs):
205 """Custom sanity check for scipy."""
206
207 # can't use self.pylibdir here, need to determine path on the fly using currently active 'python' command;
208 # this is important for numpy installations for multiple Python version (via multi_deps)
209 custom_paths = {
210 'files': [],
211 'dirs': [det_pylibdir()],
212 }
213
214 return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)
215
[end of easybuild/easyblocks/s/scipy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/easyblocks/s/scipy.py b/easybuild/easyblocks/s/scipy.py
--- a/easybuild/easyblocks/s/scipy.py
+++ b/easybuild/easyblocks/s/scipy.py
@@ -42,8 +42,8 @@
from easybuild.easyblocks.generic.mesonninja import MesonNinja
from easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir
from easybuild.framework.easyconfig import CUSTOM
-from easybuild.tools.build_log import EasyBuildError
-from easybuild.tools.filetools import change_dir, copy_dir
+from easybuild.tools.build_log import EasyBuildError, print_warning
+from easybuild.tools.filetools import change_dir, copy_dir, copy_file
class EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):
@@ -198,6 +198,18 @@
"""Custom install step for scipy: use ninja for scipy >= 1.9.0"""
if self.use_meson:
MesonNinja.install_step(self)
+
+ # copy PKG-INFO file included in scipy source tarball to scipy-<version>.egg-info in installation,
+ # so pip is aware of the scipy installation (required for 'pip list', 'pip check', etc.);
+ # see also https://github.com/easybuilders/easybuild-easyblocks/issues/2901
+ pkg_info = os.path.join(self.cfg['start_dir'], 'PKG-INFO')
+ target_egg_info = os.path.join(self.installdir, self.pylibdir, 'scipy-%s.egg-info' % self.version)
+ if os.path.isfile(pkg_info):
+ copy_file(pkg_info, target_egg_info)
+ else:
+ cwd = os.getcwd()
+ print_warning("%s not found in %s, so can't use it to create %s!", pkg_info, cwd, target_egg_info,
+ log=self.log)
else:
FortranPythonPackage.install_step(self)
@@ -211,4 +223,9 @@
'dirs': [det_pylibdir()],
}
- return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)
+ # make sure that scipy is included in output of 'pip list',
+ # so that 'pip check' passes if scipy is a required dependency for another Python package;
+ # use case-insensitive match, since name is sometimes reported as 'SciPy'
+ custom_commands = [r"pip list | grep -iE '^scipy\s+%s\s*$'" % self.version.replace('.', r'\.')]
+
+ return PythonPackage.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands)
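
The essence of the fix is to make a Meson-installed scipy visible to pip by copying the source tarball's `PKG-INFO` into the install tree as `scipy-<version>.egg-info`, and then to assert in the sanity check that scipy shows up in `pip list`. A stripped-down, hypothetical sketch of the same idea outside EasyBuild (the function name and paths are invented for illustration):

```python
# Hypothetical standalone version of the workaround in the easyblock above:
# drop PKG-INFO next to the installed package so pip's metadata scan finds it.
import os
import shutil
import subprocess

def expose_to_pip(src_dir: str, site_packages: str, name: str, version: str) -> bool:
    pkg_info = os.path.join(src_dir, "PKG-INFO")
    egg_info = os.path.join(site_packages, f"{name}-{version}.egg-info")
    if os.path.isfile(pkg_info):
        shutil.copyfile(pkg_info, egg_info)
    # Rough equivalent of the `pip list | grep -iE ...` sanity-check command.
    listing = subprocess.run(["pip", "list"], capture_output=True, text=True).stdout
    return any(line.lower().startswith(name.lower()) and version in line
               for line in listing.splitlines())
```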
| {"golden_diff": "diff --git a/easybuild/easyblocks/s/scipy.py b/easybuild/easyblocks/s/scipy.py\n--- a/easybuild/easyblocks/s/scipy.py\n+++ b/easybuild/easyblocks/s/scipy.py\n@@ -42,8 +42,8 @@\n from easybuild.easyblocks.generic.mesonninja import MesonNinja\n from easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir\n from easybuild.framework.easyconfig import CUSTOM\n-from easybuild.tools.build_log import EasyBuildError\n-from easybuild.tools.filetools import change_dir, copy_dir\n+from easybuild.tools.build_log import EasyBuildError, print_warning\n+from easybuild.tools.filetools import change_dir, copy_dir, copy_file\n \n \n class EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):\n@@ -198,6 +198,18 @@\n \"\"\"Custom install step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.install_step(self)\n+\n+ # copy PKG-INFO file included in scipy source tarball to scipy-<version>.egg-info in installation,\n+ # so pip is aware of the scipy installation (required for 'pip list', 'pip check', etc.);\n+ # see also https://github.com/easybuilders/easybuild-easyblocks/issues/2901\n+ pkg_info = os.path.join(self.cfg['start_dir'], 'PKG-INFO')\n+ target_egg_info = os.path.join(self.installdir, self.pylibdir, 'scipy-%s.egg-info' % self.version)\n+ if os.path.isfile(pkg_info):\n+ copy_file(pkg_info, target_egg_info)\n+ else:\n+ cwd = os.getcwd()\n+ print_warning(\"%s not found in %s, so can't use it to create %s!\", pkg_info, cwd, target_egg_info,\n+ log=self.log)\n else:\n FortranPythonPackage.install_step(self)\n \n@@ -211,4 +223,9 @@\n 'dirs': [det_pylibdir()],\n }\n \n- return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)\n+ # make sure that scipy is included in output of 'pip list',\n+ # so that 'pip check' passes if scipy is a required dependency for another Python package;\n+ # use case-insensitive match, since name is sometimes reported as 'SciPy'\n+ custom_commands = [r\"pip list | grep -iE '^scipy\\s+%s\\s*$'\" % self.version.replace('.', r'\\.')]\n+\n+ return PythonPackage.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands)\n", "issue": "meson/ninja scipy install not producing egg / dist info \nAnd that means it does not appear in the result of `pip list` and causes anything requiring `scipy` to fail the sanity check.\n", "before_files": [{"content": "##\n# Copyright 2009-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing scipy, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Jasper Grimm (University of York)\n\"\"\"\nimport os\nimport tempfile\nfrom distutils.version import LooseVersion\n\nimport easybuild.tools.environment as env\nimport easybuild.tools.toolchain as toolchain\nfrom easybuild.easyblocks.generic.fortranpythonpackage import FortranPythonPackage\nfrom easybuild.easyblocks.generic.mesonninja import MesonNinja\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import change_dir, copy_dir\n\n\nclass EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):\n \"\"\"Support for installing the scipy Python package as part of a Python installation.\"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Easyconfig parameters specific to scipy.\"\"\"\n extra_vars = ({\n 'enable_slow_tests': [False, \"Run scipy test suite, including tests marked as slow\", CUSTOM],\n 'ignore_test_result': [None, \"Run scipy test suite, but ignore test failures (True/False/None). Default \"\n \"(None) implies True for scipy < 1.9, and False for scipy >= 1.9\", CUSTOM],\n })\n\n return PythonPackage.extra_options(extra_vars=extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Set scipy-specific test command.\"\"\"\n # calling PythonPackage __init__ also lets MesonNinja work in an extension\n PythonPackage.__init__(self, *args, **kwargs)\n self.testinstall = True\n\n if LooseVersion(self.version) >= LooseVersion('1.9'):\n self.use_meson = True\n\n # enforce scipy test suite results if not explicitly disabled for scipy >= 1.9\n # strip inherited PythonPackage installopts\n installopts = self.cfg['installopts']\n pythonpackage_installopts = ['--no-deps', '--ignore-installed', '--no-index', '--egg',\n '--zip-ok', '--no-index']\n self.log.info(\"Stripping inherited PythonPackage installopts %s from installopts %s\",\n pythonpackage_installopts, installopts)\n for i in pythonpackage_installopts:\n installopts = installopts.replace(i, '')\n self.cfg['installopts'] = installopts\n\n else:\n self.use_meson = False\n\n if self.cfg['ignore_test_result'] is None:\n # automatically ignore scipy test suite results for scipy < 1.9, as we did in older easyconfigs\n self.cfg['ignore_test_result'] = LooseVersion(self.version) < '1.9'\n self.log.info(\"ignore_test_result not specified, so automatically set to %s for scipy %s\",\n self.cfg['ignore_test_result'], self.version)\n\n if self.cfg['ignore_test_result']:\n # used to maintain compatibility with easyconfigs predating scipy 1.9;\n # runs tests (serially) in a way that exits with code 0 regardless of test results,\n # see https://github.com/easybuilders/easybuild-easyblocks/issues/2237\n self.testcmd = \"cd .. 
&& %(python)s -c 'import numpy; import scipy; scipy.test(verbose=2)'\"\n else:\n self.testcmd = \" && \".join([\n \"cd ..\",\n \"touch %(srcdir)s/.coveragerc\",\n \"%(python)s %(srcdir)s/runtests.py -v --no-build --parallel %(parallel)s\",\n ])\n if self.cfg['enable_slow_tests']:\n self.testcmd += \" -m full \"\n\n def configure_step(self):\n \"\"\"Custom configure step for scipy: set extra installation options when needed.\"\"\"\n\n # scipy >= 1.9.0 uses Meson/Ninja\n if self.use_meson:\n # configure BLAS/LAPACK library to use with Meson for scipy >= 1.9.0\n lapack_lib = self.toolchain.lapack_family()\n if lapack_lib == toolchain.FLEXIBLAS:\n blas_lapack = 'flexiblas'\n elif lapack_lib == toolchain.INTELMKL:\n blas_lapack = 'mkl'\n elif lapack_lib == toolchain.OPENBLAS:\n blas_lapack = 'openblas'\n else:\n raise EasyBuildError(\"Unknown BLAS/LAPACK library used: %s\", lapack_lib)\n\n for opt in ('blas', 'lapack'):\n self.cfg.update('configopts', '-D%(opt)s=%(blas_lapack)s' % {'opt': opt, 'blas_lapack': blas_lapack})\n\n # need to have already installed extensions in PATH, PYTHONPATH for configure/build/install steps\n pythonpath = os.getenv('PYTHONPATH')\n pylibdir = det_pylibdir()\n env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, pylibdir), pythonpath]))\n\n path = os.getenv('PATH')\n env.setvar('PATH', os.pathsep.join([os.path.join(self.installdir, 'bin'), path]))\n\n MesonNinja.configure_step(self)\n\n else:\n # scipy < 1.9.0 uses install procedure using setup.py\n FortranPythonPackage.configure_step(self)\n\n if LooseVersion(self.version) >= LooseVersion('0.13'):\n # in recent scipy versions, additional compilation is done in the install step,\n # which requires unsetting $LDFLAGS\n if self.toolchain.comp_family() in [toolchain.GCC, toolchain.CLANGGCC]: # @UndefinedVariable\n self.cfg.update('preinstallopts', \"unset LDFLAGS && \")\n\n def build_step(self):\n \"\"\"Custom build step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.build_step(self)\n else:\n FortranPythonPackage.build_step(self)\n\n def test_step(self):\n \"\"\"Run available scipy unit tests. 
Adapted from numpy easyblock\"\"\"\n\n if self.use_meson:\n # temporarily install scipy so we can run the test suite\n tmpdir = tempfile.mkdtemp()\n cwd = os.getcwd()\n\n tmp_builddir = os.path.join(tmpdir, 'build')\n tmp_installdir = os.path.join(tmpdir, 'install')\n\n # create a copy of the builddir\n copy_dir(cwd, tmp_builddir)\n change_dir(tmp_builddir)\n\n # reconfigure (to update prefix), and install to tmpdir\n MesonNinja.configure_step(self, cmd_prefix=tmp_installdir)\n MesonNinja.install_step(self)\n\n tmp_pylibdir = [os.path.join(tmp_installdir, det_pylibdir())]\n self.prepare_python()\n\n self.cfg['pretestopts'] = \" && \".join([\n # LDFLAGS should not be set when testing numpy/scipy, because it overwrites whatever numpy/scipy sets\n # see http://projects.scipy.org/numpy/ticket/182\n \"unset LDFLAGS\",\n \"export PYTHONPATH=%s:$PYTHONPATH\" % tmp_pylibdir,\n \"\",\n ])\n self.cfg['runtest'] = self.testcmd % {\n 'python': self.python_cmd,\n 'srcdir': self.cfg['start_dir'],\n 'parallel': self.cfg['parallel'],\n }\n\n MesonNinja.test_step(self)\n\n else:\n self.testcmd = self.testcmd % {\n 'python': '%(python)s',\n 'srcdir': self.cfg['start_dir'],\n 'parallel': self.cfg['parallel'],\n }\n FortranPythonPackage.test_step(self)\n\n def install_step(self):\n \"\"\"Custom install step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.install_step(self)\n else:\n FortranPythonPackage.install_step(self)\n\n def sanity_check_step(self, *args, **kwargs):\n \"\"\"Custom sanity check for scipy.\"\"\"\n\n # can't use self.pylibdir here, need to determine path on the fly using currently active 'python' command;\n # this is important for numpy installations for multiple Python version (via multi_deps)\n custom_paths = {\n 'files': [],\n 'dirs': [det_pylibdir()],\n }\n\n return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)\n", "path": "easybuild/easyblocks/s/scipy.py"}]} | 3,325 | 601 |
gh_patches_debug_21937 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods in different files. All of these should be moved into one file.
</issue>
<code>
[start of maths/miller_rabin.py]
1 import random
2
3 from .binary_exp_mod import bin_exp_mod
4
5
6 # This is a probabilistic check to test primality, useful for big numbers!
7 # if it's a prime, it will return true
8 # if it's not a prime, the chance of it returning true is at most 1/4**prec
9 def is_prime_big(n, prec=1000):
10 """
11 >>> from maths.prime_check import is_prime
12 >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s
13 >>> all(is_prime_big(i) == is_prime(i) for i in range(256))
14 True
15 """
16 if n < 2:
17 return False
18
19 if n % 2 == 0:
20 return n == 2
21
22 # this means n is odd
23 d = n - 1
24 exp = 0
25 while d % 2 == 0:
26 d /= 2
27 exp += 1
28
29 # n - 1=d*(2**exp)
30 count = 0
31 while count < prec:
32 a = random.randint(2, n - 1)
33 b = bin_exp_mod(a, d, n)
34 if b != 1:
35 flag = True
36 for _ in range(exp):
37 if b == n - 1:
38 flag = False
39 break
40 b = b * b
41 b %= n
42 if flag:
43 return False
44 count += 1
45 return True
46
47
48 if __name__ == "__main__":
49 n = abs(int(input("Enter bound : ").strip()))
50 print("Here's the list of primes:")
51 print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
52
[end of maths/miller_rabin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py
deleted file mode 100644
--- a/maths/miller_rabin.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import random
-
-from .binary_exp_mod import bin_exp_mod
-
-
-# This is a probabilistic check to test primality, useful for big numbers!
-# if it's a prime, it will return true
-# if it's not a prime, the chance of it returning true is at most 1/4**prec
-def is_prime_big(n, prec=1000):
- """
- >>> from maths.prime_check import is_prime
- >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s
- >>> all(is_prime_big(i) == is_prime(i) for i in range(256))
- True
- """
- if n < 2:
- return False
-
- if n % 2 == 0:
- return n == 2
-
- # this means n is odd
- d = n - 1
- exp = 0
- while d % 2 == 0:
- d /= 2
- exp += 1
-
- # n - 1=d*(2**exp)
- count = 0
- while count < prec:
- a = random.randint(2, n - 1)
- b = bin_exp_mod(a, d, n)
- if b != 1:
- flag = True
- for _ in range(exp):
- if b == n - 1:
- flag = False
- break
- b = b * b
- b %= n
- if flag:
- return False
- count += 1
- return True
-
-
-if __name__ == "__main__":
- n = abs(int(input("Enter bound : ").strip()))
- print("Here's the list of primes:")
- print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| {"golden_diff": "diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py\ndeleted file mode 100644\n--- a/maths/miller_rabin.py\n+++ /dev/null\n@@ -1,51 +0,0 @@\n-import random\n-\n-from .binary_exp_mod import bin_exp_mod\n-\n-\n-# This is a probabilistic check to test primality, useful for big numbers!\n-# if it's a prime, it will return true\n-# if it's not a prime, the chance of it returning true is at most 1/4**prec\n-def is_prime_big(n, prec=1000):\n- \"\"\"\n- >>> from maths.prime_check import is_prime\n- >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s\n- >>> all(is_prime_big(i) == is_prime(i) for i in range(256))\n- True\n- \"\"\"\n- if n < 2:\n- return False\n-\n- if n % 2 == 0:\n- return n == 2\n-\n- # this means n is odd\n- d = n - 1\n- exp = 0\n- while d % 2 == 0:\n- d /= 2\n- exp += 1\n-\n- # n - 1=d*(2**exp)\n- count = 0\n- while count < prec:\n- a = random.randint(2, n - 1)\n- b = bin_exp_mod(a, d, n)\n- if b != 1:\n- flag = True\n- for _ in range(exp):\n- if b == n - 1:\n- flag = False\n- break\n- b = b * b\n- b %= n\n- if flag:\n- return False\n- count += 1\n- return True\n-\n-\n-if __name__ == \"__main__\":\n- n = abs(int(input(\"Enter bound : \").strip()))\n- print(\"Here's the list of primes:\")\n- print(\", \".join(str(i) for i in range(n + 1) if is_prime_big(i)))\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file\n", "before_files": [{"content": "import random\n\nfrom .binary_exp_mod import bin_exp_mod\n\n\n# This is a probabilistic check to test primality, useful for big numbers!\n# if it's a prime, it will return true\n# if it's not a prime, the chance of it returning true is at most 1/4**prec\ndef is_prime_big(n, prec=1000):\n \"\"\"\n >>> from maths.prime_check import is_prime\n >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s\n >>> all(is_prime_big(i) == is_prime(i) for i in range(256))\n True\n \"\"\"\n if n < 2:\n return False\n\n if n % 2 == 0:\n return n == 2\n\n # this means n is odd\n d = n - 1\n exp = 0\n while d % 2 == 0:\n d /= 2\n exp += 1\n\n # n - 1=d*(2**exp)\n count = 0\n while count < prec:\n a = random.randint(2, n - 1)\n b = bin_exp_mod(a, d, n)\n if b != 1:\n flag = True\n for _ in range(exp):\n if b == n - 1:\n flag = False\n break\n b = b * b\n b %= n\n if flag:\n return False\n count += 1\n return True\n\n\nif __name__ == \"__main__\":\n n = abs(int(input(\"Enter bound : \").strip()))\n print(\"Here's the list of primes:\")\n print(\", \".join(str(i) for i in range(n + 1) if is_prime_big(i)))\n", "path": "maths/miller_rabin.py"}]} | 1,076 | 497 |
gh_patches_debug_9138 | rasdani/github-patches | git_diff | keras-team__autokeras-277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot install autokeras because of a package dependency conflict
### Bug Description
The following package dependencies are configured in setup.py:
https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6
```
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow>=1.10.0', 'tqdm==4.25.0'],
```
When executing `pip install autokeras`, the following error appears.
```
keras 2.2.2 has requirement keras-applications==1.0.4, but you'll have keras-applications 1.0.6 which is incompatible.
keras 2.2.2 has requirement keras-preprocessing==1.0.2, but you'll have keras-preprocessing 1.0.5 which is incompatible.
```
This happens because tensorflow==1.11.0 is installed first, and it allows keras-applications >= 1.0.5 and keras-preprocessing >= 1.0.3 to be installed alongside it.
On the other hand, keras==2.2.2 pins keras-applications==1.0.4 and keras-preprocessing==1.0.2.
The tensorflow version should therefore be pinned as `tensorflow==1.10.0` at [setup.py L6](https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6).
```
# before
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow>=1.10.0', 'tqdm==4.25.0'],
# after
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow==1.10.0', 'tqdm==4.25.0'],
```
### Reproducing Steps
Step1: curl https://gist.githubusercontent.com/chie8842/b3b9f3ea2d886bbb5aa5c903b9e42ee3/raw/e94cc375ca1265c66d4517a25a748f1e13a3de9d/Dockerfile -o Dockerfile
Step2: docker build -t autokeras -f Dockerfile .
Step3: docker run -it --rm autokeras /bin/bash
Step4: sudo pip install autokeras
</issue>
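For quick reference, the reporter's proposed change amounts to tightening one pin in the dependency list; a sketch of the resulting `install_requires` (the pins are taken from the issue itself, only the comments are added):

```python
# setup.py (sketch of the fix described above): pin tensorflow so pip cannot pull
# 1.11.0, whose keras-applications / keras-preprocessing releases conflict with
# the exact pins of keras 2.2.2.
install_requires = [
    'torch==0.4.1',
    'torchvision==0.2.1',
    'numpy>=1.14.5',
    'keras==2.2.2',
    'scikit-learn==0.19.1',
    'tensorflow==1.10.0',  # was 'tensorflow>=1.10.0'
    'tqdm==4.25.0',
]
```

After installing with the tightened pin, `pip check` should no longer report the keras-applications / keras-preprocessing conflicts quoted above.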
<code>
[start of setup.py]
1 from distutils.core import setup
2
3 setup(
4 name='autokeras',
5 packages=['autokeras'], # this must be the same as the name above
6 install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
7 'tensorflow>=1.10.0', 'tqdm==4.25.0'],
8 version='0.2.18',
9 description='AutoML for deep learning',
10 author='Haifeng Jin',
11 author_email='[email protected]',
12 url='http://autokeras.com',
13 download_url='https://github.com/jhfjhfj1/autokeras/archive/0.2.18.tar.gz',
14 keywords=['automl'], # arbitrary keywords
15 classifiers=[]
16 )
17
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
name='autokeras',
packages=['autokeras'], # this must be the same as the name above
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
- 'tensorflow>=1.10.0', 'tqdm==4.25.0'],
+ 'tensorflow==1.10.0', 'tqdm==4.25.0'],
version='0.2.18',
description='AutoML for deep learning',
author='Haifeng Jin',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n name='autokeras',\n packages=['autokeras'], # this must be the same as the name above\n install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\n- 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\n+ 'tensorflow==1.10.0', 'tqdm==4.25.0'],\n version='0.2.18',\n description='AutoML for deep learning',\n author='Haifeng Jin',\n", "issue": "cannot install autokeras because of package dependency confliction\n### Bug Description\r\nfollowing package dependency is configured at setup.py\r\nhttps://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6\r\n\r\n```\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\r\n```\r\n\r\nWhen execute `pip install autokeras`, following error is appeared.\r\n\r\n```\r\nkeras 2.2.2 has requirement keras-applications==1.0.4, but you'll have keras-applications 1.0.6 which is incompatible.\r\nkeras 2.2.2 has requirement keras-preprocessing==1.0.2, but you'll have keras-preprocessing 1.0.5 which is incompatible.\r\n```\r\n\r\nIt is because that tensorflow==1.11.0 is installed first and\r\nkeras-applications >= 1.0.5 and keras-preprocessing > = 1.0.3 can installed with tensorflow==1.11.0.\r\nOn the other hand, keras==2.2.2's dependency versions are keras-applications==1.0.4 and keras-preprocessing==1.0.2.\r\n\r\n tensorflow version should be defined as `tensorflow==1.10.0`at [setup.py L6](https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6).\r\n\r\n```\r\n# before\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\r\n\r\n# after\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow==1.10.0', 'tqdm==4.25.0'],\r\n```\r\n\r\n### Reproducing Steps\r\n\u00a0\r\nStep1: curl https://gist.githubusercontent.com/chie8842/b3b9f3ea2d886bbb5aa5c903b9e42ee3/raw/e94cc375ca1265c66d4517a25a748f1e13a3de9d/Dockerfile -o Dockerfile\r\nStep2: docker build -t autokeras -f Dockerfile .\r\nStep3: docker run -it --rm autokeras /bin/bash\r\nStep4: sudo pip install autokeras\n", "before_files": [{"content": "from distutils.core import setup\n\nsetup(\n name='autokeras',\n packages=['autokeras'], # this must be the same as the name above\n install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\n version='0.2.18',\n description='AutoML for deep learning',\n author='Haifeng Jin',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/jhfjhfj1/autokeras/archive/0.2.18.tar.gz',\n keywords=['automl'], # arbitrary keywords\n classifiers=[]\n)\n", "path": "setup.py"}]} | 1,417 | 186 |
gh_patches_debug_17732 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Import from Goodreads doesn't work correctly
**Describe the bug**
Importing from a Goodreads CSV imports only the first line of the file and then stops with a 'success' status. If the user tries to re-import the same CSV, the importer picks up that same first line again.
Broken import examples https://bookwyrm.social/import/775 https://bookwyrm.social/import/776
**Expected behavior**
The importer correctly imports all lines of the CSV, or returns an error message to the user.
</issue>
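To make the expected behaviour concrete, a generic sketch (not bookwyrm's importer code — `rows_from_export` and the two-row sample are invented for the illustration): every data row of the uploaded CSV should yield one import item, not just the first.

```python
import csv
from io import StringIO


def rows_from_export(csv_text: str) -> list:
    """Parse a Goodreads-style export; one dict per data row."""
    return list(csv.DictReader(StringIO(csv_text)))


sample = "Title,Author\nDune,Frank Herbert\nHyperion,Dan Simmons\n"
assert len(rows_from_export(sample)) == 2  # both rows, not only the first
```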
<code>
[start of bookwyrm/views/import_data.py]
1 """ import books from another app """
2 from io import TextIOWrapper
3
4 from django.contrib.auth.decorators import login_required
5 from django.core.exceptions import PermissionDenied
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.utils.translation import gettext_lazy as _
11 from django.views import View
12
13 from bookwyrm import forms, models
14 from bookwyrm.importers import (
15 Importer,
16 LibrarythingImporter,
17 GoodreadsImporter,
18 StorygraphImporter,
19 )
20 from bookwyrm.tasks import app
21
22 # pylint: disable= no-self-use
23 @method_decorator(login_required, name="dispatch")
24 class Import(View):
25 """import view"""
26
27 def get(self, request):
28 """load import page"""
29 return TemplateResponse(
30 request,
31 "import.html",
32 {
33 "import_form": forms.ImportForm(),
34 "jobs": models.ImportJob.objects.filter(user=request.user).order_by(
35 "-created_date"
36 ),
37 },
38 )
39
40 def post(self, request):
41 """ingest a goodreads csv"""
42 form = forms.ImportForm(request.POST, request.FILES)
43 if form.is_valid():
44 include_reviews = request.POST.get("include_reviews") == "on"
45 privacy = request.POST.get("privacy")
46 source = request.POST.get("source")
47
48 importer = None
49 if source == "LibraryThing":
50 importer = LibrarythingImporter()
51 elif source == "Storygraph":
52 importer = StorygraphImporter()
53 else:
54 # Default : GoodReads
55 importer = GoodreadsImporter()
56
57 try:
58 job = importer.create_job(
59 request.user,
60 TextIOWrapper(
61 request.FILES["csv_file"], encoding=importer.encoding
62 ),
63 include_reviews,
64 privacy,
65 )
66 except (UnicodeDecodeError, ValueError, KeyError):
67 return HttpResponseBadRequest(_("Not a valid csv file"))
68
69 importer.start_import(job)
70
71 return redirect("/import/%d" % job.id)
72 return HttpResponseBadRequest()
73
74
75 @method_decorator(login_required, name="dispatch")
76 class ImportStatus(View):
77 """status of an existing import"""
78
79 def get(self, request, job_id):
80 """status of an import job"""
81 job = get_object_or_404(models.ImportJob, id=job_id)
82 if job.user != request.user:
83 raise PermissionDenied
84
85 try:
86 task = app.AsyncResult(job.task_id)
87 # triggers attribute error if the task won't load
88 task.status # pylint: disable=pointless-statement
89 except (ValueError, AttributeError):
90 task = None
91
92 items = job.items.order_by("index").all()
93 failed_items = [i for i in items if i.fail_reason]
94 items = [i for i in items if not i.fail_reason]
95 return TemplateResponse(
96 request,
97 "import_status.html",
98 {"job": job, "items": items, "failed_items": failed_items, "task": task},
99 )
100
101 def post(self, request, job_id):
102 """retry lines from an import"""
103 job = get_object_or_404(models.ImportJob, id=job_id)
104 items = []
105 for item in request.POST.getlist("import_item"):
106 items.append(get_object_or_404(models.ImportItem, id=item))
107
108 importer = Importer()
109 job = importer.create_retry_job(
110 request.user,
111 job,
112 items,
113 )
114 importer.start_import(job)
115 return redirect("/import/%d" % job.id)
116
[end of bookwyrm/views/import_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py
--- a/bookwyrm/views/import_data.py
+++ b/bookwyrm/views/import_data.py
@@ -28,7 +28,7 @@
"""load import page"""
return TemplateResponse(
request,
- "import.html",
+ "import/import.html",
{
"import_form": forms.ImportForm(),
"jobs": models.ImportJob.objects.filter(user=request.user).order_by(
@@ -94,7 +94,7 @@
items = [i for i in items if not i.fail_reason]
return TemplateResponse(
request,
- "import_status.html",
+ "import/import_status.html",
{"job": job, "items": items, "failed_items": failed_items, "task": task},
)
| {"golden_diff": "diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py\n--- a/bookwyrm/views/import_data.py\n+++ b/bookwyrm/views/import_data.py\n@@ -28,7 +28,7 @@\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n- \"import.html\",\n+ \"import/import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n@@ -94,7 +94,7 @@\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n- \"import_status.html\",\n+ \"import/import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n", "issue": "Import from Goodreads doesn't work correctly\n**Describe the bug**\r\n\r\nImport from goodreads csv imports only first line of csv and stops with 'success' status. If user tries to reimport same csv again importer takes the same first imported line yet again. \r\n\r\nBroken import examples https://bookwyrm.social/import/775 https://bookwyrm.social/import/776\r\n\r\n**Expected behavior**\r\nImporter correctly imports all lines of csv or returns error message to user\n", "before_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n\n try:\n task = app.AsyncResult(job.task_id)\n # triggers 
attribute error if the task won't load\n task.status # pylint: disable=pointless-statement\n except (ValueError, AttributeError):\n task = None\n\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}]} | 1,644 | 181 |
gh_patches_debug_35754 | rasdani/github-patches | git_diff | beetbox__beets-1595 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plexupdate: Doesn't work with libraries not named "Music"
I've named my music libraries `Music (New)` and `Music (Untagged)`. The plex update plugin should update the `Music (New)` section, but instead of updating at least both music libraries, it doesn't update anything. If I change the library name from `Music (New)` to `Music` it works like a charm. This is specified on line 33 of the beets plugin. A config option to add libraries other than `Music` would make sense imo.
</issue>
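To make the failure mode concrete: the lookup in the plugin below compares section titles for exact equality, so a library that is not literally called `Music` never yields a section key. A small self-contained sketch (the sample XML and `find_section_key` are illustrative, not the plugin's real API):

```python
import xml.etree.ElementTree as ET


def find_section_key(sections_xml, library_name):
    """Return the Plex section key whose title matches library_name, else None."""
    tree = ET.fromstring(sections_xml)
    for child in tree.findall('Directory'):
        if child.get('title') == library_name:
            return child.get('key')
    return None


sample = '<MediaContainer><Directory key="5" title="Music (New)"/></MediaContainer>'
assert find_section_key(sample, 'Music (New)') == '5'
assert find_section_key(sample, 'Music') is None  # the hard-coded name finds nothing
```

A `plex.library_name` option defaulting to `Music` would let the lookup use the configured title instead of the hard-coded one.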
<code>
[start of beetsplug/plexupdate.py]
1 """Updates an Plex library whenever the beets library is changed.
2
3 Plex Home users enter the Plex Token to enable updating.
4 Put something like the following in your config.yaml to configure:
5 plex:
6 host: localhost
7 port: 32400
8 token: token
9 """
10 from __future__ import (division, absolute_import, print_function,
11 unicode_literals)
12
13 import requests
14 from urlparse import urljoin
15 from urllib import urlencode
16 import xml.etree.ElementTree as ET
17 from beets import config
18 from beets.plugins import BeetsPlugin
19
20
21 def get_music_section(host, port, token):
22 """Getting the section key for the music library in Plex.
23 """
24 api_endpoint = append_token('library/sections', token)
25 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
26
27 # Sends request.
28 r = requests.get(url)
29
30 # Parse xml tree and extract music section key.
31 tree = ET.fromstring(r.text)
32 for child in tree.findall('Directory'):
33 if child.get('title') == 'Music':
34 return child.get('key')
35
36
37 def update_plex(host, port, token):
38 """Sends request to the Plex api to start a library refresh.
39 """
40 # Getting section key and build url.
41 section_key = get_music_section(host, port, token)
42 api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
43 api_endpoint = append_token(api_endpoint, token)
44 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
45
46 # Sends request and returns requests object.
47 r = requests.get(url)
48 return r
49
50
51 def append_token(url, token):
52 """Appends the Plex Home token to the api call if required.
53 """
54 if token:
55 url += '?' + urlencode({'X-Plex-Token': token})
56 return url
57
58
59 class PlexUpdate(BeetsPlugin):
60 def __init__(self):
61 super(PlexUpdate, self).__init__()
62
63 # Adding defaults.
64 config['plex'].add({
65 u'host': u'localhost',
66 u'port': 32400,
67 u'token': u''})
68
69 self.register_listener('database_change', self.listen_for_db_change)
70
71 def listen_for_db_change(self, lib, model):
72 """Listens for beets db change and register the update for the end"""
73 self.register_listener('cli_exit', self.update)
74
75 def update(self, lib):
76 """When the client exists try to send refresh request to Plex server.
77 """
78 self._log.info('Updating Plex library...')
79
80 # Try to send update request.
81 try:
82 update_plex(
83 config['plex']['host'].get(),
84 config['plex']['port'].get(),
85 config['plex']['token'].get())
86 self._log.info('... started.')
87
88 except requests.exceptions.RequestException:
89 self._log.warning('Update failed.')
90
[end of beetsplug/plexupdate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py
--- a/beetsplug/plexupdate.py
+++ b/beetsplug/plexupdate.py
@@ -18,7 +18,7 @@
from beets.plugins import BeetsPlugin
-def get_music_section(host, port, token):
+def get_music_section(host, port, token, library_name):
"""Getting the section key for the music library in Plex.
"""
api_endpoint = append_token('library/sections', token)
@@ -30,15 +30,15 @@
# Parse xml tree and extract music section key.
tree = ET.fromstring(r.text)
for child in tree.findall('Directory'):
- if child.get('title') == 'Music':
+ if child.get('title') == library_name:
return child.get('key')
-def update_plex(host, port, token):
+def update_plex(host, port, token, library_name):
"""Sends request to the Plex api to start a library refresh.
"""
# Getting section key and build url.
- section_key = get_music_section(host, port, token)
+ section_key = get_music_section(host, port, token, library_name)
api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
api_endpoint = append_token(api_endpoint, token)
url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
@@ -64,7 +64,8 @@
config['plex'].add({
u'host': u'localhost',
u'port': 32400,
- u'token': u''})
+ u'token': u'',
+ u'library_name': u'Music'})
self.register_listener('database_change', self.listen_for_db_change)
@@ -82,7 +83,8 @@
update_plex(
config['plex']['host'].get(),
config['plex']['port'].get(),
- config['plex']['token'].get())
+ config['plex']['token'].get(),
+ config['plex']['library_name'].get())
self._log.info('... started.')
except requests.exceptions.RequestException:
| {"golden_diff": "diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py\n--- a/beetsplug/plexupdate.py\n+++ b/beetsplug/plexupdate.py\n@@ -18,7 +18,7 @@\n from beets.plugins import BeetsPlugin\n \n \n-def get_music_section(host, port, token):\n+def get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n@@ -30,15 +30,15 @@\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n- if child.get('title') == 'Music':\n+ if child.get('title') == library_name:\n return child.get('key')\n \n \n-def update_plex(host, port, token):\n+def update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n- section_key = get_music_section(host, port, token)\n+ section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n@@ -64,7 +64,8 @@\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n- u'token': u''})\n+ u'token': u'',\n+ u'library_name': u'Music'})\n \n self.register_listener('database_change', self.listen_for_db_change)\n \n@@ -82,7 +83,8 @@\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n- config['plex']['token'].get())\n+ config['plex']['token'].get(),\n+ config['plex']['library_name'].get())\n self._log.info('... started.')\n \n except requests.exceptions.RequestException:\n", "issue": "plexupdate: Doesn't work with libaries not named \"Music\"\nI've named my music libaries `Music (New)` and `Music (Untagged)`. The plex update plugin should update the `Music (New)` section, but instead of updating at least both music libaries it doesn't update anything. If I change the library name from `Music (New)` to `Music` it works like a charm. This is specified on line 33 of the beets plugin. 
A config option to add libraries other than `Music` would make sense imo.\n\n", "before_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == 'Music':\n return child.get('key')\n\n\ndef update_plex(host, port, token):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u''})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}]} | 1,465 | 486 |
gh_patches_debug_21909 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strawberry cli commands fail with error: strawberry.exe\__main__.py not found
After upgrading strawberry to the latest version (0.154.1), I am unable to run any strawberry CLI commands.
## Describe the Bug
- Upgraded strawberry from 0.152.0 to 0.154.1
```
poetry add strawberry-graphql[debug-server]@0.154.1
```
- Executed the commands below:
```
strawberry server myapp.schema
strawberry export-schema myapp.schema:schema
```
- Both of these commands fail with the error below:
**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\myyuser\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\straw-k47ybk7v-py3.10\\Scripts\\strawberry.exe\\\_\_main\_\_.py'**
## System Information
- Operating system: Windows 10
- Strawberry version (if applicable): 0.154.1
- Python: 3.10.9
## Additional Context
There is no issue with strawberry cli in version 0.152.0 which I am using currently. If we downgrade the package to this version, cli commands work just fine.
</issue>
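The traceback comes from `Path.samefile`, which stats both paths and raises `FileNotFoundError` when one of them — here the synthetic `strawberry.exe\__main__.py` path produced when running through the console-script executable — does not exist, instead of returning `False`. A minimal illustration of the guarded comparison (the helper name is an invention for the sketch):

```python
from pathlib import Path


def is_same_file(a, b):
    """Compare two paths, treating a missing path as 'not the same file'."""
    try:
        return Path(a).samefile(b)
    except FileNotFoundError:
        return False


print(is_same_file(__file__, __file__))  # True
print(is_same_file(r"C:\missing\strawberry.exe\__main__.py", __file__))  # False, no crash
```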
<code>
[start of strawberry/lazy_type.py]
1 import importlib
2 import inspect
3 import sys
4 import warnings
5 from dataclasses import dataclass
6 from pathlib import Path
7 from typing import ForwardRef, Generic, Optional, Type, TypeVar, cast
8
9 TypeName = TypeVar("TypeName")
10 Module = TypeVar("Module")
11
12
13 @dataclass(frozen=True)
14 class LazyType(Generic[TypeName, Module]):
15 type_name: str
16 module: str
17 package: Optional[str] = None
18
19 def __class_getitem__(cls, params):
20 warnings.warn(
21 (
22 "LazyType is deprecated, use "
23 "Annotated[YourType, strawberry.lazy(path)] instead"
24 ),
25 DeprecationWarning,
26 stacklevel=2,
27 )
28
29 type_name, module = params
30
31 package = None
32
33 if module.startswith("."):
34 current_frame = inspect.currentframe()
35 assert current_frame is not None
36 assert current_frame.f_back is not None
37 package = current_frame.f_back.f_globals["__package__"]
38
39 return cls(type_name, module, package)
40
41 def resolve_type(self) -> Type:
42 module = importlib.import_module(self.module, self.package)
43 main_module = sys.modules.get("__main__", None)
44 if main_module:
45 # If lazy type points to the main module, use it instead of the imported
46 # module. Otherwise duplication checks during schema-conversion might fail.
47 # Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397
48 if main_module.__spec__ and main_module.__spec__.name == self.module:
49 module = main_module
50 elif hasattr(main_module, "__file__") and hasattr(module, "__file__"):
51 if (
52 main_module.__file__
53 and module.__file__
54 and Path(main_module.__file__).samefile(module.__file__)
55 ):
56 module = main_module
57 return module.__dict__[self.type_name]
58
59 # this empty call method allows LazyTypes to be used in generic types
60 # for example: List[LazyType["A", "module"]]
61
62 def __call__(self): # pragma: no cover
63 return None
64
65
66 class StrawberryLazyReference:
67 def __init__(self, module: str) -> None:
68 self.module = module
69 self.package = None
70
71 if module.startswith("."):
72 frame = inspect.stack()[2][0]
73 # TODO: raise a nice error if frame is None
74 assert frame is not None
75 self.package = cast(str, frame.f_globals["__package__"])
76
77 def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:
78 return LazyType(forward_ref.__forward_arg__, self.module, self.package)
79
80
81 def lazy(module_path: str) -> StrawberryLazyReference:
82 return StrawberryLazyReference(module_path)
83
[end of strawberry/lazy_type.py]
</code>
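For reference, a sketch of how the lazy reference defined above is meant to be consumed, following the `Annotated[YourType, strawberry.lazy(path)]` form named in the module's own deprecation warning (the `app.types` module and `Author` type are placeholders, and resolution is assumed to be deferred until the schema is built):

```python
from typing import TYPE_CHECKING, Annotated

import strawberry

if TYPE_CHECKING:
    from app.types import Author  # imported only for type checkers


@strawberry.type
class Book:
    title: str
    # the string forward reference plus strawberry.lazy avoids importing
    # app.types at class-definition time
    author: Annotated["Author", strawberry.lazy("app.types")]
```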
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/lazy_type.py b/strawberry/lazy_type.py
--- a/strawberry/lazy_type.py
+++ b/strawberry/lazy_type.py
@@ -48,12 +48,16 @@
if main_module.__spec__ and main_module.__spec__.name == self.module:
module = main_module
elif hasattr(main_module, "__file__") and hasattr(module, "__file__"):
- if (
- main_module.__file__
- and module.__file__
- and Path(main_module.__file__).samefile(module.__file__)
- ):
- module = main_module
+ main_file = main_module.__file__
+ module_file = module.__file__
+ if main_file and module_file:
+ try:
+ is_samefile = Path(main_file).samefile(module_file)
+ except FileNotFoundError:
+ # Can be raised when run through the CLI as the __main__ file
+ # path contains `strawberry.exe`
+ is_samefile = False
+ module = main_module if is_samefile else module
return module.__dict__[self.type_name]
# this empty call method allows LazyTypes to be used in generic types
| {"golden_diff": "diff --git a/strawberry/lazy_type.py b/strawberry/lazy_type.py\n--- a/strawberry/lazy_type.py\n+++ b/strawberry/lazy_type.py\n@@ -48,12 +48,16 @@\n if main_module.__spec__ and main_module.__spec__.name == self.module:\n module = main_module\n elif hasattr(main_module, \"__file__\") and hasattr(module, \"__file__\"):\n- if (\n- main_module.__file__\n- and module.__file__\n- and Path(main_module.__file__).samefile(module.__file__)\n- ):\n- module = main_module\n+ main_file = main_module.__file__\n+ module_file = module.__file__\n+ if main_file and module_file:\n+ try:\n+ is_samefile = Path(main_file).samefile(module_file)\n+ except FileNotFoundError:\n+ # Can be raised when run through the CLI as the __main__ file\n+ # path contains `strawberry.exe`\n+ is_samefile = False\n+ module = main_module if is_samefile else module\n return module.__dict__[self.type_name]\n \n # this empty call method allows LazyTypes to be used in generic types\n", "issue": "Strawberry cli commands fail with error: strawberry.exe\\__main__.py not found\nAfter upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.\r\n\r\n## Describe the Bug\r\n- Upgraded strawberry from 0.152.0 to 0.154.1\r\n```\r\npoetry add strawberry-graphql[debug-server]@0.154.1\r\n```\r\n- Executed below commands:\r\n```\r\nstrawberry server myapp.schema\r\nstrawberry export-schema myapp.schema:schema\r\n```\r\n- Both these commands are failing in below error:\r\n\r\n**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\myyuser\\\\AppData\\\\Local\\\\pypoetry\\\\Cache\\\\virtualenvs\\\\straw-k47ybk7v-py3.10\\\\Scripts\\\\strawberry.exe\\\\\\_\\_main\\_\\_.py'**\r\n\r\n## System Information\r\n\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): 0.154.1\r\n - Python: 3.10.9\r\n\r\n## Additional Context\r\n\r\nThere is no issue with strawberry cli in version 0.152.0 which I am using currently. If we downgrade the package to this version, cli commands work just fine.\r\n\nStrawberry cli commands fail with error: strawberry.exe\\__main__.py not found\nAfter upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.\r\n\r\n## Describe the Bug\r\n- Upgraded strawberry from 0.152.0 to 0.154.1\r\n```\r\npoetry add strawberry-graphql[debug-server]@0.154.1\r\n```\r\n- Executed below commands:\r\n```\r\nstrawberry server myapp.schema\r\nstrawberry export-schema myapp.schema:schema\r\n```\r\n- Both these commands are failing in below error:\r\n\r\n**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\myyuser\\\\AppData\\\\Local\\\\pypoetry\\\\Cache\\\\virtualenvs\\\\straw-k47ybk7v-py3.10\\\\Scripts\\\\strawberry.exe\\\\\\_\\_main\\_\\_.py'**\r\n\r\n## System Information\r\n\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): 0.154.1\r\n - Python: 3.10.9\r\n\r\n## Additional Context\r\n\r\nThere is no issue with strawberry cli in version 0.152.0 which I am using currently. 
If we downgrade the package to this version, cli commands work just fine.\r\n\n", "before_files": [{"content": "import importlib\nimport inspect\nimport sys\nimport warnings\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import ForwardRef, Generic, Optional, Type, TypeVar, cast\n\nTypeName = TypeVar(\"TypeName\")\nModule = TypeVar(\"Module\")\n\n\n@dataclass(frozen=True)\nclass LazyType(Generic[TypeName, Module]):\n type_name: str\n module: str\n package: Optional[str] = None\n\n def __class_getitem__(cls, params):\n warnings.warn(\n (\n \"LazyType is deprecated, use \"\n \"Annotated[YourType, strawberry.lazy(path)] instead\"\n ),\n DeprecationWarning,\n stacklevel=2,\n )\n\n type_name, module = params\n\n package = None\n\n if module.startswith(\".\"):\n current_frame = inspect.currentframe()\n assert current_frame is not None\n assert current_frame.f_back is not None\n package = current_frame.f_back.f_globals[\"__package__\"]\n\n return cls(type_name, module, package)\n\n def resolve_type(self) -> Type:\n module = importlib.import_module(self.module, self.package)\n main_module = sys.modules.get(\"__main__\", None)\n if main_module:\n # If lazy type points to the main module, use it instead of the imported\n # module. Otherwise duplication checks during schema-conversion might fail.\n # Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397\n if main_module.__spec__ and main_module.__spec__.name == self.module:\n module = main_module\n elif hasattr(main_module, \"__file__\") and hasattr(module, \"__file__\"):\n if (\n main_module.__file__\n and module.__file__\n and Path(main_module.__file__).samefile(module.__file__)\n ):\n module = main_module\n return module.__dict__[self.type_name]\n\n # this empty call method allows LazyTypes to be used in generic types\n # for example: List[LazyType[\"A\", \"module\"]]\n\n def __call__(self): # pragma: no cover\n return None\n\n\nclass StrawberryLazyReference:\n def __init__(self, module: str) -> None:\n self.module = module\n self.package = None\n\n if module.startswith(\".\"):\n frame = inspect.stack()[2][0]\n # TODO: raise a nice error if frame is None\n assert frame is not None\n self.package = cast(str, frame.f_globals[\"__package__\"])\n\n def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:\n return LazyType(forward_ref.__forward_arg__, self.module, self.package)\n\n\ndef lazy(module_path: str) -> StrawberryLazyReference:\n return StrawberryLazyReference(module_path)\n", "path": "strawberry/lazy_type.py"}]} | 1,884 | 272 |
gh_patches_debug_31697 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't use Exception.message if you want to work on py3k
Found in https://github.com/conda-forge/staged-recipes/issues/720. The pattern currently exists in two places in https://github.com/conda-forge/conda-smithy/blob/6c1856a51832b5a014bb9126467804d75d712029/conda_smithy/cli.py.
</issue>
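The incompatibility in one screenful: `BaseException.message` was deprecated in Python 2.6 and removed in Python 3, so accessing it there raises `AttributeError`; printing the exception object itself is the portable spelling, and is exactly what the patch below switches to:

```python
try:
    raise RuntimeError("Repository registration failed")
except RuntimeError as err:
    # print(err.message)  # Python 2 only; AttributeError on Python 3
    print(err)            # works on both: str(err) is the exception's message
```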
<code>
[start of conda_smithy/cli.py]
1 from __future__ import print_function, absolute_import
2
3 import os
4 import requests
5 import subprocess
6 import sys
7 import time
8 import argparse
9
10 from conda_build.metadata import MetaData
11
12 from . import ci_register
13 from . import configure_feedstock
14 from . import lint_recipe
15 from . import __version__
16
17
18 PY2 = sys.version_info[0] == 2
19
20 def generate_feedstock_content(target_directory, source_recipe_dir, meta):
21 recipe_dir = "recipe"
22 target_recipe_dir = os.path.join(target_directory, recipe_dir)
23 if not os.path.exists(target_recipe_dir):
24 os.makedirs(target_recipe_dir)
25 # If there is a source recipe, copy it now to the right dir
26 if source_recipe_dir:
27 configure_feedstock.copytree(source_recipe_dir, target_recipe_dir)
28
29 forge_yml = os.path.join(target_directory, 'conda-forge.yml')
30 if not os.path.exists(forge_yml):
31 with open(forge_yml, 'w') as fh:
32 fh.write('[]')
33
34 configure_feedstock.main(target_directory)
35
36
37 def init_git_repo(target):
38 subprocess.check_call(['git', 'init'], cwd=target)
39
40
41 def create_git_repo(target, msg):
42 init_git_repo(target)
43 subprocess.check_call(['git', 'add', '*'], cwd=target)
44 if sys.platform == "win32":
45 # prevent this:
46 # bash: line 1: ./ci_support/run_docker_build.sh: Permission denied
47 # ./ci_support/run_docker_build.sh returned exit code 126
48 subprocess.check_call(['git', 'update-index', '--chmod=+x', 'ci_support/run_docker_build.sh'], cwd=target)
49 subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)
50
51
52 class Subcommand(object):
53 #: The name of the subcommand
54 subcommand = None
55 aliases = []
56 def __init__(self, parser, help=None):
57 if PY2:
58 # aliases not allowed in 2.7 :-(
59 subcommand_parser = parser.add_parser(self.subcommand, help=help)
60 else:
61 subcommand_parser = parser.add_parser(self.subcommand, help=help, aliases=self.aliases)
62
63 subcommand_parser.set_defaults(subcommand_func=self)
64 self.subcommand_parser = subcommand_parser
65
66 def __call__(self, args):
67 pass
68
69
70 class Init(Subcommand):
71 subcommand = 'init'
72 def __init__(self, parser):
73 # conda-smithy init /path/to/udunits-recipe ./
74
75 super(Init, self).__init__(parser, "Create a feedstock git repository, which can contain "
76 "one conda recipes.")
77 scp = self.subcommand_parser
78 scp.add_argument("recipe_directory", help="The path to the source recipe directory.")
79 scp.add_argument("--feedstock-directory", default='./{package.name}-feedstock',
80 help="Target directory, where the new feedstock git repository should be "
81 "created. (Default: './<packagename>-feedstock')")
82 scp.add_argument("--no-git-repo", action='store_true',
83 default=False,
84 help="Do not init the feedstock as a git repository.")
85
86 def __call__(self, args):
87 # check some error conditions
88 if args.recipe_directory and not os.path.isdir(args.recipe_directory):
89 raise IOError("The source recipe directory should be the directory of the "
90 "conda-recipe you want to build a feedstock for. Got {}".format(
91 args.recipe_directory))
92
93 # Get some information about the source recipe.
94 if args.recipe_directory:
95 meta = MetaData(args.recipe_directory)
96 else:
97 meta = None
98
99 feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))
100 msg = 'Initial commit of the {} feedstock.'.format(meta.name())
101
102 try:
103 generate_feedstock_content(feedstock_directory, args.recipe_directory, meta)
104 if not args.no_git_repo:
105 create_git_repo(feedstock_directory, msg)
106
107 print("\nRepository created, please edit conda-forge.yml to configure the upload channels\n"
108 "and afterwards call 'conda smithy register-github'")
109 except RuntimeError as e:
110 print(e.message)
111
112
113 class RegisterGithub(Subcommand):
114 subcommand = 'register-github'
115 def __init__(self, parser):
116 # conda-smithy register-github ./ --organization=conda-forge
117 super(RegisterGithub, self).__init__(parser, "Register a repo for a feedstock at github.")
118 scp = self.subcommand_parser
119 scp.add_argument("feedstock_directory",
120 help="The directory of the feedstock git repository.")
121 group = scp.add_mutually_exclusive_group()
122 group.add_argument("--user", help="github username under which to register this repo")
123 group.add_argument("--organization", default="conda-forge",
124 help="github organisation under which to register this repo")
125 scp.add_argument("--remote-name", default="upstream",
126 help="The name of the remote to add to the local repo (default: upstream). "
127 "An empty string will disable adding of a remote.")
128
129 def __call__(self, args):
130 from . import github
131 try:
132 github.create_github_repo(args)
133 print("\nRepository registered at github, now call 'conda smithy register-ci'")
134 except RuntimeError as e:
135 print(e.message)
136
137
138 class RegisterCI(Subcommand):
139 subcommand = 'register-ci'
140 def __init__(self, parser):
141 # conda-smithy register-ci ./
142 super(RegisterCI, self).__init__(parser, "Register a feedstock at the CI "
143 "services which do the builds.")
144 scp = self.subcommand_parser
145 scp.add_argument("--feedstock_directory", default=os.getcwd(),
146 help="The directory of the feedstock git repository.")
147 group = scp.add_mutually_exclusive_group()
148 group.add_argument("--user", help="github username under which to register this repo")
149 group.add_argument("--organization", default="conda-forge",
150 help="github organisation under which to register this repo")
151
152 def __call__(self, args):
153 owner = args.user or args.organization
154 repo = os.path.basename(os.path.abspath(args.feedstock_directory))
155
156 print('CI Summary for {}/{} (can take ~30s):'.format(owner, repo))
157 try:
158 ci_register.add_project_to_travis(owner, repo)
159 ci_register.travis_token_update_conda_forge_config(args.feedstock_directory, owner, repo)
160 time.sleep(1)
161 ci_register.travis_configure(owner, repo)
162 ci_register.add_project_to_circle(owner, repo)
163 ci_register.add_token_to_circle(owner, repo)
164 ci_register.add_project_to_appveyor(owner, repo)
165 ci_register.appveyor_encrypt_binstar_token(args.feedstock_directory, owner, repo)
166 ci_register.appveyor_configure(owner, repo)
167 ci_register.add_conda_linting(owner, repo)
168 print("\nCI services have been enabled enabled. You may wish to regnerate the feedstock.\n"
169 "Any changes will need commiting to the repo.")
170 except RuntimeError as e:
171 print(e.message)
172
173 class Regenerate(Subcommand):
174 subcommand = 'regenerate'
175 aliases = ['rerender']
176 def __init__(self, parser):
177 super(Regenerate, self).__init__(parser, "Regenerate / update the CI support files of the "
178 "feedstock.")
179 scp = self.subcommand_parser
180 scp.add_argument("--feedstock_directory", default=os.getcwd(),
181 help="The directory of the feedstock git repository.")
182
183 def __call__(self, args):
184 try:
185 configure_feedstock.main(args.feedstock_directory)
186 print("\nCI support files regenerated. These need to be pushed to github!")
187 except RuntimeError as e:
188 print(e.message)
189
190
191 class RecipeLint(Subcommand):
192 subcommand = 'recipe-lint'
193 def __init__(self, parser):
194 super(RecipeLint, self).__init__(parser, "Lint a single conda recipe.")
195 scp = self.subcommand_parser
196 scp.add_argument("recipe_directory", default=[os.getcwd()], nargs='*')
197
198 def __call__(self, args):
199 all_good = True
200 for recipe in args.recipe_directory:
201 lint = lint_recipe.main(os.path.join(recipe))
202 if lint:
203 all_good = False
204 print('{} has some lint:\n {}'.format(recipe, '\n '.join(lint)))
205 else:
206 print('{} is in fine form'.format(recipe))
207 # Exit code 1 for some lint, 0 for no lint.
208 sys.exit(int(not all_good))
209
210
211
212 def main():
213
214 parser = argparse.ArgumentParser("a tool to help create, administer and manage feedstocks.")
215 subparser = parser.add_subparsers()
216 # TODO: Consider allowing plugins/extensions using entry_points.
217 # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html
218 for subcommand in Subcommand.__subclasses__():
219 subcommand(subparser)
220 # And the alias for rerender
221 if PY2:
222 class Rerender(Regenerate):
223 # A poor-man's alias for regenerate.
224 subcommand = 'rerender'
225 Rerender(subparser)
226
227 parser.add_argument('--version', action='version',
228 version=__version__,
229 help="Show conda-smithy's version, and exit.")
230
231 if not sys.argv[1:]:
232 args = parser.parse_args(['--help'])
233 else:
234 args = parser.parse_args()
235
236 args.subcommand_func(args)
237
238
239 if __name__ == '__main__':
240 main()
241
[end of conda_smithy/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_smithy/cli.py b/conda_smithy/cli.py
--- a/conda_smithy/cli.py
+++ b/conda_smithy/cli.py
@@ -107,7 +107,7 @@
print("\nRepository created, please edit conda-forge.yml to configure the upload channels\n"
"and afterwards call 'conda smithy register-github'")
except RuntimeError as e:
- print(e.message)
+ print(e)
class RegisterGithub(Subcommand):
@@ -132,7 +132,7 @@
github.create_github_repo(args)
print("\nRepository registered at github, now call 'conda smithy register-ci'")
except RuntimeError as e:
- print(e.message)
+ print(e)
class RegisterCI(Subcommand):
@@ -168,7 +168,7 @@
print("\nCI services have been enabled enabled. You may wish to regnerate the feedstock.\n"
"Any changes will need commiting to the repo.")
except RuntimeError as e:
- print(e.message)
+ print(e)
class Regenerate(Subcommand):
subcommand = 'regenerate'
@@ -185,7 +185,7 @@
configure_feedstock.main(args.feedstock_directory)
print("\nCI support files regenerated. These need to be pushed to github!")
except RuntimeError as e:
- print(e.message)
+ print(e)
class RecipeLint(Subcommand):
| {"golden_diff": "diff --git a/conda_smithy/cli.py b/conda_smithy/cli.py\n--- a/conda_smithy/cli.py\n+++ b/conda_smithy/cli.py\n@@ -107,7 +107,7 @@\n print(\"\\nRepository created, please edit conda-forge.yml to configure the upload channels\\n\"\n \"and afterwards call 'conda smithy register-github'\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n \n class RegisterGithub(Subcommand):\n@@ -132,7 +132,7 @@\n github.create_github_repo(args)\n print(\"\\nRepository registered at github, now call 'conda smithy register-ci'\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n \n class RegisterCI(Subcommand):\n@@ -168,7 +168,7 @@\n print(\"\\nCI services have been enabled enabled. You may wish to regnerate the feedstock.\\n\"\n \"Any changes will need commiting to the repo.\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n class Regenerate(Subcommand):\n subcommand = 'regenerate'\n@@ -185,7 +185,7 @@\n configure_feedstock.main(args.feedstock_directory)\n print(\"\\nCI support files regenerated. These need to be pushed to github!\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n \n class RecipeLint(Subcommand):\n", "issue": "Don't use Exception.message if you want to work on py3k\nFound in https://github.com/conda-forge/staged-recipes/issues/720. Currently exists in 2 places at https://github.com/conda-forge/conda-smithy/blob/6c1856a51832b5a014bb9126467804d75d712029/conda_smithy/cli.py.\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import\n\nimport os\nimport requests\nimport subprocess\nimport sys\nimport time\nimport argparse\n\nfrom conda_build.metadata import MetaData\n\nfrom . import ci_register\nfrom . import configure_feedstock\nfrom . import lint_recipe\nfrom . 
import __version__\n\n\nPY2 = sys.version_info[0] == 2\n\ndef generate_feedstock_content(target_directory, source_recipe_dir, meta):\n recipe_dir = \"recipe\"\n target_recipe_dir = os.path.join(target_directory, recipe_dir)\n if not os.path.exists(target_recipe_dir):\n os.makedirs(target_recipe_dir)\n # If there is a source recipe, copy it now to the right dir\n if source_recipe_dir:\n configure_feedstock.copytree(source_recipe_dir, target_recipe_dir)\n\n forge_yml = os.path.join(target_directory, 'conda-forge.yml')\n if not os.path.exists(forge_yml):\n with open(forge_yml, 'w') as fh:\n fh.write('[]')\n\n configure_feedstock.main(target_directory)\n\n\ndef init_git_repo(target):\n subprocess.check_call(['git', 'init'], cwd=target)\n\n\ndef create_git_repo(target, msg):\n init_git_repo(target)\n subprocess.check_call(['git', 'add', '*'], cwd=target)\n if sys.platform == \"win32\":\n # prevent this:\n # bash: line 1: ./ci_support/run_docker_build.sh: Permission denied\n # ./ci_support/run_docker_build.sh returned exit code 126\n subprocess.check_call(['git', 'update-index', '--chmod=+x', 'ci_support/run_docker_build.sh'], cwd=target)\n subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)\n\n\nclass Subcommand(object):\n #: The name of the subcommand\n subcommand = None\n aliases = []\n def __init__(self, parser, help=None):\n if PY2:\n # aliases not allowed in 2.7 :-(\n subcommand_parser = parser.add_parser(self.subcommand, help=help)\n else:\n subcommand_parser = parser.add_parser(self.subcommand, help=help, aliases=self.aliases)\n\n subcommand_parser.set_defaults(subcommand_func=self)\n self.subcommand_parser = subcommand_parser\n\n def __call__(self, args):\n pass\n\n\nclass Init(Subcommand):\n subcommand = 'init'\n def __init__(self, parser):\n # conda-smithy init /path/to/udunits-recipe ./\n\n super(Init, self).__init__(parser, \"Create a feedstock git repository, which can contain \"\n \"one conda recipes.\")\n scp = self.subcommand_parser\n scp.add_argument(\"recipe_directory\", help=\"The path to the source recipe directory.\")\n scp.add_argument(\"--feedstock-directory\", default='./{package.name}-feedstock',\n help=\"Target directory, where the new feedstock git repository should be \"\n \"created. (Default: './<packagename>-feedstock')\")\n scp.add_argument(\"--no-git-repo\", action='store_true',\n default=False,\n help=\"Do not init the feedstock as a git repository.\")\n\n def __call__(self, args):\n # check some error conditions\n if args.recipe_directory and not os.path.isdir(args.recipe_directory):\n raise IOError(\"The source recipe directory should be the directory of the \"\n \"conda-recipe you want to build a feedstock for. 
Got {}\".format(\n args.recipe_directory))\n\n # Get some information about the source recipe.\n if args.recipe_directory:\n meta = MetaData(args.recipe_directory)\n else:\n meta = None\n\n feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))\n msg = 'Initial commit of the {} feedstock.'.format(meta.name())\n\n try:\n generate_feedstock_content(feedstock_directory, args.recipe_directory, meta)\n if not args.no_git_repo:\n create_git_repo(feedstock_directory, msg)\n\n print(\"\\nRepository created, please edit conda-forge.yml to configure the upload channels\\n\"\n \"and afterwards call 'conda smithy register-github'\")\n except RuntimeError as e:\n print(e.message)\n\n\nclass RegisterGithub(Subcommand):\n subcommand = 'register-github'\n def __init__(self, parser):\n # conda-smithy register-github ./ --organization=conda-forge\n super(RegisterGithub, self).__init__(parser, \"Register a repo for a feedstock at github.\")\n scp = self.subcommand_parser\n scp.add_argument(\"feedstock_directory\",\n help=\"The directory of the feedstock git repository.\")\n group = scp.add_mutually_exclusive_group()\n group.add_argument(\"--user\", help=\"github username under which to register this repo\")\n group.add_argument(\"--organization\", default=\"conda-forge\",\n help=\"github organisation under which to register this repo\")\n scp.add_argument(\"--remote-name\", default=\"upstream\",\n help=\"The name of the remote to add to the local repo (default: upstream). \"\n \"An empty string will disable adding of a remote.\")\n\n def __call__(self, args):\n from . import github\n try:\n github.create_github_repo(args)\n print(\"\\nRepository registered at github, now call 'conda smithy register-ci'\")\n except RuntimeError as e:\n print(e.message)\n\n\nclass RegisterCI(Subcommand):\n subcommand = 'register-ci'\n def __init__(self, parser):\n # conda-smithy register-ci ./\n super(RegisterCI, self).__init__(parser, \"Register a feedstock at the CI \"\n \"services which do the builds.\")\n scp = self.subcommand_parser\n scp.add_argument(\"--feedstock_directory\", default=os.getcwd(),\n help=\"The directory of the feedstock git repository.\")\n group = scp.add_mutually_exclusive_group()\n group.add_argument(\"--user\", help=\"github username under which to register this repo\")\n group.add_argument(\"--organization\", default=\"conda-forge\",\n help=\"github organisation under which to register this repo\")\n\n def __call__(self, args):\n owner = args.user or args.organization\n repo = os.path.basename(os.path.abspath(args.feedstock_directory))\n\n print('CI Summary for {}/{} (can take ~30s):'.format(owner, repo))\n try:\n ci_register.add_project_to_travis(owner, repo)\n ci_register.travis_token_update_conda_forge_config(args.feedstock_directory, owner, repo)\n time.sleep(1)\n ci_register.travis_configure(owner, repo)\n ci_register.add_project_to_circle(owner, repo)\n ci_register.add_token_to_circle(owner, repo)\n ci_register.add_project_to_appveyor(owner, repo)\n ci_register.appveyor_encrypt_binstar_token(args.feedstock_directory, owner, repo)\n ci_register.appveyor_configure(owner, repo)\n ci_register.add_conda_linting(owner, repo)\n print(\"\\nCI services have been enabled enabled. 
You may wish to regnerate the feedstock.\\n\"\n \"Any changes will need commiting to the repo.\")\n except RuntimeError as e:\n print(e.message)\n\nclass Regenerate(Subcommand):\n subcommand = 'regenerate'\n aliases = ['rerender']\n def __init__(self, parser):\n super(Regenerate, self).__init__(parser, \"Regenerate / update the CI support files of the \"\n \"feedstock.\")\n scp = self.subcommand_parser\n scp.add_argument(\"--feedstock_directory\", default=os.getcwd(),\n help=\"The directory of the feedstock git repository.\")\n\n def __call__(self, args):\n try:\n configure_feedstock.main(args.feedstock_directory)\n print(\"\\nCI support files regenerated. These need to be pushed to github!\")\n except RuntimeError as e:\n print(e.message)\n\n\nclass RecipeLint(Subcommand):\n subcommand = 'recipe-lint'\n def __init__(self, parser):\n super(RecipeLint, self).__init__(parser, \"Lint a single conda recipe.\")\n scp = self.subcommand_parser\n scp.add_argument(\"recipe_directory\", default=[os.getcwd()], nargs='*')\n\n def __call__(self, args):\n all_good = True\n for recipe in args.recipe_directory:\n lint = lint_recipe.main(os.path.join(recipe))\n if lint:\n all_good = False\n print('{} has some lint:\\n {}'.format(recipe, '\\n '.join(lint)))\n else:\n print('{} is in fine form'.format(recipe))\n # Exit code 1 for some lint, 0 for no lint.\n sys.exit(int(not all_good))\n\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\"a tool to help create, administer and manage feedstocks.\")\n subparser = parser.add_subparsers()\n # TODO: Consider allowing plugins/extensions using entry_points.\n # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html\n for subcommand in Subcommand.__subclasses__():\n subcommand(subparser)\n # And the alias for rerender\n if PY2:\n class Rerender(Regenerate):\n # A poor-man's alias for regenerate.\n subcommand = 'rerender'\n Rerender(subparser)\n\n parser.add_argument('--version', action='version',\n version=__version__,\n help=\"Show conda-smithy's version, and exit.\")\n\n if not sys.argv[1:]:\n args = parser.parse_args(['--help'])\n else:\n args = parser.parse_args()\n\n args.subcommand_func(args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "conda_smithy/cli.py"}]} | 3,353 | 323 |
gh_patches_debug_29315 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2572 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a warning when using `@strawberry.federation.type` but not using `strawberry.federation.Schema`
This should make developers' lives easier if they forget to use the federation schema :)
See: https://discord.com/channels/689806334337482765/773519351423827978/950480727630303232
</issue>
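For readers skimming this entry, here is a minimal, hypothetical sketch (not Strawberry's actual implementation) of the kind of check such a warning could be built on. It assumes the schema keeps a `type_map` of converted types whose `definition.directives` lists the Strawberry directive instances applied to each type; the warning text mirrors the fix shown later in this entry.

```python
import warnings


def warn_if_federation_directives(type_map, federation_directive_cls):
    """Warn when a plain Schema contains types decorated with federation directives.

    Assumes `type_map` maps type names to converted types exposing
    `.definition.directives` (the applied Strawberry directive instances).
    """
    for converted_type in type_map.values():
        definition = getattr(converted_type, "definition", None)
        for directive in getattr(definition, "directives", None) or []:
            if isinstance(directive, federation_directive_cls):
                warnings.warn(
                    "Federation directive found in schema. "
                    "Should use strawberry.federation.Schema instead.",
                    UserWarning,
                )
                return
```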
<code>
[start of strawberry/schema/schema.py]
1 from __future__ import annotations
2
3 from functools import lru_cache
4 from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast
5
6 from graphql import (
7 GraphQLNamedType,
8 GraphQLNonNull,
9 GraphQLSchema,
10 get_introspection_query,
11 parse,
12 validate_schema,
13 )
14 from graphql.subscription import subscribe
15 from graphql.type.directives import specified_directives
16
17 from strawberry.annotation import StrawberryAnnotation
18 from strawberry.extensions.directives import (
19 DirectivesExtension,
20 DirectivesExtensionSync,
21 )
22 from strawberry.schema.schema_converter import GraphQLCoreConverter
23 from strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY
24 from strawberry.types import ExecutionContext
25 from strawberry.types.graphql import OperationType
26 from strawberry.types.types import TypeDefinition
27
28 from ..printer import print_schema
29 from . import compat
30 from .base import BaseSchema
31 from .config import StrawberryConfig
32 from .execute import execute, execute_sync
33
34 if TYPE_CHECKING:
35 from graphql import ExecutionContext as GraphQLExecutionContext
36
37 from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper
38 from strawberry.directive import StrawberryDirective
39 from strawberry.enum import EnumDefinition
40 from strawberry.extensions import SchemaExtension
41 from strawberry.field import StrawberryField
42 from strawberry.types import ExecutionResult
43 from strawberry.union import StrawberryUnion
44
45 DEFAULT_ALLOWED_OPERATION_TYPES = {
46 OperationType.QUERY,
47 OperationType.MUTATION,
48 OperationType.SUBSCRIPTION,
49 }
50
51
52 class Schema(BaseSchema):
53 def __init__(
54 self,
55 # TODO: can we make sure we only allow to pass
56 # something that has been decorated?
57 query: Type,
58 mutation: Optional[Type] = None,
59 subscription: Optional[Type] = None,
60 directives: Iterable[StrawberryDirective] = (),
61 types=(),
62 extensions: Iterable[Union[Type[SchemaExtension], SchemaExtension]] = (),
63 execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,
64 config: Optional[StrawberryConfig] = None,
65 scalar_overrides: Optional[
66 Dict[object, Union[Type, ScalarWrapper, ScalarDefinition]]
67 ] = None,
68 schema_directives: Iterable[object] = (),
69 ):
70 self.query = query
71 self.mutation = mutation
72 self.subscription = subscription
73
74 self.extensions = extensions
75 self.execution_context_class = execution_context_class
76 self.config = config or StrawberryConfig()
77
78 SCALAR_OVERRIDES_DICT_TYPE = Dict[
79 object, Union["ScalarWrapper", "ScalarDefinition"]
80 ]
81
82 scalar_registry: SCALAR_OVERRIDES_DICT_TYPE = {**DEFAULT_SCALAR_REGISTRY}
83 if scalar_overrides:
84 # TODO: check that the overrides are valid
85 scalar_registry.update(cast(SCALAR_OVERRIDES_DICT_TYPE, scalar_overrides))
86
87 self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)
88 self.directives = directives
89 self.schema_directives = list(schema_directives)
90
91 query_type = self.schema_converter.from_object(query._type_definition)
92 mutation_type = (
93 self.schema_converter.from_object(mutation._type_definition)
94 if mutation
95 else None
96 )
97 subscription_type = (
98 self.schema_converter.from_object(subscription._type_definition)
99 if subscription
100 else None
101 )
102
103 graphql_directives = [
104 self.schema_converter.from_directive(directive) for directive in directives
105 ]
106
107 graphql_types = []
108 for type_ in types:
109 if compat.is_schema_directive(type_):
110 graphql_directives.append(
111 self.schema_converter.from_schema_directive(type_)
112 )
113 else:
114 if hasattr(type_, "_type_definition"):
115 if type_._type_definition.is_generic:
116 type_ = StrawberryAnnotation(type_).resolve()
117 graphql_type = self.schema_converter.from_maybe_optional(type_)
118 if isinstance(graphql_type, GraphQLNonNull):
119 graphql_type = graphql_type.of_type
120 if not isinstance(graphql_type, GraphQLNamedType):
121 raise TypeError(f"{graphql_type} is not a named GraphQL Type")
122 graphql_types.append(graphql_type)
123
124 try:
125 self._schema = GraphQLSchema(
126 query=query_type,
127 mutation=mutation_type,
128 subscription=subscription_type if subscription else None,
129 directives=specified_directives + tuple(graphql_directives),
130 types=graphql_types,
131 extensions={
132 GraphQLCoreConverter.DEFINITION_BACKREF: self,
133 },
134 )
135
136 except TypeError as error:
137 # GraphQL core throws a TypeError if there's any exception raised
138 # during the schema creation, so we check if the cause was a
139 # StrawberryError and raise it instead if that's the case.
140
141 from strawberry.exceptions import StrawberryException
142
143 if isinstance(error.__cause__, StrawberryException):
144 raise error.__cause__ from None
145
146 raise
147
148 # attach our schema to the GraphQL schema instance
149 self._schema._strawberry_schema = self # type: ignore
150
151 # Validate schema early because we want developers to know about
152 # possible issues as soon as possible
153 errors = validate_schema(self._schema)
154 if errors:
155 formatted_errors = "\n\n".join(f"❌ {error.message}" for error in errors)
156 raise ValueError(f"Invalid Schema. Errors:\n\n{formatted_errors}")
157
158 def get_extensions(
159 self, sync: bool = False
160 ) -> List[Union[Type[SchemaExtension], SchemaExtension]]:
161 extensions = list(self.extensions)
162
163 if self.directives:
164 extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)
165
166 return extensions
167
168 @lru_cache()
169 def get_type_by_name(
170 self, name: str
171 ) -> Optional[
172 Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]
173 ]:
174 # TODO: respect auto_camel_case
175 if name in self.schema_converter.type_map:
176 return self.schema_converter.type_map[name].definition
177
178 return None
179
180 def get_field_for_type(
181 self, field_name: str, type_name: str
182 ) -> Optional[StrawberryField]:
183 type_ = self.get_type_by_name(type_name)
184
185 if not type_:
186 return None # pragma: no cover
187
188 assert isinstance(type_, TypeDefinition)
189
190 return next(
191 (
192 field
193 for field in type_.fields
194 if self.config.name_converter.get_graphql_name(field) == field_name
195 ),
196 None,
197 )
198
199 @lru_cache()
200 def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:
201 return next(
202 (
203 directive
204 for directive in self.directives
205 if self.config.name_converter.from_directive(directive) == graphql_name
206 ),
207 None,
208 )
209
210 async def execute(
211 self,
212 query: Optional[str],
213 variable_values: Optional[Dict[str, Any]] = None,
214 context_value: Optional[Any] = None,
215 root_value: Optional[Any] = None,
216 operation_name: Optional[str] = None,
217 allowed_operation_types: Optional[Iterable[OperationType]] = None,
218 ) -> ExecutionResult:
219 if allowed_operation_types is None:
220 allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
221
222 # Create execution context
223 execution_context = ExecutionContext(
224 query=query,
225 schema=self,
226 context=context_value,
227 root_value=root_value,
228 variables=variable_values,
229 provided_operation_name=operation_name,
230 )
231
232 result = await execute(
233 self._schema,
234 extensions=self.get_extensions(),
235 execution_context_class=self.execution_context_class,
236 execution_context=execution_context,
237 allowed_operation_types=allowed_operation_types,
238 process_errors=self.process_errors,
239 )
240
241 return result
242
243 def execute_sync(
244 self,
245 query: Optional[str],
246 variable_values: Optional[Dict[str, Any]] = None,
247 context_value: Optional[Any] = None,
248 root_value: Optional[Any] = None,
249 operation_name: Optional[str] = None,
250 allowed_operation_types: Optional[Iterable[OperationType]] = None,
251 ) -> ExecutionResult:
252 if allowed_operation_types is None:
253 allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
254
255 execution_context = ExecutionContext(
256 query=query,
257 schema=self,
258 context=context_value,
259 root_value=root_value,
260 variables=variable_values,
261 provided_operation_name=operation_name,
262 )
263
264 result = execute_sync(
265 self._schema,
266 extensions=self.get_extensions(sync=True),
267 execution_context_class=self.execution_context_class,
268 execution_context=execution_context,
269 allowed_operation_types=allowed_operation_types,
270 process_errors=self.process_errors,
271 )
272
273 return result
274
275 async def subscribe(
276 self,
277 # TODO: make this optional when we support extensions
278 query: str,
279 variable_values: Optional[Dict[str, Any]] = None,
280 context_value: Optional[Any] = None,
281 root_value: Optional[Any] = None,
282 operation_name: Optional[str] = None,
283 ):
284 return await subscribe(
285 self._schema,
286 parse(query),
287 root_value=root_value,
288 context_value=context_value,
289 variable_values=variable_values,
290 operation_name=operation_name,
291 )
292
293 def as_str(self) -> str:
294 return print_schema(self)
295
296 __str__ = as_str
297
298 def introspect(self) -> Dict[str, Any]:
299 """Return the introspection query result for the current schema
300
301 Raises:
302 ValueError: If the introspection query fails due to an invalid schema
303 """
304 introspection = self.execute_sync(get_introspection_query())
305 if introspection.errors or not introspection.data:
306 raise ValueError(f"Invalid Schema. Errors {introspection.errors!r}")
307
308 return introspection.data
309
[end of strawberry/schema/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py
--- a/strawberry/schema/schema.py
+++ b/strawberry/schema/schema.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import warnings
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast
@@ -148,6 +149,8 @@
# attach our schema to the GraphQL schema instance
self._schema._strawberry_schema = self # type: ignore
+ self._warn_for_federation_directives()
+
# Validate schema early because we want developers to know about
# possible issues as soon as possible
errors = validate_schema(self._schema)
@@ -290,6 +293,25 @@
operation_name=operation_name,
)
+ def _warn_for_federation_directives(self):
+ """Raises a warning if the schema has any federation directives."""
+ from strawberry.federation.schema_directives import FederationDirective
+
+ if any(
+ type_
+ for type_ in self.schema_converter.type_map.values()
+ if any(
+ directive
+ for directive in (type_.definition.directives or [])
+ if isinstance(directive, FederationDirective)
+ )
+ ):
+ warnings.warn(
+ "Federation directive found in schema. "
+ "Should use strawberry.federation.Schema instead.",
+ UserWarning,
+ )
+
def as_str(self) -> str:
return print_schema(self)
| {"golden_diff": "diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py\n--- a/strawberry/schema/schema.py\n+++ b/strawberry/schema/schema.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import warnings\n from functools import lru_cache\n from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast\n \n@@ -148,6 +149,8 @@\n # attach our schema to the GraphQL schema instance\n self._schema._strawberry_schema = self # type: ignore\n \n+ self._warn_for_federation_directives()\n+\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n@@ -290,6 +293,25 @@\n operation_name=operation_name,\n )\n \n+ def _warn_for_federation_directives(self):\n+ \"\"\"Raises a warning if the schema has any federation directives.\"\"\"\n+ from strawberry.federation.schema_directives import FederationDirective\n+\n+ if any(\n+ type_\n+ for type_ in self.schema_converter.type_map.values()\n+ if any(\n+ directive\n+ for directive in (type_.definition.directives or [])\n+ if isinstance(directive, FederationDirective)\n+ )\n+ ):\n+ warnings.warn(\n+ \"Federation directive found in schema. \"\n+ \"Should use strawberry.federation.Schema instead.\",\n+ UserWarning,\n+ )\n+\n def as_str(self) -> str:\n return print_schema(self)\n", "issue": "Add a warning when using `@strawberry.federation.type` but not using `strawberry.federation.Schema`\nThis should make developer life easier if they forget to use the federation schema :)\r\n\r\nSee: https://discord.com/channels/689806334337482765/773519351423827978/950480727630303232\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast\n\nfrom graphql import (\n GraphQLNamedType,\n GraphQLNonNull,\n GraphQLSchema,\n get_introspection_query,\n parse,\n validate_schema,\n)\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.extensions.directives import (\n DirectivesExtension,\n DirectivesExtensionSync,\n)\nfrom strawberry.schema.schema_converter import GraphQLCoreConverter\nfrom strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY\nfrom strawberry.types import ExecutionContext\nfrom strawberry.types.graphql import OperationType\nfrom strawberry.types.types import TypeDefinition\n\nfrom ..printer import print_schema\nfrom . 
import compat\nfrom .base import BaseSchema\nfrom .config import StrawberryConfig\nfrom .execute import execute, execute_sync\n\nif TYPE_CHECKING:\n from graphql import ExecutionContext as GraphQLExecutionContext\n\n from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper\n from strawberry.directive import StrawberryDirective\n from strawberry.enum import EnumDefinition\n from strawberry.extensions import SchemaExtension\n from strawberry.field import StrawberryField\n from strawberry.types import ExecutionResult\n from strawberry.union import StrawberryUnion\n\nDEFAULT_ALLOWED_OPERATION_TYPES = {\n OperationType.QUERY,\n OperationType.MUTATION,\n OperationType.SUBSCRIPTION,\n}\n\n\nclass Schema(BaseSchema):\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass\n # something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives: Iterable[StrawberryDirective] = (),\n types=(),\n extensions: Iterable[Union[Type[SchemaExtension], SchemaExtension]] = (),\n execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,\n config: Optional[StrawberryConfig] = None,\n scalar_overrides: Optional[\n Dict[object, Union[Type, ScalarWrapper, ScalarDefinition]]\n ] = None,\n schema_directives: Iterable[object] = (),\n ):\n self.query = query\n self.mutation = mutation\n self.subscription = subscription\n\n self.extensions = extensions\n self.execution_context_class = execution_context_class\n self.config = config or StrawberryConfig()\n\n SCALAR_OVERRIDES_DICT_TYPE = Dict[\n object, Union[\"ScalarWrapper\", \"ScalarDefinition\"]\n ]\n\n scalar_registry: SCALAR_OVERRIDES_DICT_TYPE = {**DEFAULT_SCALAR_REGISTRY}\n if scalar_overrides:\n # TODO: check that the overrides are valid\n scalar_registry.update(cast(SCALAR_OVERRIDES_DICT_TYPE, scalar_overrides))\n\n self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)\n self.directives = directives\n self.schema_directives = list(schema_directives)\n\n query_type = self.schema_converter.from_object(query._type_definition)\n mutation_type = (\n self.schema_converter.from_object(mutation._type_definition)\n if mutation\n else None\n )\n subscription_type = (\n self.schema_converter.from_object(subscription._type_definition)\n if subscription\n else None\n )\n\n graphql_directives = [\n self.schema_converter.from_directive(directive) for directive in directives\n ]\n\n graphql_types = []\n for type_ in types:\n if compat.is_schema_directive(type_):\n graphql_directives.append(\n self.schema_converter.from_schema_directive(type_)\n )\n else:\n if hasattr(type_, \"_type_definition\"):\n if type_._type_definition.is_generic:\n type_ = StrawberryAnnotation(type_).resolve()\n graphql_type = self.schema_converter.from_maybe_optional(type_)\n if isinstance(graphql_type, GraphQLNonNull):\n graphql_type = graphql_type.of_type\n if not isinstance(graphql_type, GraphQLNamedType):\n raise TypeError(f\"{graphql_type} is not a named GraphQL Type\")\n graphql_types.append(graphql_type)\n\n try:\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + tuple(graphql_directives),\n types=graphql_types,\n extensions={\n GraphQLCoreConverter.DEFINITION_BACKREF: self,\n },\n )\n\n except TypeError as error:\n # GraphQL core throws a TypeError if there's any exception raised\n # during the schema creation, so we check if the cause was a\n 
# StrawberryError and raise it instead if that's the case.\n\n from strawberry.exceptions import StrawberryException\n\n if isinstance(error.__cause__, StrawberryException):\n raise error.__cause__ from None\n\n raise\n\n # attach our schema to the GraphQL schema instance\n self._schema._strawberry_schema = self # type: ignore\n\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n if errors:\n formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n raise ValueError(f\"Invalid Schema. Errors:\\n\\n{formatted_errors}\")\n\n def get_extensions(\n self, sync: bool = False\n ) -> List[Union[Type[SchemaExtension], SchemaExtension]]:\n extensions = list(self.extensions)\n\n if self.directives:\n extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)\n\n return extensions\n\n @lru_cache()\n def get_type_by_name(\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n # TODO: respect auto_camel_case\n if name in self.schema_converter.type_map:\n return self.schema_converter.type_map[name].definition\n\n return None\n\n def get_field_for_type(\n self, field_name: str, type_name: str\n ) -> Optional[StrawberryField]:\n type_ = self.get_type_by_name(type_name)\n\n if not type_:\n return None # pragma: no cover\n\n assert isinstance(type_, TypeDefinition)\n\n return next(\n (\n field\n for field in type_.fields\n if self.config.name_converter.get_graphql_name(field) == field_name\n ),\n None,\n )\n\n @lru_cache()\n def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:\n return next(\n (\n directive\n for directive in self.directives\n if self.config.name_converter.from_directive(directive) == graphql_name\n ),\n None,\n )\n\n async def execute(\n self,\n query: Optional[str],\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n # Create execution context\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = await execute(\n self._schema,\n extensions=self.get_extensions(),\n execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n process_errors=self.process_errors,\n )\n\n return result\n\n def execute_sync(\n self,\n query: Optional[str],\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = execute_sync(\n self._schema,\n extensions=self.get_extensions(sync=True),\n 
execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n process_errors=self.process_errors,\n )\n\n return result\n\n async def subscribe(\n self,\n # TODO: make this optional when we support extensions\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n __str__ = as_str\n\n def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}]} | 3,529 | 353 |
gh_patches_debug_9378 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New user creation results in TypeError
If one wants to create a new user via the network settings, an error will occur. The user gets created anyway, but this should be fixed quite fast.

</issue>
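The traceback itself is only visible in the screenshot, so the exact cause is an assumption here, but the usual source of a `TypeError` in this code path is Django 2.0+ forbidding direct assignment to a many-to-many manager. A minimal sketch of the difference, with hypothetical argument names:

```python
# Sketch only: under Django >= 2.0, assigning directly to a many-to-many
# manager raises TypeError, so related objects must be attached via .set().
def assign_regions(user_profile, regions):
    """Attach the selected regions to an already-saved profile (assumed m2m field)."""
    # user_profile.regions = regions    # TypeError: direct assignment is prohibited
    user_profile.regions.set(regions)   # the supported way since Django 2.0
```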
<code>
[start of src/cms/forms/users/user_profile_form.py]
1 """
2 Form for creating a user object
3 """
4 import logging
5
6 from django import forms
7
8 from ...models import UserProfile
9
10
11 logger = logging.getLogger(__name__)
12
13
14 class UserProfileForm(forms.ModelForm):
15
16 class Meta:
17 model = UserProfile
18 fields = [
19 'regions',
20 'organization'
21 ]
22
23 # pylint: disable=arguments-differ
24 def save(self, *args, **kwargs):
25
26 logger.info(
27 'UserProfileForm saved with args %s and kwargs %s',
28 args,
29 kwargs
30 )
31
32 # pop kwarg to make sure the super class does not get this param
33 user = kwargs.pop('user', None)
34
35 if not self.instance.id:
36 # don't commit saving of ModelForm, because required user field is still missing
37 kwargs['commit'] = False
38
39 # save ModelForm
40 user_profile = super(UserProfileForm, self).save(*args, **kwargs)
41
42 if not self.instance.id:
43 user_profile.user = user
44 user_profile.save()
45 # check if called from UserProfileForm or RegionUserProfileForm
46 if 'regions' in self.cleaned_data:
47 # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly
48 user_profile.regions = self.cleaned_data['regions']
49 user_profile.save()
50
51 return user_profile
52
[end of src/cms/forms/users/user_profile_form.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/forms/users/user_profile_form.py b/src/cms/forms/users/user_profile_form.py
--- a/src/cms/forms/users/user_profile_form.py
+++ b/src/cms/forms/users/user_profile_form.py
@@ -45,7 +45,6 @@
# check if called from UserProfileForm or RegionUserProfileForm
if 'regions' in self.cleaned_data:
# regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly
- user_profile.regions = self.cleaned_data['regions']
- user_profile.save()
+ user_profile.regions.set(self.cleaned_data['regions'])
return user_profile
| {"golden_diff": "diff --git a/src/cms/forms/users/user_profile_form.py b/src/cms/forms/users/user_profile_form.py\n--- a/src/cms/forms/users/user_profile_form.py\n+++ b/src/cms/forms/users/user_profile_form.py\n@@ -45,7 +45,6 @@\n # check if called from UserProfileForm or RegionUserProfileForm\n if 'regions' in self.cleaned_data:\n # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly\n- user_profile.regions = self.cleaned_data['regions']\n- user_profile.save()\n+ user_profile.regions.set(self.cleaned_data['regions'])\n \n return user_profile\n", "issue": "New user creation results in TypeError\nIf one wants to create a new user via the network settings an error will occur. The user gets created anyway, but this should be fixed quite fast.\r\n\r\n\r\n\nNew user creation results in TypeError\nIf one wants to create a new user via the network settings an error will occur. The user gets created anyway, but this should be fixed quite fast.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nForm for creating a user object\n\"\"\"\nimport logging\n\nfrom django import forms\n\nfrom ...models import UserProfile\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserProfileForm(forms.ModelForm):\n\n class Meta:\n model = UserProfile\n fields = [\n 'regions',\n 'organization'\n ]\n\n # pylint: disable=arguments-differ\n def save(self, *args, **kwargs):\n\n logger.info(\n 'UserProfileForm saved with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # pop kwarg to make sure the super class does not get this param\n user = kwargs.pop('user', None)\n\n if not self.instance.id:\n # don't commit saving of ModelForm, because required user field is still missing\n kwargs['commit'] = False\n\n # save ModelForm\n user_profile = super(UserProfileForm, self).save(*args, **kwargs)\n\n if not self.instance.id:\n user_profile.user = user\n user_profile.save()\n # check if called from UserProfileForm or RegionUserProfileForm\n if 'regions' in self.cleaned_data:\n # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly\n user_profile.regions = self.cleaned_data['regions']\n user_profile.save()\n\n return user_profile\n", "path": "src/cms/forms/users/user_profile_form.py"}]} | 1,140 | 140 |
gh_patches_debug_321 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove possibly unused constant
At first sight it looks like this constant isn't used anymore after https://github.com/rtfd/readthedocs.org/pull/5383
https://github.com/rtfd/readthedocs.org/blob/78c34c904b347110b2cd545b4b5a80ed526590f7/readthedocs/core/models.py#L13-L13
We should still double check and make sure tests are passing after the removal.
</issue>
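One way to do that double check before deleting the constant is a quick repository-wide search. A small, generic sketch (not part of the Read the Docs code base):

```python
import pathlib


def find_references(root=".", name="STANDARD_EMAIL"):
    """Yield (path, line number, line) for every Python line mentioning `name`."""
    for path in pathlib.Path(root).rglob("*.py"):
        for number, line in enumerate(path.read_text(errors="ignore").splitlines(), 1):
            if name in line:
                yield path, number, line.strip()


if __name__ == "__main__":
    for hit in find_references():
        print(*hit)
```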
<code>
[start of readthedocs/core/models.py]
1 # -*- coding: utf-8 -*-
2
3 """Models for the core app."""
4 import logging
5
6 from annoying.fields import AutoOneToOneField
7 from django.db import models
8 from django.urls import reverse
9 from django.utils.translation import ugettext
10 from django.utils.translation import ugettext_lazy as _
11
12
13 STANDARD_EMAIL = '[email protected]'
14
15 log = logging.getLogger(__name__)
16
17
18 class UserProfile(models.Model):
19
20 """Additional information about a User."""
21
22 user = AutoOneToOneField(
23 'auth.User',
24 verbose_name=_('User'),
25 related_name='profile',
26 )
27 whitelisted = models.BooleanField(_('Whitelisted'), default=False)
28 banned = models.BooleanField(_('Banned'), default=False)
29 homepage = models.CharField(_('Homepage'), max_length=100, blank=True)
30 allow_ads = models.BooleanField(
31 _('See paid advertising'),
32 help_text=_('If unchecked, you will still see community ads.'),
33 default=True,
34 )
35
36 def __str__(self):
37 return (
38 ugettext("%(username)s's profile") %
39 {'username': self.user.username}
40 )
41
42 def get_absolute_url(self):
43 return reverse(
44 'profiles_profile_detail',
45 kwargs={'username': self.user.username},
46 )
47
[end of readthedocs/core/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py
--- a/readthedocs/core/models.py
+++ b/readthedocs/core/models.py
@@ -10,8 +10,6 @@
from django.utils.translation import ugettext_lazy as _
-STANDARD_EMAIL = '[email protected]'
-
log = logging.getLogger(__name__)
| {"golden_diff": "diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py\n--- a/readthedocs/core/models.py\n+++ b/readthedocs/core/models.py\n@@ -10,8 +10,6 @@\n from django.utils.translation import ugettext_lazy as _\n \n \n-STANDARD_EMAIL = '[email protected]'\n-\n log = logging.getLogger(__name__)\n", "issue": "Remove possibel unused constant\nAt first sight looks like isn't used anymore after https://github.com/rtfd/readthedocs.org/pull/5383\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/78c34c904b347110b2cd545b4b5a80ed526590f7/readthedocs/core/models.py#L13-L13\r\n\r\nWe should still double check and make sure tests are passing after the removal.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Models for the core app.\"\"\"\nimport logging\n\nfrom annoying.fields import AutoOneToOneField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\n\n\nSTANDARD_EMAIL = '[email protected]'\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfile(models.Model):\n\n \"\"\"Additional information about a User.\"\"\"\n\n user = AutoOneToOneField(\n 'auth.User',\n verbose_name=_('User'),\n related_name='profile',\n )\n whitelisted = models.BooleanField(_('Whitelisted'), default=False)\n banned = models.BooleanField(_('Banned'), default=False)\n homepage = models.CharField(_('Homepage'), max_length=100, blank=True)\n allow_ads = models.BooleanField(\n _('See paid advertising'),\n help_text=_('If unchecked, you will still see community ads.'),\n default=True,\n )\n\n def __str__(self):\n return (\n ugettext(\"%(username)s's profile\") %\n {'username': self.user.username}\n )\n\n def get_absolute_url(self):\n return reverse(\n 'profiles_profile_detail',\n kwargs={'username': self.user.username},\n )\n", "path": "readthedocs/core/models.py"}]} | 1,000 | 79 |
gh_patches_debug_1133 | rasdani/github-patches | git_diff | joke2k__faker-512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using É, é (e-acute) in emails.
It looks like the É, é (e-acute) symbols are not appropriate for a valid email. I used https://pypi.python.org/pypi/robotframework-faker/ which uses this library, and the following email was returned:
andré[email protected]
But email verification failed for this email.
Could you remove É, é and other such letters from valid email generation if they are present?
</issue>
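For illustration, a small stand-alone sketch of how a replacements table like the provider's can be applied to a name before it is used in an email local part. The é/É and à/À pairs are the ones the fix below ends up adding; the rest mirror the existing German table.

```python
# -*- coding: utf-8 -*-
REPLACEMENTS = (
    ('ä', 'ae'), ('Ä', 'Ae'),
    ('ö', 'oe'), ('Ö', 'Oe'),
    ('ü', 'ue'), ('Ü', 'Ue'),
    ('é', 'e'), ('É', 'E'),
    ('à', 'a'), ('À', 'A'),
    ('ß', 'ss'),
)


def asciify(name, replacements=REPLACEMENTS):
    """Transliterate accented characters so the result is safe for an email local part."""
    for src, dst in replacements:
        name = name.replace(src, dst)
    return name


print(asciify('andré'))  # -> andre
```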
<code>
[start of faker/providers/internet/de_DE/__init__.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4 from .. import Provider as InternetProvider
5
6 class Provider(InternetProvider):
7
8 free_email_domains = (
9 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',
10 'web.de', 'yahoo.de',
11 )
12 tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )
13
14 replacements = (
15 ('ä', 'ae'), ('Ä', 'Ae'),
16 ('ö', 'oe'), ('Ö', 'Oe'),
17 ('ü', 'ue'), ('Ü', 'Ue'),
18 ('ß', 'ss'),
19 )
20
[end of faker/providers/internet/de_DE/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/internet/de_DE/__init__.py b/faker/providers/internet/de_DE/__init__.py
--- a/faker/providers/internet/de_DE/__init__.py
+++ b/faker/providers/internet/de_DE/__init__.py
@@ -15,5 +15,7 @@
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
+ ('é', 'e'), ('É', 'E'),
+ ('à', 'a'), ('À', 'A'),
('ß', 'ss'),
)
| {"golden_diff": "diff --git a/faker/providers/internet/de_DE/__init__.py b/faker/providers/internet/de_DE/__init__.py\n--- a/faker/providers/internet/de_DE/__init__.py\n+++ b/faker/providers/internet/de_DE/__init__.py\n@@ -15,5 +15,7 @@\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n+ ('\u00e9', 'e'), ('\u00c9', 'E'),\n+ ('\u00e0', 'a'), ('\u00c0', 'A'),\n ('\u00df', 'ss'),\n )\n", "issue": "Using \u00c9, \u00e9 (e-acute) in emails.\nIt looks that \u00c9, \u00e9 (e-acute) symbols are not appropriate for valid email. I used https://pypi.python.org/pypi/robotframework-faker/ which uses this library and the following email was returned: \r\nandr\[email protected]\r\n\r\nBut email verification was failed for this email. \r\nCould you remove \u00c9, \u00e9 and other such letters if they are present from valid email generation?\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',\n 'web.de', 'yahoo.de',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )\n\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss'),\n )\n", "path": "faker/providers/internet/de_DE/__init__.py"}]} | 831 | 147 |
gh_patches_debug_32914 | rasdani/github-patches | git_diff | getpelican__pelican-2440 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Category/Tag/Author slugs are not settable
URLWrapper objects have a setter for their 'slug' property, but all of the concrete URLWrapper subclasses override the _getter_ for 'slug', which, because of the way Python's property accessors work, makes the setter inaccessible. This breaks the 'category_meta' plugin and probably other things as well.
</issue>
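The pitfall is easy to reproduce outside of Pelican. A minimal, self-contained sketch (generic names, not the URLWrapper code) showing how redefining only the getter in a subclass replaces the whole property object and silently drops the inherited setter:

```python
class Base:
    @property
    def slug(self):
        return self._slug

    @slug.setter
    def slug(self, value):
        self._slug = value


class Child(Base):
    @property            # a brand-new property object: getter only, setter lost
    def slug(self):
        return getattr(self, "_slug", None) or "derived"


child = Child()
try:
    child.slug = "custom"
except AttributeError as exc:   # raised because Child's property has no setter
    print(exc)
```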
<code>
[start of pelican/urlwrappers.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 import functools
5 import logging
6 import os
7
8 import six
9
10 from pelican.utils import python_2_unicode_compatible, slugify
11
12 logger = logging.getLogger(__name__)
13
14
15 @python_2_unicode_compatible
16 @functools.total_ordering
17 class URLWrapper(object):
18 def __init__(self, name, settings):
19 self.settings = settings
20 self._name = name
21 self._slug = None
22 self._slug_from_name = True
23
24 @property
25 def name(self):
26 return self._name
27
28 @name.setter
29 def name(self, name):
30 self._name = name
31 # if slug wasn't explicitly set, it needs to be regenerated from name
32 # so, changing name should reset slug for slugification
33 if self._slug_from_name:
34 self._slug = None
35
36 @property
37 def slug(self):
38 if self._slug is None:
39 self._slug = slugify(
40 self.name,
41 regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
42 return self._slug
43
44 @slug.setter
45 def slug(self, slug):
46         # if slug is explicitly set, changing name won't alter slug
47 self._slug_from_name = False
48 self._slug = slug
49
50 def as_dict(self):
51 d = self.__dict__
52 d['name'] = self.name
53 d['slug'] = self.slug
54 return d
55
56 def __hash__(self):
57 return hash(self.slug)
58
59 def _normalize_key(self, key):
60 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
61 return six.text_type(slugify(key, regex_subs=subs))
62
63 def __eq__(self, other):
64 if isinstance(other, self.__class__):
65 return self.slug == other.slug
66 if isinstance(other, six.text_type):
67 return self.slug == self._normalize_key(other)
68 return False
69
70 def __ne__(self, other):
71 if isinstance(other, self.__class__):
72 return self.slug != other.slug
73 if isinstance(other, six.text_type):
74 return self.slug != self._normalize_key(other)
75 return True
76
77 def __lt__(self, other):
78 if isinstance(other, self.__class__):
79 return self.slug < other.slug
80 if isinstance(other, six.text_type):
81 return self.slug < self._normalize_key(other)
82 return False
83
84 def __str__(self):
85 return self.name
86
87 def __repr__(self):
88 return '<{} {}>'.format(type(self).__name__, repr(self._name))
89
90 def _from_settings(self, key, get_page_name=False):
91 """Returns URL information as defined in settings.
92
93 When get_page_name=True returns URL without anything after {slug} e.g.
94 if in settings: CATEGORY_URL="cat/{slug}.html" this returns
95 "cat/{slug}" Useful for pagination.
96
97 """
98 setting = "%s_%s" % (self.__class__.__name__.upper(), key)
99 value = self.settings[setting]
100 if not isinstance(value, six.string_types):
101 logger.warning('%s is set to %s', setting, value)
102 return value
103 else:
104 if get_page_name:
105 return os.path.splitext(value)[0].format(**self.as_dict())
106 else:
107 return value.format(**self.as_dict())
108
109 page_name = property(functools.partial(_from_settings, key='URL',
110 get_page_name=True))
111 url = property(functools.partial(_from_settings, key='URL'))
112 save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
113
114
115 class Category(URLWrapper):
116 @property
117 def slug(self):
118 if self._slug is None:
119 if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:
120 subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']
121 else:
122 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
123 self._slug = slugify(self.name, regex_subs=subs)
124 return self._slug
125
126
127 class Tag(URLWrapper):
128 def __init__(self, name, *args, **kwargs):
129 super(Tag, self).__init__(name.strip(), *args, **kwargs)
130
131 @property
132 def slug(self):
133 if self._slug is None:
134 if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:
135 subs = self.settings['TAG_REGEX_SUBSTITUTIONS']
136 else:
137 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
138 self._slug = slugify(self.name, regex_subs=subs)
139 return self._slug
140
141
142 class Author(URLWrapper):
143 @property
144 def slug(self):
145 if self._slug is None:
146 if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:
147 subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']
148 else:
149 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
150 self._slug = slugify(self.name, regex_subs=subs)
151 return self._slug
152
[end of pelican/urlwrappers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py
--- a/pelican/urlwrappers.py
+++ b/pelican/urlwrappers.py
@@ -36,9 +36,17 @@
@property
def slug(self):
if self._slug is None:
- self._slug = slugify(
- self.name,
- regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
+ class_key = '{}_REGEX_SUBSTITUTIONS'.format(
+ self.__class__.__name__.upper())
+ if class_key in self.settings:
+ self._slug = slugify(
+ self.name,
+ regex_subs=self.settings[class_key])
+ else:
+ self._slug = slugify(
+ self.name,
+ regex_subs=self.settings.get(
+ 'SLUG_REGEX_SUBSTITUTIONS', []))
return self._slug
@slug.setter
@@ -113,39 +121,13 @@
class Category(URLWrapper):
- @property
- def slug(self):
- if self._slug is None:
- if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:
- subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']
- else:
- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
- self._slug = slugify(self.name, regex_subs=subs)
- return self._slug
+ pass
class Tag(URLWrapper):
def __init__(self, name, *args, **kwargs):
super(Tag, self).__init__(name.strip(), *args, **kwargs)
- @property
- def slug(self):
- if self._slug is None:
- if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:
- subs = self.settings['TAG_REGEX_SUBSTITUTIONS']
- else:
- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
- self._slug = slugify(self.name, regex_subs=subs)
- return self._slug
-
class Author(URLWrapper):
- @property
- def slug(self):
- if self._slug is None:
- if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:
- subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']
- else:
- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
- self._slug = slugify(self.name, regex_subs=subs)
- return self._slug
+ pass
| {"golden_diff": "diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py\n--- a/pelican/urlwrappers.py\n+++ b/pelican/urlwrappers.py\n@@ -36,9 +36,17 @@\n @property\n def slug(self):\n if self._slug is None:\n- self._slug = slugify(\n- self.name,\n- regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))\n+ class_key = '{}_REGEX_SUBSTITUTIONS'.format(\n+ self.__class__.__name__.upper())\n+ if class_key in self.settings:\n+ self._slug = slugify(\n+ self.name,\n+ regex_subs=self.settings[class_key])\n+ else:\n+ self._slug = slugify(\n+ self.name,\n+ regex_subs=self.settings.get(\n+ 'SLUG_REGEX_SUBSTITUTIONS', []))\n return self._slug\n \n @slug.setter\n@@ -113,39 +121,13 @@\n \n \n class Category(URLWrapper):\n- @property\n- def slug(self):\n- if self._slug is None:\n- if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:\n- subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']\n- else:\n- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n- self._slug = slugify(self.name, regex_subs=subs)\n- return self._slug\n+ pass\n \n \n class Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), *args, **kwargs)\n \n- @property\n- def slug(self):\n- if self._slug is None:\n- if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:\n- subs = self.settings['TAG_REGEX_SUBSTITUTIONS']\n- else:\n- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n- self._slug = slugify(self.name, regex_subs=subs)\n- return self._slug\n-\n \n class Author(URLWrapper):\n- @property\n- def slug(self):\n- if self._slug is None:\n- if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:\n- subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']\n- else:\n- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n- self._slug = slugify(self.name, regex_subs=subs)\n- return self._slug\n+ pass\n", "issue": "Category/Tag/Author slugs are not settable\nURLWrapper objects have a setter for their 'slug' property, but all of the concrete URLWrapper subclasses override the _getter_ for 'slug', which, because of the way Python's property accessors work, makes the setter inaccessible. 
This breaks the 'category_meta' plugin and probably other things as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\n\nimport six\n\nfrom pelican.utils import python_2_unicode_compatible, slugify\n\nlogger = logging.getLogger(__name__)\n\n\n@python_2_unicode_compatible\[email protected]_ordering\nclass URLWrapper(object):\n def __init__(self, name, settings):\n self.settings = settings\n self._name = name\n self._slug = None\n self._slug_from_name = True\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n # if slug wasn't explicitly set, it needs to be regenerated from name\n # so, changing name should reset slug for slugification\n if self._slug_from_name:\n self._slug = None\n\n @property\n def slug(self):\n if self._slug is None:\n self._slug = slugify(\n self.name,\n regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))\n return self._slug\n\n @slug.setter\n def slug(self, slug):\n # if slug is expliticly set, changing name won't alter slug\n self._slug_from_name = False\n self._slug = slug\n\n def as_dict(self):\n d = self.__dict__\n d['name'] = self.name\n d['slug'] = self.slug\n return d\n\n def __hash__(self):\n return hash(self.slug)\n\n def _normalize_key(self, key):\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n return six.text_type(slugify(key, regex_subs=subs))\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.slug == other.slug\n if isinstance(other, six.text_type):\n return self.slug == self._normalize_key(other)\n return False\n\n def __ne__(self, other):\n if isinstance(other, self.__class__):\n return self.slug != other.slug\n if isinstance(other, six.text_type):\n return self.slug != self._normalize_key(other)\n return True\n\n def __lt__(self, other):\n if isinstance(other, self.__class__):\n return self.slug < other.slug\n if isinstance(other, six.text_type):\n return self.slug < self._normalize_key(other)\n return False\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return '<{} {}>'.format(type(self).__name__, repr(self._name))\n\n def _from_settings(self, key, get_page_name=False):\n \"\"\"Returns URL information as defined in settings.\n\n When get_page_name=True returns URL without anything after {slug} e.g.\n if in settings: CATEGORY_URL=\"cat/{slug}.html\" this returns\n \"cat/{slug}\" Useful for pagination.\n\n \"\"\"\n setting = \"%s_%s\" % (self.__class__.__name__.upper(), key)\n value = self.settings[setting]\n if not isinstance(value, six.string_types):\n logger.warning('%s is set to %s', setting, value)\n return value\n else:\n if get_page_name:\n return os.path.splitext(value)[0].format(**self.as_dict())\n else:\n return value.format(**self.as_dict())\n\n page_name = property(functools.partial(_from_settings, key='URL',\n get_page_name=True))\n url = property(functools.partial(_from_settings, key='URL'))\n save_as = property(functools.partial(_from_settings, key='SAVE_AS'))\n\n\nclass Category(URLWrapper):\n @property\n def slug(self):\n if self._slug is None:\n if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:\n subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']\n else:\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n self._slug = slugify(self.name, regex_subs=subs)\n return self._slug\n\n\nclass Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), 
*args, **kwargs)\n\n @property\n def slug(self):\n if self._slug is None:\n if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:\n subs = self.settings['TAG_REGEX_SUBSTITUTIONS']\n else:\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n self._slug = slugify(self.name, regex_subs=subs)\n return self._slug\n\n\nclass Author(URLWrapper):\n @property\n def slug(self):\n if self._slug is None:\n if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:\n subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']\n else:\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n self._slug = slugify(self.name, regex_subs=subs)\n return self._slug\n", "path": "pelican/urlwrappers.py"}]} | 2,075 | 567 |
gh_patches_debug_39819 | rasdani/github-patches | git_diff | localstack__localstack-1086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
APIGateway AWS_PROXY integration failing for paths without parameters
I'm trying to run an AWS_PROXY integration between APIGateway and Lambda, and I'm hitting what looks like a fundamental logic bug in the APIGateway implementation.
https://github.com/localstack/localstack/blob/master/localstack/services/apigateway/apigateway_listener.py#L101
```
File "/opt/code/localstack/localstack/services/apigateway/apigateway_listener.py", line 101, in forward_request
'resourceId': resource.get('id'),
UnboundLocalError: local variable 'resource' referenced before assignment
```
Scanning up the file, it looks like `resource` is only initialized within a `catch` block. I assume this is a simple oversight rather than a complex bug.
This looks like a bug I can fix, but I'd appreciate any guidance you can offer.
</issue>
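For context, the traceback above is the standard Python scoping pitfall: a name bound only inside an `except` branch is unbound whenever the `try` succeeds. A minimal sketch of the pattern and the usual remedy (names are illustrative, not taken from the repository):

```
def broken_lookup(path, exact_matches, path_map):
    try:
        integration = exact_matches[path]   # happy path: 'resource' is never bound
    except KeyError:
        resource = path_map[path]           # only bound when the exact match fails
        integration = resource.get('resourceMethods', {})
    # Referencing 'resource' here raises UnboundLocalError on the happy path.
    return integration, resource.get('id')


def fixed_lookup(path, path_map):
    # Resolve the resource unconditionally first and derive everything else from
    # it, so later references (request context, path params) always have it.
    resource = path_map[path]
    integration = resource.get('resourceMethods', {})
    return integration, resource.get('id')
```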
<code>
[start of localstack/services/apigateway/apigateway_listener.py]
1 import re
2 import logging
3 import json
4 import requests
5
6 from requests.models import Response
7 from flask import Response as FlaskResponse
8 from localstack.constants import APPLICATION_JSON, PATH_USER_REQUEST
9 from localstack.config import TEST_KINESIS_URL
10 from localstack.utils import common
11 from localstack.utils.aws import aws_stack
12 from localstack.utils.common import to_str
13 from localstack.services.awslambda import lambda_api
14 from localstack.services.kinesis import kinesis_listener
15 from localstack.services.generic_proxy import ProxyListener
16 from .helpers import (get_rest_api_paths, get_resource_for_path,
17 flask_to_requests_response, handle_authorizers,
18 extract_query_string_params, extract_path_params,
19 make_error, get_cors_response)
20
21 # set up logger
22 LOGGER = logging.getLogger(__name__)
23
24 # regex path patterns
25 PATH_REGEX_AUTHORIZERS = r'^/restapis/([A-Za-z0-9_\-]+)/authorizers(\?.*)?'
26
27
28 class ProxyListenerApiGateway(ProxyListener):
29
30 def forward_request(self, method, path, data, headers):
31 data = data and json.loads(to_str(data))
32
33 # Paths to match
34 regex2 = r'^/restapis/([A-Za-z0-9_\-]+)/([A-Za-z0-9_\-]+)/%s/(.*)$' % PATH_USER_REQUEST
35
36 if re.match(regex2, path):
37 search_match = re.search(regex2, path)
38 api_id = search_match.group(1)
39 stage = search_match.group(2)
40 relative_path = '/%s' % search_match.group(3)
41 try:
42 integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)
43 assert integration
44 except Exception:
45 try:
46 integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)
47 assert integration
48 except Exception:
49 # if we have no exact match, try to find an API resource that contains path parameters
50 path_map = get_rest_api_paths(rest_api_id=api_id)
51 try:
52 extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
53 except Exception:
54 return make_error('Unable to find path %s' % path, 404)
55
56 integrations = resource.get('resourceMethods', {})
57 integration = integrations.get(method, {})
58 if not integration:
59 integration = integrations.get('ANY', {})
60 integration = integration.get('methodIntegration')
61 if not integration:
62
63 if method == 'OPTIONS' and 'Origin' in headers:
64 # default to returning CORS headers if this is an OPTIONS request
65 return get_cors_response(headers)
66
67 return make_error('Unable to find integration for path %s' % path, 404)
68
69 uri = integration.get('uri')
70 if method == 'POST' and integration['type'] == 'AWS':
71 if uri.endswith('kinesis:action/PutRecords'):
72 template = integration['requestTemplates'][APPLICATION_JSON]
73 new_request = aws_stack.render_velocity_template(template, data)
74
75 # forward records to target kinesis stream
76 headers = aws_stack.mock_aws_request_headers(service='kinesis')
77 headers['X-Amz-Target'] = kinesis_listener.ACTION_PUT_RECORDS
78 result = common.make_http_request(url=TEST_KINESIS_URL,
79 method='POST', data=new_request, headers=headers)
80 return result
81 else:
82 msg = 'API Gateway action uri "%s" not yet implemented' % uri
83 LOGGER.warning(msg)
84 return make_error(msg, 404)
85
86 elif integration['type'] == 'AWS_PROXY':
87 if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:
88 func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]
89 account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]
90 data_str = json.dumps(data) if isinstance(data, dict) else data
91
92 relative_path, query_string_params = extract_query_string_params(path=relative_path)
93
94 source_ip = headers['X-Forwarded-For'].split(',')[-2]
95
96 # Sample request context:
97 # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
98 request_context = {
99 'path': relative_path,
100 'accountId': account_id,
101 'resourceId': resource.get('id'),
102 'stage': stage,
103 'identity': {
104 'accountId': account_id,
105 'sourceIp': source_ip,
106 'userAgent': headers['User-Agent'],
107 }
108 }
109
110 try:
111 path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)
112 except Exception:
113 path_params = {}
114
115 result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,
116 headers, path_params=path_params, query_string_params=query_string_params,
117 method=method, resource_path=path, request_context=request_context)
118
119 if isinstance(result, FlaskResponse):
120 return flask_to_requests_response(result)
121
122 response = Response()
123 parsed_result = result if isinstance(result, dict) else json.loads(result)
124 parsed_result = common.json_safe(parsed_result)
125 response.status_code = int(parsed_result.get('statusCode', 200))
126 response.headers.update(parsed_result.get('headers', {}))
127 try:
128 if isinstance(parsed_result['body'], dict):
129 response._content = json.dumps(parsed_result['body'])
130 else:
131 response._content = parsed_result['body']
132 except Exception:
133 response._content = '{}'
134 return response
135 else:
136 msg = 'API Gateway action uri "%s" not yet implemented' % uri
137 LOGGER.warning(msg)
138 return make_error(msg, 404)
139
140 elif integration['type'] == 'HTTP':
141 function = getattr(requests, method.lower())
142 if isinstance(data, dict):
143 data = json.dumps(data)
144 result = function(integration['uri'], data=data, headers=headers)
145 return result
146
147 else:
148 msg = ('API Gateway integration type "%s" for method "%s" not yet implemented' %
149 (integration['type'], method))
150 LOGGER.warning(msg)
151 return make_error(msg, 404)
152
153 return 200
154
155 if re.match(PATH_REGEX_AUTHORIZERS, path):
156 return handle_authorizers(method, path, data, headers)
157
158 return True
159
160
161 # instantiate listener
162 UPDATE_APIGATEWAY = ProxyListenerApiGateway()
163
[end of localstack/services/apigateway/apigateway_listener.py]
</code>
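Any AWS_PROXY route without path parameters should trigger the failing branch, because the exact-match lookup succeeds and `resource` is never assigned. A hedged reproduction sketch (API id, stage and port are made up; the port assumes the LocalStack API Gateway default):

```
import requests

api_id = 'abc123'   # hypothetical REST API id
stage = 'local'     # hypothetical deployment stage
url = 'http://localhost:4567/restapis/%s/%s/_user_request_/duck' % (api_id, stage)

# Before the fix, this request trips the UnboundLocalError quoted in the issue.
response = requests.post(url, json={'text': 'duck'})
print(response.status_code, response.text)
```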
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
     return points
</patch> | diff --git a/localstack/services/apigateway/apigateway_listener.py b/localstack/services/apigateway/apigateway_listener.py
</patch> | diff --git a/localstack/services/apigateway/apigateway_listener.py b/localstack/services/apigateway/apigateway_listener.py
--- a/localstack/services/apigateway/apigateway_listener.py
+++ b/localstack/services/apigateway/apigateway_listener.py
@@ -37,34 +37,28 @@
search_match = re.search(regex2, path)
api_id = search_match.group(1)
stage = search_match.group(2)
- relative_path = '/%s' % search_match.group(3)
+ relative_path_w_query_params = '/%s' % search_match.group(3)
+
+ relative_path, query_string_params = extract_query_string_params(path=relative_path_w_query_params)
+
+ path_map = get_rest_api_paths(rest_api_id=api_id)
try:
- integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)
- assert integration
+ extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
except Exception:
- try:
- integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)
- assert integration
- except Exception:
- # if we have no exact match, try to find an API resource that contains path parameters
- path_map = get_rest_api_paths(rest_api_id=api_id)
- try:
- extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
- except Exception:
- return make_error('Unable to find path %s' % path, 404)
+ return make_error('Unable to find path %s' % path, 404)
- integrations = resource.get('resourceMethods', {})
- integration = integrations.get(method, {})
- if not integration:
- integration = integrations.get('ANY', {})
- integration = integration.get('methodIntegration')
- if not integration:
+ integrations = resource.get('resourceMethods', {})
+ integration = integrations.get(method, {})
+ if not integration:
+ integration = integrations.get('ANY', {})
+ integration = integration.get('methodIntegration')
+ if not integration:
- if method == 'OPTIONS' and 'Origin' in headers:
- # default to returning CORS headers if this is an OPTIONS request
- return get_cors_response(headers)
+ if method == 'OPTIONS' and 'Origin' in headers:
+ # default to returning CORS headers if this is an OPTIONS request
+ return get_cors_response(headers)
- return make_error('Unable to find integration for path %s' % path, 404)
+ return make_error('Unable to find integration for path %s' % path, 404)
uri = integration.get('uri')
if method == 'POST' and integration['type'] == 'AWS':
@@ -89,8 +83,6 @@
account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]
data_str = json.dumps(data) if isinstance(data, dict) else data
- relative_path, query_string_params = extract_query_string_params(path=relative_path)
-
         source_ip = headers['X-Forwarded-For'].split(',')[-2]
 
         # Sample request context:
| {"golden_diff": "diff --git a/localstack/services/apigateway/apigateway_listener.py b/localstack/services/apigateway/apigateway_listener.py\n--- a/localstack/services/apigateway/apigateway_listener.py\n+++ b/localstack/services/apigateway/apigateway_listener.py\n@@ -37,34 +37,28 @@\n search_match = re.search(regex2, path)\n api_id = search_match.group(1)\n stage = search_match.group(2)\n- relative_path = '/%s' % search_match.group(3)\n+ relative_path_w_query_params = '/%s' % search_match.group(3)\n+\n+ relative_path, query_string_params = extract_query_string_params(path=relative_path_w_query_params)\n+\n+ path_map = get_rest_api_paths(rest_api_id=api_id)\n try:\n- integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)\n- assert integration\n+ extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)\n except Exception:\n- try:\n- integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)\n- assert integration\n- except Exception:\n- # if we have no exact match, try to find an API resource that contains path parameters\n- path_map = get_rest_api_paths(rest_api_id=api_id)\n- try:\n- extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)\n- except Exception:\n- return make_error('Unable to find path %s' % path, 404)\n+ return make_error('Unable to find path %s' % path, 404)\n \n- integrations = resource.get('resourceMethods', {})\n- integration = integrations.get(method, {})\n- if not integration:\n- integration = integrations.get('ANY', {})\n- integration = integration.get('methodIntegration')\n- if not integration:\n+ integrations = resource.get('resourceMethods', {})\n+ integration = integrations.get(method, {})\n+ if not integration:\n+ integration = integrations.get('ANY', {})\n+ integration = integration.get('methodIntegration')\n+ if not integration:\n \n- if method == 'OPTIONS' and 'Origin' in headers:\n- # default to returning CORS headers if this is an OPTIONS request\n- return get_cors_response(headers)\n+ if method == 'OPTIONS' and 'Origin' in headers:\n+ # default to returning CORS headers if this is an OPTIONS request\n+ return get_cors_response(headers)\n \n- return make_error('Unable to find integration for path %s' % path, 404)\n+ return make_error('Unable to find integration for path %s' % path, 404)\n \n uri = integration.get('uri')\n if method == 'POST' and integration['type'] == 'AWS':\n@@ -89,8 +83,6 @@\n account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]\n data_str = json.dumps(data) if isinstance(data, dict) else data\n \n- relative_path, query_string_params = extract_query_string_params(path=relative_path)\n-\n source_ip = headers['X-Forwarded-For'].split(',')[-2]\n \n # Sample request context:\n", "issue": "APIGateway AWS_PROXY integration failing for paths without parameters\nI'm trying to run an AWS_PROXY integration between APIGateway and Lambda, and I'm hitting what looks like a fundamental logic bug in the APIGateway implementation.\r\n\r\nhttps://github.com/localstack/localstack/blob/master/localstack/services/apigateway/apigateway_listener.py#L101\r\n\r\n```\r\n File \"/opt/code/localstack/localstack/services/apigateway/apigateway_listener.py\", line 101, in forward_request\r\n 'resourceId': resource.get('id'),\r\nUnboundLocalError: local variable 'resource' referenced before assignment\r\n```\r\n\r\nScanning up the file, it looks like `resource` is only be initialized within a `catch` block. 
I assume this is a simple oversight rather than a complex bug.\r\n\r\nThis looks like a bug I can fix, but I'd appreciate any guidance you can offer.\n", "before_files": [{"content": "import re\nimport logging\nimport json\nimport requests\n\nfrom requests.models import Response\nfrom flask import Response as FlaskResponse\nfrom localstack.constants import APPLICATION_JSON, PATH_USER_REQUEST\nfrom localstack.config import TEST_KINESIS_URL\nfrom localstack.utils import common\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import to_str\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.kinesis import kinesis_listener\nfrom localstack.services.generic_proxy import ProxyListener\nfrom .helpers import (get_rest_api_paths, get_resource_for_path,\n flask_to_requests_response, handle_authorizers,\n extract_query_string_params, extract_path_params,\n make_error, get_cors_response)\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n# regex path patterns\nPATH_REGEX_AUTHORIZERS = r'^/restapis/([A-Za-z0-9_\\-]+)/authorizers(\\?.*)?'\n\n\nclass ProxyListenerApiGateway(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n data = data and json.loads(to_str(data))\n\n # Paths to match\n regex2 = r'^/restapis/([A-Za-z0-9_\\-]+)/([A-Za-z0-9_\\-]+)/%s/(.*)$' % PATH_USER_REQUEST\n\n if re.match(regex2, path):\n search_match = re.search(regex2, path)\n api_id = search_match.group(1)\n stage = search_match.group(2)\n relative_path = '/%s' % search_match.group(3)\n try:\n integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)\n assert integration\n except Exception:\n try:\n integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)\n assert integration\n except Exception:\n # if we have no exact match, try to find an API resource that contains path parameters\n path_map = get_rest_api_paths(rest_api_id=api_id)\n try:\n extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)\n except Exception:\n return make_error('Unable to find path %s' % path, 404)\n\n integrations = resource.get('resourceMethods', {})\n integration = integrations.get(method, {})\n if not integration:\n integration = integrations.get('ANY', {})\n integration = integration.get('methodIntegration')\n if not integration:\n\n if method == 'OPTIONS' and 'Origin' in headers:\n # default to returning CORS headers if this is an OPTIONS request\n return get_cors_response(headers)\n\n return make_error('Unable to find integration for path %s' % path, 404)\n\n uri = integration.get('uri')\n if method == 'POST' and integration['type'] == 'AWS':\n if uri.endswith('kinesis:action/PutRecords'):\n template = integration['requestTemplates'][APPLICATION_JSON]\n new_request = aws_stack.render_velocity_template(template, data)\n\n # forward records to target kinesis stream\n headers = aws_stack.mock_aws_request_headers(service='kinesis')\n headers['X-Amz-Target'] = kinesis_listener.ACTION_PUT_RECORDS\n result = common.make_http_request(url=TEST_KINESIS_URL,\n method='POST', data=new_request, headers=headers)\n return result\n else:\n msg = 'API Gateway action uri \"%s\" not yet implemented' % uri\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n elif integration['type'] == 'AWS_PROXY':\n if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:\n func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]\n account_id = 
uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]\n data_str = json.dumps(data) if isinstance(data, dict) else data\n\n relative_path, query_string_params = extract_query_string_params(path=relative_path)\n\n source_ip = headers['X-Forwarded-For'].split(',')[-2]\n\n # Sample request context:\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test\n request_context = {\n 'path': relative_path,\n 'accountId': account_id,\n 'resourceId': resource.get('id'),\n 'stage': stage,\n 'identity': {\n 'accountId': account_id,\n 'sourceIp': source_ip,\n 'userAgent': headers['User-Agent'],\n }\n }\n\n try:\n path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)\n except Exception:\n path_params = {}\n\n result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,\n headers, path_params=path_params, query_string_params=query_string_params,\n method=method, resource_path=path, request_context=request_context)\n\n if isinstance(result, FlaskResponse):\n return flask_to_requests_response(result)\n\n response = Response()\n parsed_result = result if isinstance(result, dict) else json.loads(result)\n parsed_result = common.json_safe(parsed_result)\n response.status_code = int(parsed_result.get('statusCode', 200))\n response.headers.update(parsed_result.get('headers', {}))\n try:\n if isinstance(parsed_result['body'], dict):\n response._content = json.dumps(parsed_result['body'])\n else:\n response._content = parsed_result['body']\n except Exception:\n response._content = '{}'\n return response\n else:\n msg = 'API Gateway action uri \"%s\" not yet implemented' % uri\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n elif integration['type'] == 'HTTP':\n function = getattr(requests, method.lower())\n if isinstance(data, dict):\n data = json.dumps(data)\n result = function(integration['uri'], data=data, headers=headers)\n return result\n\n else:\n msg = ('API Gateway integration type \"%s\" for method \"%s\" not yet implemented' %\n (integration['type'], method))\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n return 200\n\n if re.match(PATH_REGEX_AUTHORIZERS, path):\n return handle_authorizers(method, path, data, headers)\n\n return True\n\n\n# instantiate listener\nUPDATE_APIGATEWAY = ProxyListenerApiGateway()\n", "path": "localstack/services/apigateway/apigateway_listener.py"}]} | 2,545 | 733 |
gh_patches_debug_35965 | rasdani/github-patches | git_diff | ethereum__web3.py-914 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in websockets.py: '<=' not supported between instances of 'int' and 'NoneType'
* web3 (4.3.0)
* websockets (4.0.1)
* Python: 3.6
* OS: osx HighSierra
### What was wrong?
`web3 = Web3(Web3.WebsocketProvider("ws://10.224.12.6:8546"))`
`web3.eth.syncing //returns data`
The websocket is clearly open, but when I run a filter that is supposed to have many entries, I get the following error trace:
Upon running: `data = web3.eth.getFilterLogs(new_block_filter.filter_id)`, I get:
```
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in make_request(self, method, params)
81 WebsocketProvider._loop
82 )
---> 83 return future.result()
/anaconda3/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
/anaconda3/lib/python3.6/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in coro_make_request(self, request_data)
71 async with self.conn as conn:
72 await conn.send(request_data)
---> 73 return json.loads(await conn.recv())
74
75 def make_request(self, method, params):
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/protocol.py in recv(self)
321 next_message.cancel()
322 if not self.legacy_recv:
--> 323 raise ConnectionClosed(self.close_code, self.close_reason)
324
325 @asyncio.coroutine
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/exceptions.py in __init__(self, code, reason)
145 self.reason = reason
146 message = "WebSocket connection is closed: "
--> 147 if 3000 <= code < 4000:
148 explanation = "registered"
149 elif 4000 <= code < 5000:
TypeError: '<=' not supported between instances of 'int' and 'NoneType'
```
The same filter runs fine (albeit a bit slow) using `Web3.HTTPProvider()`
</issue>
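The crash surfaces inside websockets 4.x while it formats a `ConnectionClosed` error for a socket the server dropped without sending a close code; the underlying problem is that the provider's single long-lived connection can die mid-request. A hedged sketch of how a caller might guard against such drops (hypothetical helper, not part of web3.py, and it assumes a websockets release whose `ConnectionClosed` constructor tolerates a missing close code):

```
import asyncio
import json

import websockets


async def request_with_retry(uri, payload, retries=3):
    # Re-dial the endpoint if the server drops the socket mid-request,
    # which is the failure mode behind the traceback above.
    for attempt in range(retries):
        try:
            async with websockets.connect(uri) as ws:
                await ws.send(json.dumps(payload))
                return json.loads(await ws.recv())
        except websockets.exceptions.ConnectionClosed:
            if attempt == retries - 1:
                raise
            await asyncio.sleep(2 ** attempt)
```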
<code>
[start of web3/providers/websocket.py]
1 import asyncio
2 import json
3 import logging
4 import os
5 from threading import (
6 Thread,
7 )
8
9 import websockets
10
11 from web3.providers.base import (
12 JSONBaseProvider,
13 )
14
15
16 def _start_event_loop(loop):
17 asyncio.set_event_loop(loop)
18 loop.run_forever()
19 loop.close()
20
21
22 def _get_threaded_loop():
23 new_loop = asyncio.new_event_loop()
24 thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)
25 thread_loop.start()
26 return new_loop
27
28
29 def get_default_endpoint():
30 return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')
31
32
33 class PersistentWebSocket:
34
35 def __init__(self, endpoint_uri, loop):
36 self.ws = None
37 self.endpoint_uri = endpoint_uri
38 self.loop = loop
39
40 async def __aenter__(self):
41 if self.ws is None:
42 self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)
43 return self.ws
44
45 async def __aexit__(self, exc_type, exc_val, exc_tb):
46 if exc_val is not None:
47 try:
48 await self.ws.close()
49 except Exception:
50 pass
51 self.ws = None
52
53
54 class WebsocketProvider(JSONBaseProvider):
55 logger = logging.getLogger("web3.providers.WebsocketProvider")
56 _loop = None
57
58 def __init__(self, endpoint_uri=None):
59 self.endpoint_uri = endpoint_uri
60 if self.endpoint_uri is None:
61 self.endpoint_uri = get_default_endpoint()
62 if WebsocketProvider._loop is None:
63 WebsocketProvider._loop = _get_threaded_loop()
64 self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)
65 super().__init__()
66
67 def __str__(self):
68 return "WS connection {0}".format(self.endpoint_uri)
69
70 async def coro_make_request(self, request_data):
71 async with self.conn as conn:
72 await conn.send(request_data)
73 return json.loads(await conn.recv())
74
75 def make_request(self, method, params):
76 self.logger.debug("Making request WebSocket. URI: %s, "
77 "Method: %s", self.endpoint_uri, method)
78 request_data = self.encode_rpc_request(method, params)
79 future = asyncio.run_coroutine_threadsafe(
80 self.coro_make_request(request_data),
81 WebsocketProvider._loop
82 )
83 return future.result()
84
[end of web3/providers/websocket.py]
</code>
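The patch that follows adds a `websocket_kwargs` pass-through so connection options reach `websockets.connect`. A usage sketch under that assumption (endpoint and values are illustrative; `max_size` is the library's own limit for incoming messages, 1 MiB by default):

```
from web3 import Web3

provider = Web3.WebsocketProvider(
    'ws://127.0.0.1:8546',
    websocket_kwargs={'max_size': 10 * 1024 * 1024},  # allow larger JSON-RPC replies
)
web3 = Web3(provider)
print(web3.eth.syncing)
```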
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/providers/websocket.py b/web3/providers/websocket.py
--- a/web3/providers/websocket.py
+++ b/web3/providers/websocket.py
@@ -8,10 +8,15 @@
import websockets
+from web3.exceptions import (
+ ValidationError,
+)
from web3.providers.base import (
JSONBaseProvider,
)
+RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}
+
def _start_event_loop(loop):
asyncio.set_event_loop(loop)
@@ -32,14 +37,17 @@
class PersistentWebSocket:
- def __init__(self, endpoint_uri, loop):
+ def __init__(self, endpoint_uri, loop, websocket_kwargs):
self.ws = None
self.endpoint_uri = endpoint_uri
self.loop = loop
+ self.websocket_kwargs = websocket_kwargs
async def __aenter__(self):
if self.ws is None:
- self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)
+ self.ws = await websockets.connect(
+ uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs
+ )
return self.ws
async def __aexit__(self, exc_type, exc_val, exc_tb):
@@ -55,13 +63,26 @@
logger = logging.getLogger("web3.providers.WebsocketProvider")
_loop = None
- def __init__(self, endpoint_uri=None):
+ def __init__(self, endpoint_uri=None, websocket_kwargs=None):
self.endpoint_uri = endpoint_uri
if self.endpoint_uri is None:
self.endpoint_uri = get_default_endpoint()
if WebsocketProvider._loop is None:
WebsocketProvider._loop = _get_threaded_loop()
- self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)
+ if websocket_kwargs is None:
+ websocket_kwargs = {}
+ else:
+ found_restricted_keys = set(websocket_kwargs.keys()).intersection(
+ RESTRICTED_WEBSOCKET_KWARGS
+ )
+ if found_restricted_keys:
+ raise ValidationError(
+ '{0} are not allowed in websocket_kwargs, '
+ 'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)
+ )
+ self.conn = PersistentWebSocket(
+ self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs
+ )
         super().__init__()
 
     def __str__(self):
| {"golden_diff": "diff --git a/web3/providers/websocket.py b/web3/providers/websocket.py\n--- a/web3/providers/websocket.py\n+++ b/web3/providers/websocket.py\n@@ -8,10 +8,15 @@\n \n import websockets\n \n+from web3.exceptions import (\n+ ValidationError,\n+)\n from web3.providers.base import (\n JSONBaseProvider,\n )\n \n+RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}\n+\n \n def _start_event_loop(loop):\n asyncio.set_event_loop(loop)\n@@ -32,14 +37,17 @@\n \n class PersistentWebSocket:\n \n- def __init__(self, endpoint_uri, loop):\n+ def __init__(self, endpoint_uri, loop, websocket_kwargs):\n self.ws = None\n self.endpoint_uri = endpoint_uri\n self.loop = loop\n+ self.websocket_kwargs = websocket_kwargs\n \n async def __aenter__(self):\n if self.ws is None:\n- self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)\n+ self.ws = await websockets.connect(\n+ uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs\n+ )\n return self.ws\n \n async def __aexit__(self, exc_type, exc_val, exc_tb):\n@@ -55,13 +63,26 @@\n logger = logging.getLogger(\"web3.providers.WebsocketProvider\")\n _loop = None\n \n- def __init__(self, endpoint_uri=None):\n+ def __init__(self, endpoint_uri=None, websocket_kwargs=None):\n self.endpoint_uri = endpoint_uri\n if self.endpoint_uri is None:\n self.endpoint_uri = get_default_endpoint()\n if WebsocketProvider._loop is None:\n WebsocketProvider._loop = _get_threaded_loop()\n- self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)\n+ if websocket_kwargs is None:\n+ websocket_kwargs = {}\n+ else:\n+ found_restricted_keys = set(websocket_kwargs.keys()).intersection(\n+ RESTRICTED_WEBSOCKET_KWARGS\n+ )\n+ if found_restricted_keys:\n+ raise ValidationError(\n+ '{0} are not allowed in websocket_kwargs, '\n+ 'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)\n+ )\n+ self.conn = PersistentWebSocket(\n+ self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs\n+ )\n super().__init__()\n \n def __str__(self):\n", "issue": "Erorr in websockets.py: '<=' not supported between instances of 'int' and 'NoneType'\n* web3 (4.3.0)\r\n* websockets (4.0.1)\r\n* Python: 3.6\r\n* OS: osx HighSierra\r\n\r\n\r\n### What was wrong?\r\n\r\n`web3 = Web3(Web3.WebsocketProvider(\"ws://10.224.12.6:8546\"))`\r\n`web3.eth.syncing //returns data`\r\n\r\nThe websocket is clearly open but when I run a filter which is supposed to have many entries, I get the following error trace:\r\n\r\nUpon running: `data = web3.eth.getFilterLogs(new_block_filter.filter_id)`, I get:\r\n\r\n```\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in make_request(self, method, params)\r\n 81 WebsocketProvider._loop\r\n 82 )\r\n---> 83 return future.result()\r\n\r\n/anaconda3/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)\r\n 430 raise CancelledError()\r\n 431 elif self._state == FINISHED:\r\n--> 432 return self.__get_result()\r\n 433 else:\r\n 434 raise TimeoutError()\r\n\r\n/anaconda3/lib/python3.6/concurrent/futures/_base.py in __get_result(self)\r\n 382 def __get_result(self):\r\n 383 if self._exception:\r\n--> 384 raise self._exception\r\n 385 else:\r\n 386 return self._result\r\n\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in coro_make_request(self, request_data)\r\n 71 async with self.conn as conn:\r\n 72 await conn.send(request_data)\r\n---> 73 return json.loads(await conn.recv())\r\n 74 \r\n 75 def make_request(self, method, 
params):\r\n\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/protocol.py in recv(self)\r\n 321 next_message.cancel()\r\n 322 if not self.legacy_recv:\r\n--> 323 raise ConnectionClosed(self.close_code, self.close_reason)\r\n 324 \r\n 325 @asyncio.coroutine\r\n\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/exceptions.py in __init__(self, code, reason)\r\n 145 self.reason = reason\r\n 146 message = \"WebSocket connection is closed: \"\r\n--> 147 if 3000 <= code < 4000:\r\n 148 explanation = \"registered\"\r\n 149 elif 4000 <= code < 5000:\r\n\r\nTypeError: '<=' not supported between instances of 'int' and 'NoneType'\r\n```\r\n\r\nThe same filter runs fine (albeit a bit slow) using `Web3.HTTPProvider()`\r\n\r\n\n", "before_files": [{"content": "import asyncio\nimport json\nimport logging\nimport os\nfrom threading import (\n Thread,\n)\n\nimport websockets\n\nfrom web3.providers.base import (\n JSONBaseProvider,\n)\n\n\ndef _start_event_loop(loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()\n loop.close()\n\n\ndef _get_threaded_loop():\n new_loop = asyncio.new_event_loop()\n thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)\n thread_loop.start()\n return new_loop\n\n\ndef get_default_endpoint():\n return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')\n\n\nclass PersistentWebSocket:\n\n def __init__(self, endpoint_uri, loop):\n self.ws = None\n self.endpoint_uri = endpoint_uri\n self.loop = loop\n\n async def __aenter__(self):\n if self.ws is None:\n self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)\n return self.ws\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if exc_val is not None:\n try:\n await self.ws.close()\n except Exception:\n pass\n self.ws = None\n\n\nclass WebsocketProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.WebsocketProvider\")\n _loop = None\n\n def __init__(self, endpoint_uri=None):\n self.endpoint_uri = endpoint_uri\n if self.endpoint_uri is None:\n self.endpoint_uri = get_default_endpoint()\n if WebsocketProvider._loop is None:\n WebsocketProvider._loop = _get_threaded_loop()\n self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)\n super().__init__()\n\n def __str__(self):\n return \"WS connection {0}\".format(self.endpoint_uri)\n\n async def coro_make_request(self, request_data):\n async with self.conn as conn:\n await conn.send(request_data)\n return json.loads(await conn.recv())\n\n def make_request(self, method, params):\n self.logger.debug(\"Making request WebSocket. URI: %s, \"\n \"Method: %s\", self.endpoint_uri, method)\n request_data = self.encode_rpc_request(method, params)\n future = asyncio.run_coroutine_threadsafe(\n self.coro_make_request(request_data),\n WebsocketProvider._loop\n )\n return future.result()\n", "path": "web3/providers/websocket.py"}]} | 1,909 | 544 |
gh_patches_debug_34363 | rasdani/github-patches | git_diff | localstack__localstack-1082 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Localstack Elasticsearch plugin Ingest User Agent Processor not available
Plugin `Ingest User Agent Processor` is installed by default for Elasticsearch (ELK) on AWS. This is not the case in Localstack, and I think we basically expect it to be.
In addition, I was not able to install it manually through the command `bin/elasticsearch-plugin install ingest-user-agent`, as `bin/elasticsearch-plugin` is missing.
</issue>
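A stock Elasticsearch 6.x archive ships a `bin/elasticsearch-plugin` launcher, and the fix rendered further down drives it from the unpacked install directory. A minimal sketch of the same idea (the install directory and plugin list are assumptions to adapt):

```
import os
import subprocess

# Hypothetical location; LocalStack unpacks Elasticsearch under its infra/ folder.
INSTALL_DIR_ES = '/opt/code/localstack/localstack/infra/elasticsearch'
PLUGINS = ['ingest-user-agent', 'ingest-attachment', 'analysis-icu']

plugin_binary = os.path.join(INSTALL_DIR_ES, 'bin', 'elasticsearch-plugin')
for plugin in PLUGINS:
    # Same effect as running `bin/elasticsearch-plugin install <name>` by hand.
    subprocess.run([plugin_binary, 'install', plugin], check=True)
```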
<code>
[start of localstack/constants.py]
1 import os
2 import localstack_client.config
3
4 # LocalStack version
5 VERSION = '0.8.10'
6
7 # default AWS region
8 if 'DEFAULT_REGION' not in os.environ:
9 os.environ['DEFAULT_REGION'] = 'us-east-1'
10 DEFAULT_REGION = os.environ['DEFAULT_REGION']
11
12 # constant to represent the "local" region, i.e., local machine
13 REGION_LOCAL = 'local'
14
15 # dev environment
16 ENV_DEV = 'dev'
17
18 # backend service ports, for services that are behind a proxy (counting down from 4566)
19 DEFAULT_PORT_APIGATEWAY_BACKEND = 4566
20 DEFAULT_PORT_KINESIS_BACKEND = 4565
21 DEFAULT_PORT_DYNAMODB_BACKEND = 4564
22 DEFAULT_PORT_S3_BACKEND = 4563
23 DEFAULT_PORT_SNS_BACKEND = 4562
24 DEFAULT_PORT_SQS_BACKEND = 4561
25 DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560
26 DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559
27
28 DEFAULT_PORT_WEB_UI = 8080
29
30 LOCALHOST = 'localhost'
31
32 # version of the Maven dependency with Java utility code
33 LOCALSTACK_MAVEN_VERSION = '0.1.15'
34
35 # map of default service APIs and ports to be spun up (fetch map from localstack_client)
36 DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
37
38 # host to bind to when starting the services
39 BIND_HOST = '0.0.0.0'
40
41 # AWS user account ID used for tests
42 TEST_AWS_ACCOUNT_ID = '000000000000'
43 os.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID
44
45 # root code folder
46 LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
47
48 # virtualenv folder
49 LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')
50 if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
51 # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/
52 LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))
53
54 # API Gateway path to indicate a user request sent to the gateway
55 PATH_USER_REQUEST = '_user_request_'
56
57 # name of LocalStack Docker image
58 DOCKER_IMAGE_NAME = 'localstack/localstack'
59
60 # environment variable name to tag local test runs
61 ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'
62
63 # content types
64 APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'
65 APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'
66 APPLICATION_JSON = 'application/json'
67
68 # Lambda defaults
69 LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID
70
71 # installation constants
72 ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'
73 DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
74 ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'
75 STS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
76
77 # API endpoint for analytics events
78 API_ENDPOINT = 'https://api.localstack.cloud/v1'
79
[end of localstack/constants.py]
[start of localstack/services/install.py]
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 import glob
6 import shutil
7 import logging
8 import tempfile
9 from localstack.constants import (DEFAULT_SERVICE_PORTS, ELASTICMQ_JAR_URL, STS_JAR_URL,
10 ELASTICSEARCH_JAR_URL, DYNAMODB_JAR_URL, LOCALSTACK_MAVEN_VERSION)
11 from localstack.utils.common import download, parallelize, run, mkdir, save_file, unzip, rm_rf, chmod_r
12
13 THIS_PATH = os.path.dirname(os.path.realpath(__file__))
14 ROOT_PATH = os.path.realpath(os.path.join(THIS_PATH, '..'))
15
16 INSTALL_DIR_INFRA = '%s/infra' % ROOT_PATH
17 INSTALL_DIR_NPM = '%s/node_modules' % ROOT_PATH
18 INSTALL_DIR_ES = '%s/elasticsearch' % INSTALL_DIR_INFRA
19 INSTALL_DIR_DDB = '%s/dynamodb' % INSTALL_DIR_INFRA
20 INSTALL_DIR_KCL = '%s/amazon-kinesis-client' % INSTALL_DIR_INFRA
21 INSTALL_DIR_ELASTICMQ = '%s/elasticmq' % INSTALL_DIR_INFRA
22 INSTALL_PATH_LOCALSTACK_FAT_JAR = '%s/localstack-utils-fat.jar' % INSTALL_DIR_INFRA
23 TMP_ARCHIVE_ES = os.path.join(tempfile.gettempdir(), 'localstack.es.zip')
24 TMP_ARCHIVE_DDB = os.path.join(tempfile.gettempdir(), 'localstack.ddb.zip')
25 TMP_ARCHIVE_STS = os.path.join(tempfile.gettempdir(), 'aws-java-sdk-sts.jar')
26 TMP_ARCHIVE_ELASTICMQ = os.path.join(tempfile.gettempdir(), 'elasticmq-server.jar')
27 URL_LOCALSTACK_FAT_JAR = ('http://central.maven.org/maven2/' +
28 'cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar').format(v=LOCALSTACK_MAVEN_VERSION)
29
30 # set up logger
31 LOGGER = logging.getLogger(__name__)
32
33
34 def install_elasticsearch():
35 if not os.path.exists(INSTALL_DIR_ES):
36 LOGGER.info('Downloading and installing local Elasticsearch server. This may take some time.')
37 mkdir(INSTALL_DIR_INFRA)
38 # download and extract archive
39 download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, TMP_ARCHIVE_ES, INSTALL_DIR_INFRA)
40 elasticsearch_dir = glob.glob(os.path.join(INSTALL_DIR_INFRA, 'elasticsearch*'))
41 if not elasticsearch_dir:
42 raise Exception('Unable to find Elasticsearch folder in %s' % INSTALL_DIR_INFRA)
43 shutil.move(elasticsearch_dir[0], INSTALL_DIR_ES)
44
45 for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
46 dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)
47 mkdir(dir_path)
48 chmod_r(dir_path, 0o777)
49
50
51 def install_elasticmq():
52 if not os.path.exists(INSTALL_DIR_ELASTICMQ):
53 LOGGER.info('Downloading and installing local ElasticMQ server. This may take some time.')
54 mkdir(INSTALL_DIR_ELASTICMQ)
55 # download archive
56 if not os.path.exists(TMP_ARCHIVE_ELASTICMQ):
57 download(ELASTICMQ_JAR_URL, TMP_ARCHIVE_ELASTICMQ)
58 shutil.copy(TMP_ARCHIVE_ELASTICMQ, INSTALL_DIR_ELASTICMQ)
59
60
61 def install_kinesalite():
62 target_dir = '%s/kinesalite' % INSTALL_DIR_NPM
63 if not os.path.exists(target_dir):
64 LOGGER.info('Downloading and installing local Kinesis server. This may take some time.')
65 run('cd "%s" && npm install' % ROOT_PATH)
66
67
68 def install_dynamodb_local():
69 if not os.path.exists(INSTALL_DIR_DDB):
70 LOGGER.info('Downloading and installing local DynamoDB server. This may take some time.')
71 mkdir(INSTALL_DIR_DDB)
72 # download and extract archive
73 download_and_extract_with_retry(DYNAMODB_JAR_URL, TMP_ARCHIVE_DDB, INSTALL_DIR_DDB)
74
75 # fix for Alpine, otherwise DynamoDBLocal fails with:
76 # DynamoDBLocal_lib/libsqlite4java-linux-amd64.so: __memcpy_chk: symbol not found
77 if is_alpine():
78 ddb_libs_dir = '%s/DynamoDBLocal_lib' % INSTALL_DIR_DDB
79 patched_marker = '%s/alpine_fix_applied' % ddb_libs_dir
80 if not os.path.exists(patched_marker):
81 patched_lib = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
82 'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/libsqlite4java-linux-amd64.so')
83 patched_jar = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
84 'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/sqlite4java.jar')
85 run("curl -L -o %s/libsqlite4java-linux-amd64.so '%s'" % (ddb_libs_dir, patched_lib))
86 run("curl -L -o %s/sqlite4java.jar '%s'" % (ddb_libs_dir, patched_jar))
87 save_file(patched_marker, '')
88
89 # fix logging configuration for DynamoDBLocal
90 log4j2_config = """<Configuration status="WARN">
91 <Appenders>
92 <Console name="Console" target="SYSTEM_OUT">
93 <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
94 </Console>
95 </Appenders>
96 <Loggers>
97 <Root level="WARN"><AppenderRef ref="Console"/></Root>
98 </Loggers>
99 </Configuration>"""
100 log4j2_file = os.path.join(INSTALL_DIR_DDB, 'log4j2.xml')
101 save_file(log4j2_file, log4j2_config)
102 run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
103
104
105 def install_amazon_kinesis_client_libs():
106 # install KCL/STS JAR files
107 if not os.path.exists(INSTALL_DIR_KCL):
108 mkdir(INSTALL_DIR_KCL)
109 if not os.path.exists(TMP_ARCHIVE_STS):
110 download(STS_JAR_URL, TMP_ARCHIVE_STS)
111 shutil.copy(TMP_ARCHIVE_STS, INSTALL_DIR_KCL)
112 # Compile Java files
113 from localstack.utils.kinesis import kclipy_helper
114 classpath = kclipy_helper.get_kcl_classpath()
115 java_files = '%s/utils/kinesis/java/com/atlassian/*.java' % ROOT_PATH
116 class_files = '%s/utils/kinesis/java/com/atlassian/*.class' % ROOT_PATH
117 if not glob.glob(class_files):
118 run('javac -cp "%s" %s' % (classpath, java_files))
119
120
121 def install_lambda_java_libs():
122 # install LocalStack "fat" JAR file (contains all dependencies)
123 if not os.path.exists(INSTALL_PATH_LOCALSTACK_FAT_JAR):
124 LOGGER.info('Downloading and installing LocalStack Java libraries. This may take some time.')
125 download(URL_LOCALSTACK_FAT_JAR, INSTALL_PATH_LOCALSTACK_FAT_JAR)
126
127
128 def install_component(name):
129 if name == 'kinesis':
130 install_kinesalite()
131 elif name == 'dynamodb':
132 install_dynamodb_local()
133 elif name == 'es':
134 install_elasticsearch()
135 elif name == 'sqs':
136 install_elasticmq()
137
138
139 def install_components(names):
140 parallelize(install_component, names)
141 install_lambda_java_libs()
142
143
144 def install_all_components():
145 install_components(DEFAULT_SERVICE_PORTS.keys())
146
147
148 # -----------------
149 # HELPER FUNCTIONS
150 # -----------------
151
152
153 def is_alpine():
154 try:
155 run('cat /etc/issue | grep Alpine', print_error=False)
156 return True
157 except Exception:
158 return False
159
160
161 def download_and_extract_with_retry(archive_url, tmp_archive, target_dir):
162
163 def download_and_extract():
164 if not os.path.exists(tmp_archive):
165 download(archive_url, tmp_archive)
166 unzip(tmp_archive, target_dir)
167
168 try:
169 download_and_extract()
170 except Exception:
171 # try deleting and re-downloading the zip file
172 LOGGER.info('Unable to extract file, re-downloading ZIP archive: %s' % tmp_archive)
173 rm_rf(tmp_archive)
174 download_and_extract()
175
176
177 if __name__ == '__main__':
178
179 if len(sys.argv) > 1:
180 if sys.argv[1] == 'libs':
181 print('Initializing installation.')
182 logging.basicConfig(level=logging.INFO)
183 logging.getLogger('requests').setLevel(logging.WARNING)
184 install_all_components()
185 print('Done.')
186 elif sys.argv[1] == 'testlibs':
187 # Install additional libraries for testing
188 install_amazon_kinesis_client_libs()
189
[end of localstack/services/install.py]
</code>
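Once the plugins are installed and Elasticsearch restarted, their presence can be verified over the REST API. A hedged check (assumes LocalStack's default Elasticsearch endpoint on port 4571):

```
import requests

nodes = requests.get('http://localhost:4571/_nodes/plugins').json()
for node in nodes.get('nodes', {}).values():
    installed = [plugin['name'] for plugin in node.get('plugins', [])]
    print(installed)  # should include 'ingest-user-agent' once the fix is in place
```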
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/localstack/constants.py b/localstack/constants.py
--- a/localstack/constants.py
+++ b/localstack/constants.py
@@ -70,6 +70,9 @@
# installation constants
ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'
+# https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html
+ELASTICSEARCH_PLUGIN_LIST = ['analysis-icu', 'ingest-attachment', 'ingest-user-agent', 'analysis-kuromoji',
+ 'mapper-murmur3', 'mapper-size', 'analysis-phonetic', 'analysis-smartcn', 'analysis-stempel', 'analysis-ukrainian']
DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'
STS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
diff --git a/localstack/services/install.py b/localstack/services/install.py
--- a/localstack/services/install.py
+++ b/localstack/services/install.py
@@ -7,7 +7,7 @@
import logging
import tempfile
from localstack.constants import (DEFAULT_SERVICE_PORTS, ELASTICMQ_JAR_URL, STS_JAR_URL,
- ELASTICSEARCH_JAR_URL, DYNAMODB_JAR_URL, LOCALSTACK_MAVEN_VERSION)
+ ELASTICSEARCH_JAR_URL, ELASTICSEARCH_PLUGIN_LIST, DYNAMODB_JAR_URL, LOCALSTACK_MAVEN_VERSION)
from localstack.utils.common import download, parallelize, run, mkdir, save_file, unzip, rm_rf, chmod_r
THIS_PATH = os.path.dirname(os.path.realpath(__file__))
@@ -47,6 +47,14 @@
mkdir(dir_path)
chmod_r(dir_path, 0o777)
+ # install default plugins
+ for plugin in ELASTICSEARCH_PLUGIN_LIST:
+ if is_alpine():
+ # https://github.com/pires/docker-elasticsearch/issues/56
+ os.environ['ES_TMPDIR'] = '/tmp'
+ plugin_binary = os.path.join(INSTALL_DIR_ES, 'bin', 'elasticsearch-plugin')
+ run('%s install %s' % (plugin_binary, plugin))
+
def install_elasticmq():
if not os.path.exists(INSTALL_DIR_ELASTICMQ):
| {"golden_diff": "diff --git a/localstack/constants.py b/localstack/constants.py\n--- a/localstack/constants.py\n+++ b/localstack/constants.py\n@@ -70,6 +70,9 @@\n \n # installation constants\n ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\n+# https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html\n+ELASTICSEARCH_PLUGIN_LIST = ['analysis-icu', 'ingest-attachment', 'ingest-user-agent', 'analysis-kuromoji',\n+ 'mapper-murmur3', 'mapper-size', 'analysis-phonetic', 'analysis-smartcn', 'analysis-stempel', 'analysis-ukrainian']\n DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\n ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\n STS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\ndiff --git a/localstack/services/install.py b/localstack/services/install.py\n--- a/localstack/services/install.py\n+++ b/localstack/services/install.py\n@@ -7,7 +7,7 @@\n import logging\n import tempfile\n from localstack.constants import (DEFAULT_SERVICE_PORTS, ELASTICMQ_JAR_URL, STS_JAR_URL,\n- ELASTICSEARCH_JAR_URL, DYNAMODB_JAR_URL, LOCALSTACK_MAVEN_VERSION)\n+ ELASTICSEARCH_JAR_URL, ELASTICSEARCH_PLUGIN_LIST, DYNAMODB_JAR_URL, LOCALSTACK_MAVEN_VERSION)\n from localstack.utils.common import download, parallelize, run, mkdir, save_file, unzip, rm_rf, chmod_r\n \n THIS_PATH = os.path.dirname(os.path.realpath(__file__))\n@@ -47,6 +47,14 @@\n mkdir(dir_path)\n chmod_r(dir_path, 0o777)\n \n+ # install default plugins\n+ for plugin in ELASTICSEARCH_PLUGIN_LIST:\n+ if is_alpine():\n+ # https://github.com/pires/docker-elasticsearch/issues/56\n+ os.environ['ES_TMPDIR'] = '/tmp'\n+ plugin_binary = os.path.join(INSTALL_DIR_ES, 'bin', 'elasticsearch-plugin')\n+ run('%s install %s' % (plugin_binary, plugin))\n+\n \n def install_elasticmq():\n if not os.path.exists(INSTALL_DIR_ELASTICMQ):\n", "issue": "Localstack Elasticsearch plugin Ingest User Agent Processor not available\nPlugin `Ingest User Agent Processor` is installed by default for Elasticsearch (ELK) on AWS. 
It is not the case in Localstack and think we basically expect it.\r\n\r\nIn addition, I was not able to install it manually through command `bin/elasticsearch-plugin install ingest-user-agent` as bin/elasticsearch-plugin is missing.\n", "before_files": [{"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.10'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.15'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n", "path": "localstack/constants.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport glob\nimport shutil\nimport logging\nimport tempfile\nfrom localstack.constants import (DEFAULT_SERVICE_PORTS, ELASTICMQ_JAR_URL, STS_JAR_URL,\n ELASTICSEARCH_JAR_URL, DYNAMODB_JAR_URL, LOCALSTACK_MAVEN_VERSION)\nfrom localstack.utils.common import download, parallelize, run, mkdir, save_file, unzip, rm_rf, 
chmod_r\n\nTHIS_PATH = os.path.dirname(os.path.realpath(__file__))\nROOT_PATH = os.path.realpath(os.path.join(THIS_PATH, '..'))\n\nINSTALL_DIR_INFRA = '%s/infra' % ROOT_PATH\nINSTALL_DIR_NPM = '%s/node_modules' % ROOT_PATH\nINSTALL_DIR_ES = '%s/elasticsearch' % INSTALL_DIR_INFRA\nINSTALL_DIR_DDB = '%s/dynamodb' % INSTALL_DIR_INFRA\nINSTALL_DIR_KCL = '%s/amazon-kinesis-client' % INSTALL_DIR_INFRA\nINSTALL_DIR_ELASTICMQ = '%s/elasticmq' % INSTALL_DIR_INFRA\nINSTALL_PATH_LOCALSTACK_FAT_JAR = '%s/localstack-utils-fat.jar' % INSTALL_DIR_INFRA\nTMP_ARCHIVE_ES = os.path.join(tempfile.gettempdir(), 'localstack.es.zip')\nTMP_ARCHIVE_DDB = os.path.join(tempfile.gettempdir(), 'localstack.ddb.zip')\nTMP_ARCHIVE_STS = os.path.join(tempfile.gettempdir(), 'aws-java-sdk-sts.jar')\nTMP_ARCHIVE_ELASTICMQ = os.path.join(tempfile.gettempdir(), 'elasticmq-server.jar')\nURL_LOCALSTACK_FAT_JAR = ('http://central.maven.org/maven2/' +\n 'cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar').format(v=LOCALSTACK_MAVEN_VERSION)\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n\ndef install_elasticsearch():\n if not os.path.exists(INSTALL_DIR_ES):\n LOGGER.info('Downloading and installing local Elasticsearch server. This may take some time.')\n mkdir(INSTALL_DIR_INFRA)\n # download and extract archive\n download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, TMP_ARCHIVE_ES, INSTALL_DIR_INFRA)\n elasticsearch_dir = glob.glob(os.path.join(INSTALL_DIR_INFRA, 'elasticsearch*'))\n if not elasticsearch_dir:\n raise Exception('Unable to find Elasticsearch folder in %s' % INSTALL_DIR_INFRA)\n shutil.move(elasticsearch_dir[0], INSTALL_DIR_ES)\n\n for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):\n dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)\n mkdir(dir_path)\n chmod_r(dir_path, 0o777)\n\n\ndef install_elasticmq():\n if not os.path.exists(INSTALL_DIR_ELASTICMQ):\n LOGGER.info('Downloading and installing local ElasticMQ server. This may take some time.')\n mkdir(INSTALL_DIR_ELASTICMQ)\n # download archive\n if not os.path.exists(TMP_ARCHIVE_ELASTICMQ):\n download(ELASTICMQ_JAR_URL, TMP_ARCHIVE_ELASTICMQ)\n shutil.copy(TMP_ARCHIVE_ELASTICMQ, INSTALL_DIR_ELASTICMQ)\n\n\ndef install_kinesalite():\n target_dir = '%s/kinesalite' % INSTALL_DIR_NPM\n if not os.path.exists(target_dir):\n LOGGER.info('Downloading and installing local Kinesis server. This may take some time.')\n run('cd \"%s\" && npm install' % ROOT_PATH)\n\n\ndef install_dynamodb_local():\n if not os.path.exists(INSTALL_DIR_DDB):\n LOGGER.info('Downloading and installing local DynamoDB server. 
This may take some time.')\n mkdir(INSTALL_DIR_DDB)\n # download and extract archive\n download_and_extract_with_retry(DYNAMODB_JAR_URL, TMP_ARCHIVE_DDB, INSTALL_DIR_DDB)\n\n # fix for Alpine, otherwise DynamoDBLocal fails with:\n # DynamoDBLocal_lib/libsqlite4java-linux-amd64.so: __memcpy_chk: symbol not found\n if is_alpine():\n ddb_libs_dir = '%s/DynamoDBLocal_lib' % INSTALL_DIR_DDB\n patched_marker = '%s/alpine_fix_applied' % ddb_libs_dir\n if not os.path.exists(patched_marker):\n patched_lib = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +\n 'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/libsqlite4java-linux-amd64.so')\n patched_jar = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +\n 'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/sqlite4java.jar')\n run(\"curl -L -o %s/libsqlite4java-linux-amd64.so '%s'\" % (ddb_libs_dir, patched_lib))\n run(\"curl -L -o %s/sqlite4java.jar '%s'\" % (ddb_libs_dir, patched_jar))\n save_file(patched_marker, '')\n\n # fix logging configuration for DynamoDBLocal\n log4j2_config = \"\"\"<Configuration status=\"WARN\">\n <Appenders>\n <Console name=\"Console\" target=\"SYSTEM_OUT\">\n <PatternLayout pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\"/>\n </Console>\n </Appenders>\n <Loggers>\n <Root level=\"WARN\"><AppenderRef ref=\"Console\"/></Root>\n </Loggers>\n </Configuration>\"\"\"\n log4j2_file = os.path.join(INSTALL_DIR_DDB, 'log4j2.xml')\n save_file(log4j2_file, log4j2_config)\n run('cd \"%s\" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)\n\n\ndef install_amazon_kinesis_client_libs():\n # install KCL/STS JAR files\n if not os.path.exists(INSTALL_DIR_KCL):\n mkdir(INSTALL_DIR_KCL)\n if not os.path.exists(TMP_ARCHIVE_STS):\n download(STS_JAR_URL, TMP_ARCHIVE_STS)\n shutil.copy(TMP_ARCHIVE_STS, INSTALL_DIR_KCL)\n # Compile Java files\n from localstack.utils.kinesis import kclipy_helper\n classpath = kclipy_helper.get_kcl_classpath()\n java_files = '%s/utils/kinesis/java/com/atlassian/*.java' % ROOT_PATH\n class_files = '%s/utils/kinesis/java/com/atlassian/*.class' % ROOT_PATH\n if not glob.glob(class_files):\n run('javac -cp \"%s\" %s' % (classpath, java_files))\n\n\ndef install_lambda_java_libs():\n # install LocalStack \"fat\" JAR file (contains all dependencies)\n if not os.path.exists(INSTALL_PATH_LOCALSTACK_FAT_JAR):\n LOGGER.info('Downloading and installing LocalStack Java libraries. 
This may take some time.')\n download(URL_LOCALSTACK_FAT_JAR, INSTALL_PATH_LOCALSTACK_FAT_JAR)\n\n\ndef install_component(name):\n if name == 'kinesis':\n install_kinesalite()\n elif name == 'dynamodb':\n install_dynamodb_local()\n elif name == 'es':\n install_elasticsearch()\n elif name == 'sqs':\n install_elasticmq()\n\n\ndef install_components(names):\n parallelize(install_component, names)\n install_lambda_java_libs()\n\n\ndef install_all_components():\n install_components(DEFAULT_SERVICE_PORTS.keys())\n\n\n# -----------------\n# HELPER FUNCTIONS\n# -----------------\n\n\ndef is_alpine():\n try:\n run('cat /etc/issue | grep Alpine', print_error=False)\n return True\n except Exception:\n return False\n\n\ndef download_and_extract_with_retry(archive_url, tmp_archive, target_dir):\n\n def download_and_extract():\n if not os.path.exists(tmp_archive):\n download(archive_url, tmp_archive)\n unzip(tmp_archive, target_dir)\n\n try:\n download_and_extract()\n except Exception:\n # try deleting and re-downloading the zip file\n LOGGER.info('Unable to extract file, re-downloading ZIP archive: %s' % tmp_archive)\n rm_rf(tmp_archive)\n download_and_extract()\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n if sys.argv[1] == 'libs':\n print('Initializing installation.')\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('requests').setLevel(logging.WARNING)\n install_all_components()\n print('Done.')\n elif sys.argv[1] == 'testlibs':\n # Install additional libraries for testing\n install_amazon_kinesis_client_libs()\n", "path": "localstack/services/install.py"}]} | 3,922 | 591 |
gh_patches_debug_19160 | rasdani/github-patches | git_diff | marshmallow-code__webargs-368 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'typing' has no attribute 'NoReturn' with Python 3.5.3
I get this error when running the tests with Python 3.5.3.
```
tests/test_py3/test_aiohttpparser_async_functions.py:6: in <module>
from webargs.aiohttpparser import parser, use_args, use_kwargs
webargs/aiohttpparser.py:72: in <module>
class AIOHTTPParser(AsyncParser):
webargs/aiohttpparser.py:148: in AIOHTTPParser
) -> typing.NoReturn:
E AttributeError: module 'typing' has no attribute 'NoReturn'
```
The docs say [`typing.NoReturn`](https://docs.python.org/3/library/typing.html#typing.NoReturn) was added in 3.6.5. However, [the tests pass on Travis](https://travis-ci.org/marshmallow-code/webargs/jobs/486701760) with Python 3.5.6.
</issue>
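For readers unfamiliar with the failure mode: the annotation `-> typing.NoReturn` is evaluated while the class body executes, so merely importing the module fails on interpreters whose `typing` module predates `NoReturn`. A minimal, illustrative sketch of the usual workaround, quoting the annotation so it is stored as a string and never looked up at class-definition time, is shown below; the class and method names are placeholders, not webargs code.

```python
import typing


class ExampleParser:
    # The quoted annotation is kept as the literal string "typing.NoReturn",
    # so the attribute is never accessed when the class body runs and the
    # module still imports on older Pythons that lack typing.NoReturn.
    def handle_error(self, error: Exception) -> "typing.NoReturn":
        raise RuntimeError(str(error))
```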
<code>
[start of webargs/aiohttpparser.py]
1 """aiohttp request argument parsing module.
2
3 Example: ::
4
5 import asyncio
6 from aiohttp import web
7
8 from webargs import fields
9 from webargs.aiohttpparser import use_args
10
11
12 hello_args = {
13 'name': fields.Str(required=True)
14 }
15 @asyncio.coroutine
16 @use_args(hello_args)
17 def index(request, args):
18 return web.Response(
19 body='Hello {}'.format(args['name']).encode('utf-8')
20 )
21
22 app = web.Application()
23 app.router.add_route('GET', '/', index)
24 """
25 import typing
26
27 from aiohttp import web
28 from aiohttp.web import Request
29 from aiohttp import web_exceptions
30 from marshmallow import Schema, ValidationError
31 from marshmallow.fields import Field
32
33 from webargs import core
34 from webargs.core import json
35 from webargs.asyncparser import AsyncParser
36
37
38 def is_json_request(req: Request) -> bool:
39 content_type = req.content_type
40 return core.is_json(content_type)
41
42
43 class HTTPUnprocessableEntity(web.HTTPClientError):
44 status_code = 422
45
46
47 # Mapping of status codes to exception classes
48 # Adapted from werkzeug
49 exception_map = {422: HTTPUnprocessableEntity}
50
51
52 def _find_exceptions() -> None:
53 for name in web_exceptions.__all__:
54 obj = getattr(web_exceptions, name)
55 try:
56 is_http_exception = issubclass(obj, web_exceptions.HTTPException)
57 except TypeError:
58 is_http_exception = False
59 if not is_http_exception or obj.status_code is None:
60 continue
61 old_obj = exception_map.get(obj.status_code, None)
62 if old_obj is not None and issubclass(obj, old_obj):
63 continue
64 exception_map[obj.status_code] = obj
65
66
67 # Collect all exceptions from aiohttp.web_exceptions
68 _find_exceptions()
69 del _find_exceptions
70
71
72 class AIOHTTPParser(AsyncParser):
73 """aiohttp request argument parser."""
74
75 __location_map__ = dict(
76 match_info="parse_match_info", **core.Parser.__location_map__
77 )
78
79 def parse_querystring(self, req: Request, name: str, field: Field) -> typing.Any:
80 """Pull a querystring value from the request."""
81 return core.get_value(req.query, name, field)
82
83 async def parse_form(self, req: Request, name: str, field: Field) -> typing.Any:
84 """Pull a form value from the request."""
85 post_data = self._cache.get("post")
86 if post_data is None:
87 self._cache["post"] = await req.post()
88 return core.get_value(self._cache["post"], name, field)
89
90 async def parse_json(self, req: Request, name: str, field: Field) -> typing.Any:
91 """Pull a json value from the request."""
92 json_data = self._cache.get("json")
93 if json_data is None:
94 if not (req.body_exists and is_json_request(req)):
95 return core.missing
96 try:
97 json_data = await req.json(loads=json.loads)
98 except json.JSONDecodeError as e:
99 if e.doc == "":
100 return core.missing
101 else:
102 return self.handle_invalid_json_error(e, req)
103 self._cache["json"] = json_data
104 return core.get_value(json_data, name, field, allow_many_nested=True)
105
106 def parse_headers(self, req: Request, name: str, field: Field) -> typing.Any:
107 """Pull a value from the header data."""
108 return core.get_value(req.headers, name, field)
109
110 def parse_cookies(self, req: Request, name: str, field: Field) -> typing.Any:
111 """Pull a value from the cookiejar."""
112 return core.get_value(req.cookies, name, field)
113
114 def parse_files(self, req: Request, name: str, field: Field) -> None:
115 raise NotImplementedError(
116 "parse_files is not implemented. You may be able to use parse_form for "
117 "parsing upload data."
118 )
119
120 def parse_match_info(self, req: Request, name: str, field: Field) -> typing.Any:
121 """Pull a value from the request's ``match_info``."""
122 return core.get_value(req.match_info, name, field)
123
124 def get_request_from_view_args(
125 self, view: typing.Callable, args: typing.Iterable, kwargs: typing.Mapping
126 ) -> Request:
127 """Get request object from a handler function or method. Used internally by
128 ``use_args`` and ``use_kwargs``.
129 """
130 req = None
131 for arg in args:
132 if isinstance(arg, web.Request):
133 req = arg
134 break
135 elif isinstance(arg, web.View):
136 req = arg.request
137 break
138 assert isinstance(req, web.Request), "Request argument not found for handler"
139 return req
140
141 def handle_error(
142 self,
143 error: ValidationError,
144 req: Request,
145 schema: Schema,
146 error_status_code: typing.Union[int, None] = None,
147 error_headers: typing.Union[typing.Mapping[str, str], None] = None,
148 ) -> typing.NoReturn:
149 """Handle ValidationErrors and return a JSON response of error messages
150 to the client.
151 """
152 error_class = exception_map.get(
153 error_status_code or self.DEFAULT_VALIDATION_STATUS
154 )
155 if not error_class:
156 raise LookupError("No exception for {0}".format(error_status_code))
157 headers = error_headers
158 raise error_class(
159 body=json.dumps(error.messages).encode("utf-8"),
160 headers=headers,
161 content_type="application/json",
162 )
163
164 def handle_invalid_json_error(
165 self, error: json.JSONDecodeError, req: Request, *args, **kwargs
166 ) -> typing.NoReturn:
167 error_class = exception_map[400]
168 messages = {"json": ["Invalid JSON body."]}
169 raise error_class(
170 body=json.dumps(messages).encode("utf-8"), content_type="application/json"
171 )
172
173
174 parser = AIOHTTPParser()
175 use_args = parser.use_args # type: typing.Callable
176 use_kwargs = parser.use_kwargs # type: typing.Callable
177
[end of webargs/aiohttpparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/webargs/aiohttpparser.py b/webargs/aiohttpparser.py
--- a/webargs/aiohttpparser.py
+++ b/webargs/aiohttpparser.py
@@ -145,7 +145,7 @@
schema: Schema,
error_status_code: typing.Union[int, None] = None,
error_headers: typing.Union[typing.Mapping[str, str], None] = None,
- ) -> typing.NoReturn:
+ ) -> "typing.NoReturn":
"""Handle ValidationErrors and return a JSON response of error messages
to the client.
"""
@@ -163,7 +163,7 @@
def handle_invalid_json_error(
self, error: json.JSONDecodeError, req: Request, *args, **kwargs
- ) -> typing.NoReturn:
+ ) -> "typing.NoReturn":
error_class = exception_map[400]
messages = {"json": ["Invalid JSON body."]}
raise error_class(
| {"golden_diff": "diff --git a/webargs/aiohttpparser.py b/webargs/aiohttpparser.py\n--- a/webargs/aiohttpparser.py\n+++ b/webargs/aiohttpparser.py\n@@ -145,7 +145,7 @@\n schema: Schema,\n error_status_code: typing.Union[int, None] = None,\n error_headers: typing.Union[typing.Mapping[str, str], None] = None,\n- ) -> typing.NoReturn:\n+ ) -> \"typing.NoReturn\":\n \"\"\"Handle ValidationErrors and return a JSON response of error messages\n to the client.\n \"\"\"\n@@ -163,7 +163,7 @@\n \n def handle_invalid_json_error(\n self, error: json.JSONDecodeError, req: Request, *args, **kwargs\n- ) -> typing.NoReturn:\n+ ) -> \"typing.NoReturn\":\n error_class = exception_map[400]\n messages = {\"json\": [\"Invalid JSON body.\"]}\n raise error_class(\n", "issue": "AttributeError: module 'typing' has no attribute 'NoReturn' with Python 3.5.3\nI get this error when running the tests with Python 3.5.3.\r\n\r\n```\r\ntests/test_py3/test_aiohttpparser_async_functions.py:6: in <module>\r\n from webargs.aiohttpparser import parser, use_args, use_kwargs\r\nwebargs/aiohttpparser.py:72: in <module>\r\n class AIOHTTPParser(AsyncParser):\r\nwebargs/aiohttpparser.py:148: in AIOHTTPParser\r\n ) -> typing.NoReturn:\r\nE AttributeError: module 'typing' has no attribute 'NoReturn'\r\n```\r\n\r\nThe docs say [`typing.NoReturn`](https://docs.python.org/3/library/typing.html#typing.NoReturn) was added in 3.6.5. However, [the tests pass on Travis](https://travis-ci.org/marshmallow-code/webargs/jobs/486701760) with Python 3.5.6.\n", "before_files": [{"content": "\"\"\"aiohttp request argument parsing module.\n\nExample: ::\n\n import asyncio\n from aiohttp import web\n\n from webargs import fields\n from webargs.aiohttpparser import use_args\n\n\n hello_args = {\n 'name': fields.Str(required=True)\n }\n @asyncio.coroutine\n @use_args(hello_args)\n def index(request, args):\n return web.Response(\n body='Hello {}'.format(args['name']).encode('utf-8')\n )\n\n app = web.Application()\n app.router.add_route('GET', '/', index)\n\"\"\"\nimport typing\n\nfrom aiohttp import web\nfrom aiohttp.web import Request\nfrom aiohttp import web_exceptions\nfrom marshmallow import Schema, ValidationError\nfrom marshmallow.fields import Field\n\nfrom webargs import core\nfrom webargs.core import json\nfrom webargs.asyncparser import AsyncParser\n\n\ndef is_json_request(req: Request) -> bool:\n content_type = req.content_type\n return core.is_json(content_type)\n\n\nclass HTTPUnprocessableEntity(web.HTTPClientError):\n status_code = 422\n\n\n# Mapping of status codes to exception classes\n# Adapted from werkzeug\nexception_map = {422: HTTPUnprocessableEntity}\n\n\ndef _find_exceptions() -> None:\n for name in web_exceptions.__all__:\n obj = getattr(web_exceptions, name)\n try:\n is_http_exception = issubclass(obj, web_exceptions.HTTPException)\n except TypeError:\n is_http_exception = False\n if not is_http_exception or obj.status_code is None:\n continue\n old_obj = exception_map.get(obj.status_code, None)\n if old_obj is not None and issubclass(obj, old_obj):\n continue\n exception_map[obj.status_code] = obj\n\n\n# Collect all exceptions from aiohttp.web_exceptions\n_find_exceptions()\ndel _find_exceptions\n\n\nclass AIOHTTPParser(AsyncParser):\n \"\"\"aiohttp request argument parser.\"\"\"\n\n __location_map__ = dict(\n match_info=\"parse_match_info\", **core.Parser.__location_map__\n )\n\n def parse_querystring(self, req: Request, name: str, field: Field) -> typing.Any:\n \"\"\"Pull a querystring value from the request.\"\"\"\n return 
core.get_value(req.query, name, field)\n\n async def parse_form(self, req: Request, name: str, field: Field) -> typing.Any:\n \"\"\"Pull a form value from the request.\"\"\"\n post_data = self._cache.get(\"post\")\n if post_data is None:\n self._cache[\"post\"] = await req.post()\n return core.get_value(self._cache[\"post\"], name, field)\n\n async def parse_json(self, req: Request, name: str, field: Field) -> typing.Any:\n \"\"\"Pull a json value from the request.\"\"\"\n json_data = self._cache.get(\"json\")\n if json_data is None:\n if not (req.body_exists and is_json_request(req)):\n return core.missing\n try:\n json_data = await req.json(loads=json.loads)\n except json.JSONDecodeError as e:\n if e.doc == \"\":\n return core.missing\n else:\n return self.handle_invalid_json_error(e, req)\n self._cache[\"json\"] = json_data\n return core.get_value(json_data, name, field, allow_many_nested=True)\n\n def parse_headers(self, req: Request, name: str, field: Field) -> typing.Any:\n \"\"\"Pull a value from the header data.\"\"\"\n return core.get_value(req.headers, name, field)\n\n def parse_cookies(self, req: Request, name: str, field: Field) -> typing.Any:\n \"\"\"Pull a value from the cookiejar.\"\"\"\n return core.get_value(req.cookies, name, field)\n\n def parse_files(self, req: Request, name: str, field: Field) -> None:\n raise NotImplementedError(\n \"parse_files is not implemented. You may be able to use parse_form for \"\n \"parsing upload data.\"\n )\n\n def parse_match_info(self, req: Request, name: str, field: Field) -> typing.Any:\n \"\"\"Pull a value from the request's ``match_info``.\"\"\"\n return core.get_value(req.match_info, name, field)\n\n def get_request_from_view_args(\n self, view: typing.Callable, args: typing.Iterable, kwargs: typing.Mapping\n ) -> Request:\n \"\"\"Get request object from a handler function or method. Used internally by\n ``use_args`` and ``use_kwargs``.\n \"\"\"\n req = None\n for arg in args:\n if isinstance(arg, web.Request):\n req = arg\n break\n elif isinstance(arg, web.View):\n req = arg.request\n break\n assert isinstance(req, web.Request), \"Request argument not found for handler\"\n return req\n\n def handle_error(\n self,\n error: ValidationError,\n req: Request,\n schema: Schema,\n error_status_code: typing.Union[int, None] = None,\n error_headers: typing.Union[typing.Mapping[str, str], None] = None,\n ) -> typing.NoReturn:\n \"\"\"Handle ValidationErrors and return a JSON response of error messages\n to the client.\n \"\"\"\n error_class = exception_map.get(\n error_status_code or self.DEFAULT_VALIDATION_STATUS\n )\n if not error_class:\n raise LookupError(\"No exception for {0}\".format(error_status_code))\n headers = error_headers\n raise error_class(\n body=json.dumps(error.messages).encode(\"utf-8\"),\n headers=headers,\n content_type=\"application/json\",\n )\n\n def handle_invalid_json_error(\n self, error: json.JSONDecodeError, req: Request, *args, **kwargs\n ) -> typing.NoReturn:\n error_class = exception_map[400]\n messages = {\"json\": [\"Invalid JSON body.\"]}\n raise error_class(\n body=json.dumps(messages).encode(\"utf-8\"), content_type=\"application/json\"\n )\n\n\nparser = AIOHTTPParser()\nuse_args = parser.use_args # type: typing.Callable\nuse_kwargs = parser.use_kwargs # type: typing.Callable\n", "path": "webargs/aiohttpparser.py"}]} | 2,525 | 222 |
gh_patches_debug_3699 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2982 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PasswordResetView::getErrors function never called.
## BUG
### What I did:
I am trying to reset the password as a normal Plone user, and I set a pdb breakpoint inside https://github.com/plone/Products.CMFPlone/blob/master/Products/CMFPlone/browser/login/password_reset.py#L159
URL: {site url}/passwordreset/e3127df738bc41e1976cc36cc9832132?userid=local_manager
### What I expect to happen:
I expected a call to `RegistrationTool.testPasswordValidity(password, password2)`, since I have some business logic inside `testPasswordValidity`, but the code never reaches that point.
### What actually happened:
The `getErrors` method does contain a call to the registration tool's `testPasswordValidity` method, but `getErrors` itself is never called.
### What version of Plone/ Addons I am using:
Plone 5.2.5rc
</issue>
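To make the reported gap concrete: nothing in the POST path of the view shown below ever invokes `getErrors()`, so the registration tool's validation is skipped. The following method-level fragment is only an illustrative sketch of how the handler could consult it first; it reuses the names from the listing, trims the exception handling, and is not presented as the exact fix.

```python
def _reset_password(self, pw_tool, randomstring):
    # Run the same validation that getErrors() performs (including the
    # registration tool's testPasswordValidity) before resetting anything.
    state = self.getErrors()
    if state:
        # Re-render the form so the validation messages are shown to the user.
        return self.form()
    userid = self.request.form.get('userid')
    password = self.request.form.get('password')
    pw_tool.resetPassword(userid, randomstring, password)
    return self.finish()
```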
<code>
[start of Products/CMFPlone/browser/login/password_reset.py]
1 # -*- coding: utf-8 -*-
2 from AccessControl.SecurityManagement import getSecurityManager
3 from email.header import Header
4 from plone.app.layout.navigation.interfaces import INavigationRoot
5 from plone.memoize import view
6 from plone.registry.interfaces import IRegistry
7 from Products.CMFCore.utils import getToolByName
8 from Products.CMFPlone import PloneMessageFactory as _
9 from Products.CMFPlone.interfaces import IPasswordResetToolView
10 from Products.CMFPlone.interfaces.controlpanel import IMailSchema
11 from Products.CMFPlone.PasswordResetTool import ExpiredRequestError
12 from Products.CMFPlone.PasswordResetTool import InvalidRequestError
13 from Products.CMFPlone.utils import safe_unicode
14 from Products.CMFPlone.utils import safeToInt
15 from Products.Five import BrowserView
16 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
17 from Products.PlonePAS.events import UserInitialLoginInEvent
18 from Products.PlonePAS.events import UserLoggedInEvent
19 from Products.PluggableAuthService.interfaces.plugins import ICredentialsUpdatePlugin # noqa
20 from Products.statusmessages.interfaces import IStatusMessage
21 from zope.component import getMultiAdapter
22 from zope.component import getUtility
23 from zope.event import notify
24 from zope.i18n import translate
25 from zope.interface import implementer
26 from zope.publisher.interfaces import IPublishTraverse
27
28
29 @implementer(IPasswordResetToolView)
30 class PasswordResetToolView(BrowserView):
31
32 @view.memoize_contextless
33 def portal_state(self):
34 """ return portal_state of plone.app.layout
35 """
36 return getMultiAdapter((self.context, self.request),
37 name=u"plone_portal_state")
38
39 def encode_mail_header(self, text):
40 """ Encodes text into correctly encoded email header """
41 return Header(safe_unicode(text), 'utf-8')
42
43 def encoded_mail_sender(self):
44 """ returns encoded version of Portal name <portal_email> """
45 registry = getUtility(IRegistry)
46 mail_settings = registry.forInterface(IMailSchema, prefix="plone")
47 from_ = mail_settings.email_from_name
48 mail = mail_settings.email_from_address
49 return '"%s" <%s>' % (self.encode_mail_header(from_).encode(), mail)
50
51 def registered_notify_subject(self):
52 portal_name = self.portal_state().portal_title()
53 return translate(
54 _(
55 u'mailtemplate_user_account_info',
56 default=u'User Account Information for ${portal_name}',
57 mapping={'portal_name': safe_unicode(portal_name)},
58 ),
59 context=self.request,
60 )
61
62 def mail_password_subject(self):
63 return translate(
64 _(
65 u'mailtemplate_subject_resetpasswordrequest',
66 default=u'Password reset request',
67 ),
68 context=self.request,
69 )
70
71 def construct_url(self, randomstring):
72 return '%s/passwordreset/%s' % (
73 self.portal_state().navigation_root_url(), randomstring)
74
75 def expiration_timeout(self):
76 pw_tool = getToolByName(self.context, 'portal_password_reset')
77 timeout = int(pw_tool.getExpirationTimeout() or 0)
78 return timeout * 24 # timeout is in days, but templates want in hours.
79
80
81 @implementer(IPublishTraverse)
82 class PasswordResetView(BrowserView):
83 """ """
84
85 invalid = ViewPageTemplateFile('templates/pwreset_invalid.pt')
86 expired = ViewPageTemplateFile('templates/pwreset_expired.pt')
87 finish = ViewPageTemplateFile('templates/pwreset_finish.pt')
88 form = ViewPageTemplateFile('templates/pwreset_form.pt')
89 subpath = None
90
91 def _auto_login(self, userid, password):
92 aclu = getToolByName(self.context, 'acl_users')
93 for name, plugin in aclu.plugins.listPlugins(ICredentialsUpdatePlugin):
94 plugin.updateCredentials(
95 self.request,
96 self.request.response,
97 userid,
98 password
99 )
100 user = getSecurityManager().getUser()
101 login_time = user.getProperty('login_time', None)
102 if login_time is None:
103 notify(UserInitialLoginInEvent(user))
104 else:
105 notify(UserLoggedInEvent(user))
106
107 IStatusMessage(self.request).addStatusMessage(
108 _(
109 'password_reset_successful',
110 default='Password reset successful, '
111 'you are logged in now!',
112 ),
113 'info',
114 )
115 url = INavigationRoot(self.context).absolute_url()
116 self.request.response.redirect(url)
117 return
118
119 def _reset_password(self, pw_tool, randomstring):
120 userid = self.request.form.get('userid')
121 password = self.request.form.get('password')
122 try:
123 pw_tool.resetPassword(userid, randomstring, password)
124 except ExpiredRequestError:
125 return self.expired()
126 except InvalidRequestError:
127 return self.invalid()
128 except RuntimeError:
129 return self.invalid()
130 registry = getUtility(IRegistry)
131 if registry.get('plone.autologin_after_password_reset', False):
132 return self._auto_login(userid, password)
133 return self.finish()
134
135 def __call__(self):
136 if self.subpath:
137 # Try traverse subpath first:
138 randomstring = self.subpath[0]
139 else:
140 randomstring = self.request.get('key', None)
141
142 pw_tool = getToolByName(self.context, 'portal_password_reset')
143 if self.request.method == 'POST':
144 return self._reset_password(pw_tool, randomstring)
145 try:
146 pw_tool.verifyKey(randomstring)
147 except InvalidRequestError:
148 return self.invalid()
149 except ExpiredRequestError:
150 return self.expired()
151 return self.form()
152
153 def publishTraverse(self, request, name):
154 if self.subpath is None:
155 self.subpath = []
156 self.subpath.append(name)
157 return self
158
159 def getErrors(self):
160 if self.request.method != 'POST':
161 return
162 password = self.request.form.get('password')
163 password2 = self.request.form.get('password2')
164 userid = self.request.form.get('userid')
165 reg_tool = getToolByName(self.context, 'portal_registration')
166 pw_fail = reg_tool.testPasswordValidity(password, password2)
167 state = {}
168 if pw_fail:
169 state['password'] = pw_fail
170
171 # Determine if we're checking userids or not
172 pw_tool = getToolByName(self.context, 'portal_password_reset')
173 if not pw_tool.checkUser():
174 return state
175
176 if not userid:
177 state['userid'] = _(
178 'This field is required, please provide some information.',
179 )
180 if state:
181 state['status'] = 'failure'
182 state['portal_status_message'] = _(
183 'Please correct the indicated errors.',
184 )
185 return state
186
187 def login_url(self):
188 portal_state = getMultiAdapter((self.context, self.request),
189 name=u"plone_portal_state")
190 return '{0}/login?__ac_name={1}'.format(
191 portal_state.navigation_root_url(),
192 self.request.form.get('userid', ''))
193
194 def expiration_timeout(self):
195 pw_tool = getToolByName(self.context, 'portal_password_reset')
196 timeout = int(pw_tool.getExpirationTimeout() or 0)
197 return timeout * 24 # timeout is in days, but templates want in hours.
198
199
200 class ExplainPWResetToolView(BrowserView):
201 """ """
202
203 def timeout_days(self):
204 return self.context.getExpirationTimeout()
205
206 def user_check(self):
207 return self.context._user_check and 'checked' or None
208
209 @property
210 def stats(self):
211 """Return a dictionary like so:
212 {"open":3, "expired":0}
213 about the number of open and expired reset requests.
214 """
215 # count expired reset requests by creating a list of it
216 bad = len([1 for expiry in self.context._requests.values()
217 if self.context.expired(expiry)])
218 # open reset requests are all requests without the expired ones
219 good = len(self.context._requests) - bad
220 return {"open": good, "expired": bad}
221
222 def __call__(self):
223 if self.request.method == 'POST':
224 timeout_days = safeToInt(self.request.get('timeout_days'), 7)
225 self.context.setExpirationTimeout(timeout_days)
226 self.context._user_check = bool(
227 self.request.get('user_check', False),
228 )
229 return self.index()
230
[end of Products/CMFPlone/browser/login/password_reset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Products/CMFPlone/browser/login/password_reset.py b/Products/CMFPlone/browser/login/password_reset.py
--- a/Products/CMFPlone/browser/login/password_reset.py
+++ b/Products/CMFPlone/browser/login/password_reset.py
@@ -117,6 +117,9 @@
return
def _reset_password(self, pw_tool, randomstring):
+ state = self.getErrors()
+ if state:
+ return self.form()
userid = self.request.form.get('userid')
password = self.request.form.get('password')
try:
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/login/password_reset.py b/Products/CMFPlone/browser/login/password_reset.py\n--- a/Products/CMFPlone/browser/login/password_reset.py\n+++ b/Products/CMFPlone/browser/login/password_reset.py\n@@ -117,6 +117,9 @@\n return\n \n def _reset_password(self, pw_tool, randomstring):\n+ state = self.getErrors()\n+ if state:\n+ return self.form()\n userid = self.request.form.get('userid')\n password = self.request.form.get('password')\n try:\n", "issue": "PasswordResetView::getErrors function never called.\n## BUG\r\n\r\n<!--\r\n\r\nRead https://plone.org/support/bugs first!\r\n\r\nPlease use the labels at Github, at least one of the types: bug, regression, question, enhancement.\r\n\r\nPlease include tracebacks, screenshots, code of debugging sessions or code that reproduces the issue if possible.\r\nThe best reproductions are in plain Plone installations without addons or at least with minimal needed addons installed.\r\n\r\n-->\r\n\r\n### What I did:\r\nI am trying to reset the password, using a normal Plone user and I made a PDB inside https://github.com/plone/Products.CMFPlone/blob/master/Products/CMFPlone/browser/login/password_reset.py#L159\r\n\r\nURL: {site url}/passwordreset/e3127df738bc41e1976cc36cc9832132?userid=local_manager\r\n\r\n### What I expect to happen:\r\nI expected a call `RegistrationTool.testPasswordValidity(password, password2)\r\n` as I have some business logic inside testPasswordValidity but I saw code never coming here\r\n\r\n### What actually happened:\r\nAs I see inside ''getErrors'' method, there is a call to registration tool testPasswordValidity method but the ''getErrors'' never called.\r\n### What version of Plone/ Addons I am using:\r\nPlone 5.2.5rc\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl.SecurityManagement import getSecurityManager\nfrom email.header import Header\nfrom plone.app.layout.navigation.interfaces import INavigationRoot\nfrom plone.memoize import view\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IPasswordResetToolView\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.CMFPlone.PasswordResetTool import ExpiredRequestError\nfrom Products.CMFPlone.PasswordResetTool import InvalidRequestError\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.CMFPlone.utils import safeToInt\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.PlonePAS.events import UserInitialLoginInEvent\nfrom Products.PlonePAS.events import UserLoggedInEvent\nfrom Products.PluggableAuthService.interfaces.plugins import ICredentialsUpdatePlugin # noqa\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.event import notify\nfrom zope.i18n import translate\nfrom zope.interface import implementer\nfrom zope.publisher.interfaces import IPublishTraverse\n\n\n@implementer(IPasswordResetToolView)\nclass PasswordResetToolView(BrowserView):\n\n @view.memoize_contextless\n def portal_state(self):\n \"\"\" return portal_state of plone.app.layout\n \"\"\"\n return getMultiAdapter((self.context, self.request),\n name=u\"plone_portal_state\")\n\n def encode_mail_header(self, text):\n \"\"\" Encodes text into correctly encoded email 
header \"\"\"\n return Header(safe_unicode(text), 'utf-8')\n\n def encoded_mail_sender(self):\n \"\"\" returns encoded version of Portal name <portal_email> \"\"\"\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix=\"plone\")\n from_ = mail_settings.email_from_name\n mail = mail_settings.email_from_address\n return '\"%s\" <%s>' % (self.encode_mail_header(from_).encode(), mail)\n\n def registered_notify_subject(self):\n portal_name = self.portal_state().portal_title()\n return translate(\n _(\n u'mailtemplate_user_account_info',\n default=u'User Account Information for ${portal_name}',\n mapping={'portal_name': safe_unicode(portal_name)},\n ),\n context=self.request,\n )\n\n def mail_password_subject(self):\n return translate(\n _(\n u'mailtemplate_subject_resetpasswordrequest',\n default=u'Password reset request',\n ),\n context=self.request,\n )\n\n def construct_url(self, randomstring):\n return '%s/passwordreset/%s' % (\n self.portal_state().navigation_root_url(), randomstring)\n\n def expiration_timeout(self):\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n timeout = int(pw_tool.getExpirationTimeout() or 0)\n return timeout * 24 # timeout is in days, but templates want in hours.\n\n\n@implementer(IPublishTraverse)\nclass PasswordResetView(BrowserView):\n \"\"\" \"\"\"\n\n invalid = ViewPageTemplateFile('templates/pwreset_invalid.pt')\n expired = ViewPageTemplateFile('templates/pwreset_expired.pt')\n finish = ViewPageTemplateFile('templates/pwreset_finish.pt')\n form = ViewPageTemplateFile('templates/pwreset_form.pt')\n subpath = None\n\n def _auto_login(self, userid, password):\n aclu = getToolByName(self.context, 'acl_users')\n for name, plugin in aclu.plugins.listPlugins(ICredentialsUpdatePlugin):\n plugin.updateCredentials(\n self.request,\n self.request.response,\n userid,\n password\n )\n user = getSecurityManager().getUser()\n login_time = user.getProperty('login_time', None)\n if login_time is None:\n notify(UserInitialLoginInEvent(user))\n else:\n notify(UserLoggedInEvent(user))\n\n IStatusMessage(self.request).addStatusMessage(\n _(\n 'password_reset_successful',\n default='Password reset successful, '\n 'you are logged in now!',\n ),\n 'info',\n )\n url = INavigationRoot(self.context).absolute_url()\n self.request.response.redirect(url)\n return\n\n def _reset_password(self, pw_tool, randomstring):\n userid = self.request.form.get('userid')\n password = self.request.form.get('password')\n try:\n pw_tool.resetPassword(userid, randomstring, password)\n except ExpiredRequestError:\n return self.expired()\n except InvalidRequestError:\n return self.invalid()\n except RuntimeError:\n return self.invalid()\n registry = getUtility(IRegistry)\n if registry.get('plone.autologin_after_password_reset', False):\n return self._auto_login(userid, password)\n return self.finish()\n\n def __call__(self):\n if self.subpath:\n # Try traverse subpath first:\n randomstring = self.subpath[0]\n else:\n randomstring = self.request.get('key', None)\n\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n if self.request.method == 'POST':\n return self._reset_password(pw_tool, randomstring)\n try:\n pw_tool.verifyKey(randomstring)\n except InvalidRequestError:\n return self.invalid()\n except ExpiredRequestError:\n return self.expired()\n return self.form()\n\n def publishTraverse(self, request, name):\n if self.subpath is None:\n self.subpath = []\n self.subpath.append(name)\n return self\n\n def getErrors(self):\n if 
self.request.method != 'POST':\n return\n password = self.request.form.get('password')\n password2 = self.request.form.get('password2')\n userid = self.request.form.get('userid')\n reg_tool = getToolByName(self.context, 'portal_registration')\n pw_fail = reg_tool.testPasswordValidity(password, password2)\n state = {}\n if pw_fail:\n state['password'] = pw_fail\n\n # Determine if we're checking userids or not\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n if not pw_tool.checkUser():\n return state\n\n if not userid:\n state['userid'] = _(\n 'This field is required, please provide some information.',\n )\n if state:\n state['status'] = 'failure'\n state['portal_status_message'] = _(\n 'Please correct the indicated errors.',\n )\n return state\n\n def login_url(self):\n portal_state = getMultiAdapter((self.context, self.request),\n name=u\"plone_portal_state\")\n return '{0}/login?__ac_name={1}'.format(\n portal_state.navigation_root_url(),\n self.request.form.get('userid', ''))\n\n def expiration_timeout(self):\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n timeout = int(pw_tool.getExpirationTimeout() or 0)\n return timeout * 24 # timeout is in days, but templates want in hours.\n\n\nclass ExplainPWResetToolView(BrowserView):\n \"\"\" \"\"\"\n\n def timeout_days(self):\n return self.context.getExpirationTimeout()\n\n def user_check(self):\n return self.context._user_check and 'checked' or None\n\n @property\n def stats(self):\n \"\"\"Return a dictionary like so:\n {\"open\":3, \"expired\":0}\n about the number of open and expired reset requests.\n \"\"\"\n # count expired reset requests by creating a list of it\n bad = len([1 for expiry in self.context._requests.values()\n if self.context.expired(expiry)])\n # open reset requests are all requests without the expired ones\n good = len(self.context._requests) - bad\n return {\"open\": good, \"expired\": bad}\n\n def __call__(self):\n if self.request.method == 'POST':\n timeout_days = safeToInt(self.request.get('timeout_days'), 7)\n self.context.setExpirationTimeout(timeout_days)\n self.context._user_check = bool(\n self.request.get('user_check', False),\n )\n return self.index()\n", "path": "Products/CMFPlone/browser/login/password_reset.py"}]} | 3,203 | 132 |
gh_patches_debug_6782 | rasdani/github-patches | git_diff | learningequality__kolibri-1761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The mastery completion sign updates only after a page refresh, not in real time.
## Summary
A learner completed an exercise and exited it, but found that the green completed tick did not update in real time; after refreshing the page, the completed tick appeared.
## System information
- Version: Kolibri 0.4.0beta10
- Operating system: Ubuntu 14.04 LTS
- Browser: Chrome
## How to reproduce
1. Attempt an exercise or master it.
2. Come out of the exercise.
3. The completed or In progress stamp is not updated in real time.
## Screenshots
Learner has mastered the topic.

He exited the exercise, and the completed sign on the thumbnail was not updated:

But on refreshing the page the thumbnail has the completed sign.
</issue>
<code>
[start of kolibri/auth/backends.py]
1 """
2 Implements custom auth backends as described in the Django docs, for our custom user classes -- FacilityUser and
3 DeviceOwner. The appropriate classes should be listed in the AUTHENTICATION_BACKENDS. Note that authentication
4 backends are checked in the order they're listed.
5 """
6
7 from kolibri.auth.models import DeviceOwner, FacilityUser
8
9
10 class FacilityUserBackend(object):
11 """
12 A class that implements authentication for FacilityUsers.
13 """
14
15 def authenticate(self, username=None, password=None, facility=None):
16 """
17 Authenticates the user if the credentials correspond to a FacilityUser for the specified Facility.
18
19 :param username: a string
20 :param password: a string
21 :param facility: a Facility
22 :return: A FacilityUser instance if successful, or None if authentication failed.
23 """
24 users = FacilityUser.objects.filter(username=username)
25 if facility:
26 users = users.filter(facility=facility)
27 for user in users:
28 if user.check_password(password):
29 return user
30 # Allow login without password for learners for facilities that allow this.
31 # Must specify the facility, to prevent accidental logins
32 elif facility and user.dataset.learner_can_login_with_no_password and not user.roles.count():
33 return user
34 return None
35
36 def get_user(self, user_id):
37 """
38 Gets a user. Auth backends are required to implement this.
39
40 :param user_id: A FacilityUser pk
41 :return: A FacilityUser instance if a BaseUser with that pk is found, else None.
42 """
43 try:
44 return FacilityUser.objects.get(pk=user_id)
45 except FacilityUser.DoesNotExist:
46 return None
47
48
49 class DeviceOwnerBackend(object):
50 """
51 A class that implements authentication for DeviceOwners.
52 """
53
54 def authenticate(self, username=None, password=None, **kwargs):
55 """
56 Authenticates the user if the credentials correspond to a DeviceOwner.
57
58 :param username: a string
59 :param password: a string
60 :return: A DeviceOwner instance if successful, or None if authentication failed.
61 """
62 try:
63 user = DeviceOwner.objects.get(username=username)
64 if user.check_password(password):
65 return user
66 else:
67 return None
68 except DeviceOwner.DoesNotExist:
69 return None
70
71 def get_user(self, user_id):
72 """
73 Gets a user. Auth backends are required to implement this.
74
75 :param user_id: A BaseUser pk
76 :return: A DeviceOwner instance if a BaseUser with that pk is found, else None.
77 """
78 try:
79 return DeviceOwner.objects.get(pk=user_id)
80 except DeviceOwner.DoesNotExist:
81 return None
82
[end of kolibri/auth/backends.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/auth/backends.py b/kolibri/auth/backends.py
--- a/kolibri/auth/backends.py
+++ b/kolibri/auth/backends.py
@@ -21,7 +21,7 @@
:param facility: a Facility
:return: A FacilityUser instance if successful, or None if authentication failed.
"""
- users = FacilityUser.objects.filter(username=username)
+ users = FacilityUser.objects.filter(username__iexact=username)
if facility:
users = users.filter(facility=facility)
for user in users:
| {"golden_diff": "diff --git a/kolibri/auth/backends.py b/kolibri/auth/backends.py\n--- a/kolibri/auth/backends.py\n+++ b/kolibri/auth/backends.py\n@@ -21,7 +21,7 @@\n :param facility: a Facility\n :return: A FacilityUser instance if successful, or None if authentication failed.\n \"\"\"\n- users = FacilityUser.objects.filter(username=username)\n+ users = FacilityUser.objects.filter(username__iexact=username)\n if facility:\n users = users.filter(facility=facility)\n for user in users:\n", "issue": "The mastery completion sign updates only after a page refresh and not real time.\n## Summary\r\n\r\nA learner had completed and came out of the exercise and found the green completed tick did not get updated real time, but after refreshing the page the completed tick appeared. \r\n\r\n## System information\r\n - Version: Kolibri 0.4.0beta10\r\n - Operating system: Ubuntu 14.04 LTS\r\n - Browser: Chrome\r\n\r\n\r\n## How to reproduce\r\n1. Attempt an exercise or master it.\r\n2. Come out of the exercise.\r\n3. The completed or In progress stamp is not updated real time.\r\n\r\n## Screenshots\r\nLearner has mastered the topic.\r\n\r\n\r\nHe exited the exercise and the completed sign on the thumbnail is not update:\r\n\r\n\r\nBut on refreshing the page the thumbnail has the completed sign.\n", "before_files": [{"content": "\"\"\"\nImplements custom auth backends as described in the Django docs, for our custom user classes -- FacilityUser and\nDeviceOwner. The appropriate classes should be listed in the AUTHENTICATION_BACKENDS. Note that authentication\nbackends are checked in the order they're listed.\n\"\"\"\n\nfrom kolibri.auth.models import DeviceOwner, FacilityUser\n\n\nclass FacilityUserBackend(object):\n \"\"\"\n A class that implements authentication for FacilityUsers.\n \"\"\"\n\n def authenticate(self, username=None, password=None, facility=None):\n \"\"\"\n Authenticates the user if the credentials correspond to a FacilityUser for the specified Facility.\n\n :param username: a string\n :param password: a string\n :param facility: a Facility\n :return: A FacilityUser instance if successful, or None if authentication failed.\n \"\"\"\n users = FacilityUser.objects.filter(username=username)\n if facility:\n users = users.filter(facility=facility)\n for user in users:\n if user.check_password(password):\n return user\n # Allow login without password for learners for facilities that allow this.\n # Must specify the facility, to prevent accidental logins\n elif facility and user.dataset.learner_can_login_with_no_password and not user.roles.count():\n return user\n return None\n\n def get_user(self, user_id):\n \"\"\"\n Gets a user. 
Auth backends are required to implement this.\n\n :param user_id: A FacilityUser pk\n :return: A FacilityUser instance if a BaseUser with that pk is found, else None.\n \"\"\"\n try:\n return FacilityUser.objects.get(pk=user_id)\n except FacilityUser.DoesNotExist:\n return None\n\n\nclass DeviceOwnerBackend(object):\n \"\"\"\n A class that implements authentication for DeviceOwners.\n \"\"\"\n\n def authenticate(self, username=None, password=None, **kwargs):\n \"\"\"\n Authenticates the user if the credentials correspond to a DeviceOwner.\n\n :param username: a string\n :param password: a string\n :return: A DeviceOwner instance if successful, or None if authentication failed.\n \"\"\"\n try:\n user = DeviceOwner.objects.get(username=username)\n if user.check_password(password):\n return user\n else:\n return None\n except DeviceOwner.DoesNotExist:\n return None\n\n def get_user(self, user_id):\n \"\"\"\n Gets a user. Auth backends are required to implement this.\n\n :param user_id: A BaseUser pk\n :return: A DeviceOwner instance if a BaseUser with that pk is found, else None.\n \"\"\"\n try:\n return DeviceOwner.objects.get(pk=user_id)\n except DeviceOwner.DoesNotExist:\n return None\n", "path": "kolibri/auth/backends.py"}]} | 1,595 | 126 |
gh_patches_debug_30951 | rasdani/github-patches | git_diff | fail2ban__fail2ban-1503 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong install path `/usr/share/doc/` for some platforms (install fails on Mac OS 10.11 "El Capitan")
Due to El Capitan's new "System Integrity Protection", there is no way to create the directory at /usr/share/doc/fail2ban, even as root:
> % sudo python setup.py install
> running install
> Checking .pth file support in /Library/Python/2.7/site-packages/
> ...
> running install_data
> creating /usr/share/doc/fail2ban
> error: could not create '/usr/share/doc/fail2ban': Operation not permitted
However, /usr/local is modifiable, so changing line 151 of setup.py from
> '/usr/share/doc/fail2ban'
to
> '/usr/local/doc/fail2ban'
allowed the installer to proceed.
</issue>
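More generally, an installer that must write documentation files can pick the target directory per platform instead of hard-coding `/usr/share/doc`. The sketch below shows that general pattern only; the exact directory choices are assumptions for illustration, not fail2ban's actual packaging policy.

```python
import platform

# Choose a docs directory the platform actually lets us write to.
# macOS System Integrity Protection blocks /usr/share, so fall back
# to the /usr/local hierarchy there (assumed paths, for illustration).
if platform.system().lower() == 'darwin':
    doc_dir = '/usr/local/share/doc/fail2ban'
else:
    doc_dir = '/usr/share/doc/fail2ban'

data_files = [
    (doc_dir, ['README.md', 'DEVELOP', 'FILTERS']),
]
```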
<code>
[start of setup.py]
1 #!/usr/bin/python
2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
3 # vi: set ft=python sts=4 ts=4 sw=4 noet :
4
5 # This file is part of Fail2Ban.
6 #
7 # Fail2Ban is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # Fail2Ban is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with Fail2Ban; if not, write to the Free Software
19 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20
21 __author__ = "Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko"
22 __copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors"
23 __license__ = "GPL"
24
25 try:
26 import setuptools
27 from setuptools import setup
28 except ImportError:
29 setuptools = None
30 from distutils.core import setup
31
32 try:
33 # python 3.x
34 from distutils.command.build_py import build_py_2to3 as build_py
35 from distutils.command.build_scripts \
36 import build_scripts_2to3 as build_scripts
37 except ImportError:
38 # python 2.x
39 from distutils.command.build_py import build_py
40 from distutils.command.build_scripts import build_scripts
41 import os
42 from os.path import isfile, join, isdir, realpath
43 import sys
44 import warnings
45 from glob import glob
46
47 if setuptools and "test" in sys.argv:
48 import logging
49 logSys = logging.getLogger("fail2ban")
50 hdlr = logging.StreamHandler(sys.stdout)
51 fmt = logging.Formatter("%(asctime)-15s %(message)s")
52 hdlr.setFormatter(fmt)
53 logSys.addHandler(hdlr)
54 if set(["-q", "--quiet"]) & set(sys.argv):
55 logSys.setLevel(logging.CRITICAL)
56 warnings.simplefilter("ignore")
57 sys.warnoptions.append("ignore")
58 elif set(["-v", "--verbose"]) & set(sys.argv):
59 logSys.setLevel(logging.DEBUG)
60 else:
61 logSys.setLevel(logging.INFO)
62 elif "test" in sys.argv:
63 print("python distribute required to execute fail2ban tests")
64 print("")
65
66 longdesc = '''
67 Fail2Ban scans log files like /var/log/pwdfail or
68 /var/log/apache/error_log and bans IP that makes
69 too many password failures. It updates firewall rules
70 to reject the IP address or executes user defined
71 commands.'''
72
73 if setuptools:
74 setup_extra = {
75 'test_suite': "fail2ban.tests.utils.gatherTests",
76 'use_2to3': True,
77 }
78 else:
79 setup_extra = {}
80
81 data_files_extra = []
82 if os.path.exists('/var/run'):
83 # if we are on the system with /var/run -- we are to use it for having fail2ban/
84 # directory there for socket file etc.
85 # realpath is used to possibly resolve /var/run -> /run symlink
86 data_files_extra += [(realpath('/var/run/fail2ban'), '')]
87
88 # Get version number, avoiding importing fail2ban.
89 # This is due to tests not functioning for python3 as 2to3 takes place later
90 exec(open(join("fail2ban", "version.py")).read())
91
92 setup(
93 name = "fail2ban",
94 version = version,
95 description = "Ban IPs that make too many password failures",
96 long_description = longdesc,
97 author = "Cyril Jaquier & Fail2Ban Contributors",
98 author_email = "[email protected]",
99 url = "http://www.fail2ban.org",
100 license = "GPL",
101 platforms = "Posix",
102 cmdclass = {'build_py': build_py, 'build_scripts': build_scripts},
103 scripts = [
104 'bin/fail2ban-client',
105 'bin/fail2ban-server',
106 'bin/fail2ban-regex',
107 'bin/fail2ban-testcases',
108 ],
109 packages = [
110 'fail2ban',
111 'fail2ban.client',
112 'fail2ban.server',
113 'fail2ban.tests',
114 'fail2ban.tests.action_d',
115 ],
116 package_data = {
117 'fail2ban.tests':
118 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
119 for w in os.walk('fail2ban/tests/files')
120 for f in w[2]] +
121 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
122 for w in os.walk('fail2ban/tests/config')
123 for f in w[2]] +
124 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
125 for w in os.walk('fail2ban/tests/action_d')
126 for f in w[2]]
127 },
128 data_files = [
129 ('/etc/fail2ban',
130 glob("config/*.conf")
131 ),
132 ('/etc/fail2ban/filter.d',
133 glob("config/filter.d/*.conf")
134 ),
135 ('/etc/fail2ban/filter.d/ignorecommands',
136 glob("config/filter.d/ignorecommands/*")
137 ),
138 ('/etc/fail2ban/action.d',
139 glob("config/action.d/*.conf") +
140 glob("config/action.d/*.py")
141 ),
142 ('/etc/fail2ban/fail2ban.d',
143 ''
144 ),
145 ('/etc/fail2ban/jail.d',
146 ''
147 ),
148 ('/var/lib/fail2ban',
149 ''
150 ),
151 ('/usr/share/doc/fail2ban',
152 ['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',
153 'doc/run-rootless.txt']
154 )
155 ] + data_files_extra,
156 **setup_extra
157 )
158
159 # Do some checks after installation
160 # Search for obsolete files.
161 obsoleteFiles = []
162 elements = {
163 "/etc/":
164 [
165 "fail2ban.conf"
166 ],
167 "/usr/bin/":
168 [
169 "fail2ban.py"
170 ],
171 "/usr/lib/fail2ban/":
172 [
173 "version.py",
174 "protocol.py"
175 ]
176 }
177
178 for directory in elements:
179 for f in elements[directory]:
180 path = join(directory, f)
181 if isfile(path):
182 obsoleteFiles.append(path)
183
184 if obsoleteFiles:
185 print("")
186 print("Obsolete files from previous Fail2Ban versions were found on "
187 "your system.")
188 print("Please delete them:")
189 print("")
190 for f in obsoleteFiles:
191 print("\t" + f)
192 print("")
193
194 if isdir("/usr/lib/fail2ban"):
195 print("")
196 print("Fail2ban is not installed under /usr/lib anymore. The new "
197 "location is under /usr/share. Please remove the directory "
198 "/usr/lib/fail2ban and everything under this directory.")
199 print("")
200
201 # Update config file
202 if sys.argv[1] == "install":
203 print("")
204 print("Please do not forget to update your configuration files.")
205 print("They are in /etc/fail2ban/.")
206 print("")
207
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,9 +19,11 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko"
-__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors"
+__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2008-2016 Fail2Ban Contributors"
__license__ = "GPL"
+import platform
+
try:
import setuptools
from setuptools import setup
@@ -85,6 +87,18 @@
# realpath is used to possibly resolve /var/run -> /run symlink
data_files_extra += [(realpath('/var/run/fail2ban'), '')]
+# Installing documentation files only under Linux or other GNU/ systems
+# (e.g. GNU/kFreeBSD), since others might have protective mechanisms forbidding
+# installation there (see e.g. #1233)
+platform_system = platform.system().lower()
+doc_files = ['README.md', 'DEVELOP', 'FILTERS', 'doc/run-rootless.txt']
+if platform_system in ('solaris', 'sunos'):
+ doc_files.append('README.Solaris')
+if platform_system in ('linux', 'solaris', 'sunos') or platform_system.startswith('gnu'):
+ data_files_extra.append(
+ ('/usr/share/doc/fail2ban', doc_files)
+ )
+
# Get version number, avoiding importing fail2ban.
# This is due to tests not functioning for python3 as 2to3 takes place later
exec(open(join("fail2ban", "version.py")).read())
@@ -148,10 +162,6 @@
('/var/lib/fail2ban',
''
),
- ('/usr/share/doc/fail2ban',
- ['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',
- 'doc/run-rootless.txt']
- )
] + data_files_extra,
**setup_extra
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,9 +19,11 @@\n # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n \n __author__ = \"Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko\"\n-__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors\"\n+__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2008-2016 Fail2Ban Contributors\"\n __license__ = \"GPL\"\n \n+import platform\n+\n try:\n \timport setuptools\n \tfrom setuptools import setup\n@@ -85,6 +87,18 @@\n \t# realpath is used to possibly resolve /var/run -> /run symlink\n \tdata_files_extra += [(realpath('/var/run/fail2ban'), '')]\n \n+# Installing documentation files only under Linux or other GNU/ systems\n+# (e.g. GNU/kFreeBSD), since others might have protective mechanisms forbidding\n+# installation there (see e.g. #1233)\n+platform_system = platform.system().lower()\n+doc_files = ['README.md', 'DEVELOP', 'FILTERS', 'doc/run-rootless.txt']\n+if platform_system in ('solaris', 'sunos'):\n+\tdoc_files.append('README.Solaris')\n+if platform_system in ('linux', 'solaris', 'sunos') or platform_system.startswith('gnu'):\n+\tdata_files_extra.append(\n+\t\t('/usr/share/doc/fail2ban', doc_files)\n+\t)\n+\n # Get version number, avoiding importing fail2ban.\n # This is due to tests not functioning for python3 as 2to3 takes place later\n exec(open(join(\"fail2ban\", \"version.py\")).read())\n@@ -148,10 +162,6 @@\n \t\t('/var/lib/fail2ban',\n \t\t\t''\n \t\t),\n-\t\t('/usr/share/doc/fail2ban',\n-\t\t\t['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',\n-\t\t\t 'doc/run-rootless.txt']\n-\t\t)\n \t] + data_files_extra,\n \t**setup_extra\n )\n", "issue": "Wrong install path `/usr/share/doc/` for some platform (install fails on Mac OS 10.11 \"El Capitan\")\nDue to El Capitan's new \"System Integrity Protection\", there is no way to create the directory at /usr/share/doc/fail2ban, even as root:\n\n> % sudo python setup.py install\n> running install\n> Checking .pth file support in /Library/Python/2.7/site-packages/\n> ...\n> running install_data\n> creating /usr/share/doc/fail2ban\n> error: could not create '/usr/share/doc/fail2ban': Operation not permitted\n\nHowever, /usr/local is modifiable, so changing line 151 of setup.py from\n\n> '/usr/share/doc/fail2ban'\n\nto\n\n> '/usr/local/doc/fail2ban'\n\nallowed the installer to proceed.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n__author__ = \"Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors\"\n__license__ = \"GPL\"\n\ntry:\n\timport setuptools\n\tfrom setuptools import setup\nexcept ImportError:\n\tsetuptools = None\n\tfrom distutils.core import setup\n\ntry:\n\t# python 3.x\n\tfrom distutils.command.build_py import build_py_2to3 as build_py\n\tfrom distutils.command.build_scripts \\\n\t\timport build_scripts_2to3 as build_scripts\nexcept ImportError:\n\t# python 2.x\n\tfrom distutils.command.build_py import build_py\n\tfrom distutils.command.build_scripts import build_scripts\nimport os\nfrom os.path import isfile, join, isdir, realpath\nimport sys\nimport warnings\nfrom glob import glob\n\nif setuptools and \"test\" in sys.argv:\n\timport logging\n\tlogSys = logging.getLogger(\"fail2ban\")\n\thdlr = logging.StreamHandler(sys.stdout)\n\tfmt = logging.Formatter(\"%(asctime)-15s %(message)s\")\n\thdlr.setFormatter(fmt)\n\tlogSys.addHandler(hdlr)\n\tif set([\"-q\", \"--quiet\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.CRITICAL)\n\t\twarnings.simplefilter(\"ignore\")\n\t\tsys.warnoptions.append(\"ignore\")\n\telif set([\"-v\", \"--verbose\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.DEBUG)\n\telse:\n\t\tlogSys.setLevel(logging.INFO)\nelif \"test\" in sys.argv:\n\tprint(\"python distribute required to execute fail2ban tests\")\n\tprint(\"\")\n\nlongdesc = '''\nFail2Ban scans log files like /var/log/pwdfail or\n/var/log/apache/error_log and bans IP that makes\ntoo many password failures. 
It updates firewall rules\nto reject the IP address or executes user defined\ncommands.'''\n\nif setuptools:\n\tsetup_extra = {\n\t\t'test_suite': \"fail2ban.tests.utils.gatherTests\",\n\t\t'use_2to3': True,\n\t}\nelse:\n\tsetup_extra = {}\n\ndata_files_extra = []\nif os.path.exists('/var/run'):\n\t# if we are on the system with /var/run -- we are to use it for having fail2ban/\n\t# directory there for socket file etc.\n\t# realpath is used to possibly resolve /var/run -> /run symlink\n\tdata_files_extra += [(realpath('/var/run/fail2ban'), '')]\n\n# Get version number, avoiding importing fail2ban.\n# This is due to tests not functioning for python3 as 2to3 takes place later\nexec(open(join(\"fail2ban\", \"version.py\")).read())\n\nsetup(\n\tname = \"fail2ban\",\n\tversion = version,\n\tdescription = \"Ban IPs that make too many password failures\",\n\tlong_description = longdesc,\n\tauthor = \"Cyril Jaquier & Fail2Ban Contributors\",\n\tauthor_email = \"[email protected]\",\n\turl = \"http://www.fail2ban.org\",\n\tlicense = \"GPL\",\n\tplatforms = \"Posix\",\n\tcmdclass = {'build_py': build_py, 'build_scripts': build_scripts},\n\tscripts = [\n\t\t'bin/fail2ban-client',\n\t\t'bin/fail2ban-server',\n\t\t'bin/fail2ban-regex',\n\t\t'bin/fail2ban-testcases',\n\t],\n\tpackages = [\n\t\t'fail2ban',\n\t\t'fail2ban.client',\n\t\t'fail2ban.server',\n\t\t'fail2ban.tests',\n\t\t'fail2ban.tests.action_d',\n\t],\n\tpackage_data = {\n\t\t'fail2ban.tests':\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/files')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/config')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/action_d')\n\t\t\t\tfor f in w[2]]\n\t},\n\tdata_files = [\n\t\t('/etc/fail2ban',\n\t\t\tglob(\"config/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d',\n\t\t\tglob(\"config/filter.d/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d/ignorecommands',\n\t\t\tglob(\"config/filter.d/ignorecommands/*\")\n\t\t),\n\t\t('/etc/fail2ban/action.d',\n\t\t\tglob(\"config/action.d/*.conf\") +\n\t\t\tglob(\"config/action.d/*.py\")\n\t\t),\n\t\t('/etc/fail2ban/fail2ban.d',\n\t\t\t''\n\t\t),\n\t\t('/etc/fail2ban/jail.d',\n\t\t\t''\n\t\t),\n\t\t('/var/lib/fail2ban',\n\t\t\t''\n\t\t),\n\t\t('/usr/share/doc/fail2ban',\n\t\t\t['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',\n\t\t\t 'doc/run-rootless.txt']\n\t\t)\n\t] + data_files_extra,\n\t**setup_extra\n)\n\n# Do some checks after installation\n# Search for obsolete files.\nobsoleteFiles = []\nelements = {\n\t\"/etc/\":\n\t\t[\n\t\t\t\"fail2ban.conf\"\n\t\t],\n\t\"/usr/bin/\":\n\t\t[\n\t\t\t\"fail2ban.py\"\n\t\t],\n\t\"/usr/lib/fail2ban/\":\n\t\t[\n\t\t\t\"version.py\",\n\t\t\t\"protocol.py\"\n\t\t]\n}\n\nfor directory in elements:\n\tfor f in elements[directory]:\n\t\tpath = join(directory, f)\n\t\tif isfile(path):\n\t\t\tobsoleteFiles.append(path)\n\nif obsoleteFiles:\n\tprint(\"\")\n\tprint(\"Obsolete files from previous Fail2Ban versions were found on \"\n\t\t \"your system.\")\n\tprint(\"Please delete them:\")\n\tprint(\"\")\n\tfor f in obsoleteFiles:\n\t\tprint(\"\\t\" + f)\n\tprint(\"\")\n\nif isdir(\"/usr/lib/fail2ban\"):\n\tprint(\"\")\n\tprint(\"Fail2ban is not installed under /usr/lib anymore. The new \"\n\t\t \"location is under /usr/share. 
Please remove the directory \"\n\t\t \"/usr/lib/fail2ban and everything under this directory.\")\n\tprint(\"\")\n\n# Update config file\nif sys.argv[1] == \"install\":\n\tprint(\"\")\n\tprint(\"Please do not forget to update your configuration files.\")\n\tprint(\"They are in /etc/fail2ban/.\")\n\tprint(\"\")\n", "path": "setup.py"}]} | 2,934 | 522 |
gh_patches_debug_36757 | rasdani/github-patches | git_diff | huggingface__trl-398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Llama Reward Model is incorrectly merged
As mentioned in #287, `merge_peft_adapter` saves the Llama RM as a `LlamaForCausalLM` see [here](https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/merge_peft_adapter.py#L35)
But the reward model is trained and should be a `LlamaForSequenceClassification` and running `rl_training.py` gives the obvious warnings
```
Some weights of the model checkpoint at ./llama-7b-se-rm were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at /home/toolkit/huggingface/llama-7b-rm and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
We should instead check whether we are merging the rm and then save as a the correct model
Also the `score.weight` is not being loaded as mentioned in #297 , see more info below
--- update --
It seems that `merge_peft_adapter` should be using `merge_and_unload()` which correctly overrides the score. But I haven't yet managed to get good results using the adapter weights on the hub
</issue>
<code>
[start of examples/stack_llama/scripts/merge_peft_adapter.py]
1 from dataclasses import dataclass, field
2 from typing import Optional
3
4 import peft
5 import torch
6 from peft import PeftConfig, PeftModel
7 from peft.utils import _get_submodules
8 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
9
10
11 DEFAULT_PAD_TOKEN = "[PAD]"
12 DEFAULT_EOS_TOKEN = "</s>"
13 DEFAULT_BOS_TOKEN = "</s>"
14 DEFAULT_UNK_TOKEN = "</s>"
15
16
17 @dataclass
18 class ScriptArguments:
19 """
20 The name of the Casual LM model we wish to fine with PPO
21 """
22
23 adapter_model_name: Optional[str] = field(default=None, metadata={"help": "the model name"})
24 base_model_name: Optional[str] = field(default=None, metadata={"help": "the model name"})
25 output_name: Optional[str] = field(default=None, metadata={"help": "the model name"})
26
27
28 parser = HfArgumentParser(ScriptArguments)
29 script_args = parser.parse_args_into_dataclasses()[0]
30 assert script_args.adapter_model_name is not None, "please provide the name of the Adapter you would like to merge"
31 assert script_args.base_model_name is not None, "please provide the name of the Base model"
32 assert script_args.base_model_name is not None, "please provide the output name of the merged model"
33
34 peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)
35 model = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)
36 tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)
37 config = AutoConfig.from_pretrained(script_args.base_model_name)
38 architecture = config.architectures[0]
39 if "Llama" in architecture:
40 print("Setting EOS, BOS, and UNK tokens for LLama tokenizer")
41 tokenizer.add_special_tokens(
42 {
43 "eos_token": DEFAULT_EOS_TOKEN,
44 "bos_token": DEFAULT_BOS_TOKEN,
45 "unk_token": DEFAULT_UNK_TOKEN,
46 "pad_token": DEFAULT_PAD_TOKEN,
47 }
48 )
49
50 # Load the Lora model
51 model = PeftModel.from_pretrained(model, script_args.adapter_model_name)
52 model.eval()
53
54 key_list = [key for key, _ in model.base_model.model.named_modules() if "lora" not in key]
55 for key in key_list:
56 parent, target, target_name = _get_submodules(model.base_model.model, key)
57 if isinstance(target, peft.tuners.lora.Linear):
58 bias = target.bias is not None
59 new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)
60 model.base_model._replace_module(parent, target_name, new_module, target)
61
62 model = model.base_model.model
63
64 model.save_pretrained(f"{script_args.output_name}")
65 tokenizer.save_pretrained(f"{script_args.output_name}")
66 model.push_to_hub(f"{script_args.output_name}", use_temp_dir=False)
67
[end of examples/stack_llama/scripts/merge_peft_adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/stack_llama/scripts/merge_peft_adapter.py b/examples/stack_llama/scripts/merge_peft_adapter.py
--- a/examples/stack_llama/scripts/merge_peft_adapter.py
+++ b/examples/stack_llama/scripts/merge_peft_adapter.py
@@ -1,17 +1,9 @@
from dataclasses import dataclass, field
from typing import Optional
-import peft
import torch
from peft import PeftConfig, PeftModel
-from peft.utils import _get_submodules
-from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
-
-
-DEFAULT_PAD_TOKEN = "[PAD]"
-DEFAULT_EOS_TOKEN = "</s>"
-DEFAULT_BOS_TOKEN = "</s>"
-DEFAULT_UNK_TOKEN = "</s>"
+from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser
@dataclass
@@ -32,34 +24,23 @@
assert script_args.base_model_name is not None, "please provide the output name of the merged model"
peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)
-model = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)
-tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)
-config = AutoConfig.from_pretrained(script_args.base_model_name)
-architecture = config.architectures[0]
-if "Llama" in architecture:
- print("Setting EOS, BOS, and UNK tokens for LLama tokenizer")
- tokenizer.add_special_tokens(
- {
- "eos_token": DEFAULT_EOS_TOKEN,
- "bos_token": DEFAULT_BOS_TOKEN,
- "unk_token": DEFAULT_UNK_TOKEN,
- "pad_token": DEFAULT_PAD_TOKEN,
- }
+if peft_config.task_type == "SEQ_CLS":
+ # peft is for reward model so load sequence classification
+ model = AutoModelForSequenceClassification.from_pretrained(
+ script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16
+ )
+else:
+ model = AutoModelForCausalLM.from_pretrained(
+ script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16
)
+tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)
+
# Load the Lora model
model = PeftModel.from_pretrained(model, script_args.adapter_model_name)
model.eval()
-key_list = [key for key, _ in model.base_model.model.named_modules() if "lora" not in key]
-for key in key_list:
- parent, target, target_name = _get_submodules(model.base_model.model, key)
- if isinstance(target, peft.tuners.lora.Linear):
- bias = target.bias is not None
- new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)
- model.base_model._replace_module(parent, target_name, new_module, target)
-
-model = model.base_model.model
+model = model.merge_and_unload()
model.save_pretrained(f"{script_args.output_name}")
tokenizer.save_pretrained(f"{script_args.output_name}")
| {"golden_diff": "diff --git a/examples/stack_llama/scripts/merge_peft_adapter.py b/examples/stack_llama/scripts/merge_peft_adapter.py\n--- a/examples/stack_llama/scripts/merge_peft_adapter.py\n+++ b/examples/stack_llama/scripts/merge_peft_adapter.py\n@@ -1,17 +1,9 @@\n from dataclasses import dataclass, field\n from typing import Optional\n \n-import peft\n import torch\n from peft import PeftConfig, PeftModel\n-from peft.utils import _get_submodules\n-from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser\n-\n-\n-DEFAULT_PAD_TOKEN = \"[PAD]\"\n-DEFAULT_EOS_TOKEN = \"</s>\"\n-DEFAULT_BOS_TOKEN = \"</s>\"\n-DEFAULT_UNK_TOKEN = \"</s>\"\n+from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser\n \n \n @dataclass\n@@ -32,34 +24,23 @@\n assert script_args.base_model_name is not None, \"please provide the output name of the merged model\"\n \n peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)\n-model = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)\n-tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)\n-config = AutoConfig.from_pretrained(script_args.base_model_name)\n-architecture = config.architectures[0]\n-if \"Llama\" in architecture:\n- print(\"Setting EOS, BOS, and UNK tokens for LLama tokenizer\")\n- tokenizer.add_special_tokens(\n- {\n- \"eos_token\": DEFAULT_EOS_TOKEN,\n- \"bos_token\": DEFAULT_BOS_TOKEN,\n- \"unk_token\": DEFAULT_UNK_TOKEN,\n- \"pad_token\": DEFAULT_PAD_TOKEN,\n- }\n+if peft_config.task_type == \"SEQ_CLS\":\n+ # peft is for reward model so load sequence classification\n+ model = AutoModelForSequenceClassification.from_pretrained(\n+ script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16\n+ )\n+else:\n+ model = AutoModelForCausalLM.from_pretrained(\n+ script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16\n )\n \n+tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)\n+\n # Load the Lora model\n model = PeftModel.from_pretrained(model, script_args.adapter_model_name)\n model.eval()\n \n-key_list = [key for key, _ in model.base_model.model.named_modules() if \"lora\" not in key]\n-for key in key_list:\n- parent, target, target_name = _get_submodules(model.base_model.model, key)\n- if isinstance(target, peft.tuners.lora.Linear):\n- bias = target.bias is not None\n- new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n- model.base_model._replace_module(parent, target_name, new_module, target)\n-\n-model = model.base_model.model\n+model = model.merge_and_unload()\n \n model.save_pretrained(f\"{script_args.output_name}\")\n tokenizer.save_pretrained(f\"{script_args.output_name}\")\n", "issue": "Llama Reward Model is incorrectly merged\nAs mentioned in #287, `merge_peft_adapter` saves the Llama RM as a `LlamaForCausalLM` see [here](https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/merge_peft_adapter.py#L35)\r\n\r\nBut the reward model is trained and should be a `LlamaForSequenceClassification` and running `rl_training.py` gives the obvious warnings\r\n```\r\nSome weights of the model checkpoint at ./llama-7b-se-rm were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']\r\n- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\r\n- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\r\nSome weights of LlamaForSequenceClassification were not initialized from the model checkpoint at /home/toolkit/huggingface/llama-7b-rm and are newly initialized: ['score.weight']\r\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\r\n```\r\n\r\nWe should instead check whether we are merging the rm and then save as a the correct model \r\n\r\nAlso the `score.weight` is not being loaded as mentioned in #297 , see more info below\r\n\r\n\r\n--- update --\r\n\r\nIt seems that `merge_peft_adapter` should be using `merge_and_unload()` which correctly overrides the score. But I haven't yet managed to get good results using the adapter weights on the hub\n", "before_files": [{"content": "from dataclasses import dataclass, field\nfrom typing import Optional\n\nimport peft\nimport torch\nfrom peft import PeftConfig, PeftModel\nfrom peft.utils import _get_submodules\nfrom transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser\n\n\nDEFAULT_PAD_TOKEN = \"[PAD]\"\nDEFAULT_EOS_TOKEN = \"</s>\"\nDEFAULT_BOS_TOKEN = \"</s>\"\nDEFAULT_UNK_TOKEN = \"</s>\"\n\n\n@dataclass\nclass ScriptArguments:\n \"\"\"\n The name of the Casual LM model we wish to fine with PPO\n \"\"\"\n\n adapter_model_name: Optional[str] = field(default=None, metadata={\"help\": \"the model name\"})\n base_model_name: Optional[str] = field(default=None, metadata={\"help\": \"the model name\"})\n output_name: Optional[str] = field(default=None, metadata={\"help\": \"the model name\"})\n\n\nparser = HfArgumentParser(ScriptArguments)\nscript_args = parser.parse_args_into_dataclasses()[0]\nassert script_args.adapter_model_name is not None, \"please provide the name of the Adapter you would like to merge\"\nassert script_args.base_model_name is not None, \"please provide the name of the Base model\"\nassert script_args.base_model_name is not None, \"please provide the output name of the merged model\"\n\npeft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)\nmodel = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)\ntokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)\nconfig = AutoConfig.from_pretrained(script_args.base_model_name)\narchitecture = config.architectures[0]\nif \"Llama\" in architecture:\n print(\"Setting EOS, BOS, and UNK tokens for LLama tokenizer\")\n tokenizer.add_special_tokens(\n {\n \"eos_token\": DEFAULT_EOS_TOKEN,\n \"bos_token\": DEFAULT_BOS_TOKEN,\n \"unk_token\": DEFAULT_UNK_TOKEN,\n \"pad_token\": DEFAULT_PAD_TOKEN,\n }\n )\n\n# Load the Lora model\nmodel = PeftModel.from_pretrained(model, script_args.adapter_model_name)\nmodel.eval()\n\nkey_list = [key for key, _ in model.base_model.model.named_modules() if \"lora\" not in key]\nfor key in key_list:\n parent, target, target_name = _get_submodules(model.base_model.model, key)\n if isinstance(target, peft.tuners.lora.Linear):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n model.base_model._replace_module(parent, target_name, new_module, target)\n\nmodel = 
model.base_model.model\n\nmodel.save_pretrained(f\"{script_args.output_name}\")\ntokenizer.save_pretrained(f\"{script_args.output_name}\")\nmodel.push_to_hub(f\"{script_args.output_name}\", use_temp_dir=False)\n", "path": "examples/stack_llama/scripts/merge_peft_adapter.py"}]} | 1,674 | 704 |
gh_patches_debug_10391 | rasdani/github-patches | git_diff | streamlit__streamlit-5168 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uncaught exception: TypeError: Protocols cannot be instantiated
### Summary
TypeError: Protocols cannot be instantiated in 1.12.0
After upgrading from 1.11.1 to 1.12.0, streamlit server keeps raising exceptions and clients get stuck at loading page 'please wait...'
### Steps to reproduce
server error snippet:
```
Traceback (most recent call last):
File "/home/xx/.local/lib/python3.9/site-packages/tornado/http1connection.py", line 276, in _read_message
delegate.finish()
File "/home/xx/.local/lib/python3.9/site-packages/tornado/routing.py", line 268, in finish
self.delegate.finish()
File "/home/xx/.local/lib/python3.9/site-packages/tornado/web.py", line 2322, in finish
self.execute()
File "/home/xx/.local/lib/python3.9/site-packages/tornado/web.py", line 2344, in execute
self.handler = self.handler_class(
File "/home/xx/.local/lib/python3.9/site-packages/tornado/websocket.py", line 224, in __init__
super().__init__(application, request, **kwargs)
File "/home/xx/.local/lib/python3.9/site-packages/tornado/web.py", line 215, in __init__
super().__init__()
File "/usr/local/python3/lib/python3.9/typing.py", line 1083, in _no_init
raise TypeError('Protocols cannot be instantiated')
TypeError: Protocols cannot be instantiated
```
**Actual behavior:**
Get stuck at loading page 'please wait...'
### Debug info
- Streamlit version: 1.12.0
- Python version: 3.9.7
- Using Conda? PipEnv? PyEnv? Pex? using venv
- OS version: Debian 10
- Browser version: Chrome 104 and Safari 15
### Additional information
Roll back to streamlit version 1.11.1, everything works fine.
</issue>
<code>
[start of lib/setup.py]
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import setuptools
17 import sys
18
19 from setuptools.command.install import install
20
21
22 VERSION = "1.12.0" # PEP-440
23
24 NAME = "streamlit"
25
26 DESCRIPTION = "The fastest way to build data apps in Python"
27
28 LONG_DESCRIPTION = (
29 "Streamlit's open-source app framework is the easiest way "
30 "for data scientists and machine learning engineers to "
31 "create beautiful, performant apps in only a few hours! "
32 "All in pure Python. All for free."
33 )
34
35 # IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.
36 # And if you do add one, make the required version as general as possible.
37 # But include relevant lower bounds for any features we use from our dependencies.
38 INSTALL_REQUIRES = [
39 "altair>=3.2.0",
40 "blinker>=1.0.0",
41 "cachetools>=4.0",
42 "click>=7.0",
43 # 1.4 introduced the functionality found in python 3.8's importlib.metadata module
44 "importlib-metadata>=1.4",
45 "numpy",
46 "packaging>=14.1",
47 "pandas>=0.21.0",
48 "pillow>=6.2.0",
49 "protobuf<4,>=3.12",
50 "pyarrow>=4.0",
51 "pydeck>=0.1.dev5",
52 "pympler>=0.9",
53 "python-dateutil",
54 "requests>=2.4",
55 "rich>=10.11.0",
56 "semver",
57 "toml",
58 # 5.0 has a fix for etag header: https://github.com/tornadoweb/tornado/issues/2262
59 "tornado>=5.0",
60 "typing-extensions>=3.10.0.0",
61 "tzlocal>=1.1",
62 "validators>=0.2",
63 # Don't require watchdog on MacOS, since it'll fail without xcode tools.
64 # Without watchdog, we fallback to a polling file watcher to check for app changes.
65 "watchdog; platform_system != 'Darwin'",
66 ]
67
68 # We want to exclude some dependencies in our internal conda distribution of
69 # Streamlit.
70 CONDA_OPTIONAL_DEPENDENCIES = [
71 "gitpython!=3.1.19",
72 ]
73
74 # NOTE: ST_CONDA_BUILD is used here (even though CONDA_BUILD is set
75 # automatically when using the `conda build` command) because the
76 # `load_setup_py_data()` conda build helper function does not have the
77 # CONDA_BUILD environment variable set when it runs to generate our build
78 # recipe from meta.yaml.
79 if not os.getenv("ST_CONDA_BUILD"):
80 INSTALL_REQUIRES.extend(CONDA_OPTIONAL_DEPENDENCIES)
81
82
83 class VerifyVersionCommand(install):
84 """Custom command to verify that the git tag matches our version"""
85
86 description = "verify that the git tag matches our version"
87
88 def run(self):
89 tag = os.getenv("CIRCLE_TAG")
90
91 if tag != VERSION:
92 info = "Git tag: {0} does not match the version of this app: {1}".format(
93 tag, VERSION
94 )
95 sys.exit(info)
96
97
98 setuptools.setup(
99 name=NAME,
100 version=VERSION,
101 description=DESCRIPTION,
102 long_description=LONG_DESCRIPTION,
103 url="https://streamlit.io",
104 project_urls={
105 "Source": "https://github.com/streamlit/streamlit",
106 },
107 author="Streamlit Inc",
108 author_email="[email protected]",
109 python_requires=">=3.7",
110 license="Apache 2",
111 # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html
112 package_data={"streamlit": ["py.typed", "hello/**/*.py"]},
113 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
114 # Requirements
115 install_requires=INSTALL_REQUIRES,
116 zip_safe=False, # install source files not egg
117 include_package_data=True, # copy html and friends
118 entry_points={"console_scripts": ["streamlit = streamlit.web.cli:main"]},
119 # For Windows so that streamlit * commands work ie.
120 # - streamlit version
121 # - streamlit hello
122 scripts=["bin/streamlit.cmd"],
123 cmdclass={
124 "verify": VerifyVersionCommand,
125 },
126 )
127
[end of lib/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/setup.py b/lib/setup.py
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -106,7 +106,10 @@
},
author="Streamlit Inc",
author_email="[email protected]",
- python_requires=">=3.7",
+ # We exclude Python 3.9.7 from our compatible versions due to a bug in that version
+ # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and
+ # https://bugs.python.org/issue45121
+ python_requires=">=3.7, !=3.9.7",
license="Apache 2",
# PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html
package_data={"streamlit": ["py.typed", "hello/**/*.py"]},
| {"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -106,7 +106,10 @@\n },\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n- python_requires=\">=3.7\",\n+ # We exclude Python 3.9.7 from our compatible versions due to a bug in that version\n+ # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and\n+ # https://bugs.python.org/issue45121\n+ python_requires=\">=3.7, !=3.9.7\",\n license=\"Apache 2\",\n # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html\n package_data={\"streamlit\": [\"py.typed\", \"hello/**/*.py\"]},\n", "issue": "Uncaught exception: TypeError: Protocols cannot be instantiated\n### Summary\r\n\r\nTypeError: Protocols cannot be instantiated in 1.12.0\r\nAfter upgrading from 1.11.1 to 1.12.0, streamlit server keeps raising exceptions and clients get stuck at loading page 'please wait...'\r\n\r\n### Steps to reproduce\r\n\r\nserver error snippet:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/xx/.local/lib/python3.9/site-packages/tornado/http1connection.py\", line 276, in _read_message\r\n delegate.finish()\r\n File \"/home/xx/.local/lib/python3.9/site-packages/tornado/routing.py\", line 268, in finish\r\n self.delegate.finish()\r\n File \"/home/xx/.local/lib/python3.9/site-packages/tornado/web.py\", line 2322, in finish\r\n self.execute()\r\n File \"/home/xx/.local/lib/python3.9/site-packages/tornado/web.py\", line 2344, in execute\r\n self.handler = self.handler_class(\r\n File \"/home/xx/.local/lib/python3.9/site-packages/tornado/websocket.py\", line 224, in __init__\r\n super().__init__(application, request, **kwargs)\r\n File \"/home/xx/.local/lib/python3.9/site-packages/tornado/web.py\", line 215, in __init__\r\n super().__init__()\r\n File \"/usr/local/python3/lib/python3.9/typing.py\", line 1083, in _no_init\r\n raise TypeError('Protocols cannot be instantiated')\r\nTypeError: Protocols cannot be instantiated\r\n```\r\n\r\n**Actual behavior:**\r\n\r\nGet stuck at loading page 'please wait...' \r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.12.0\r\n- Python version: 3.9.7\r\n- Using Conda? PipEnv? PyEnv? Pex? using venv\r\n- OS version: Debian 10 \r\n- Browser version: Chrome 104 and Safari 15\r\n\r\n### Additional information\r\n\r\nRoll back to streamlit version 1.11.1, everything works fine.\r\n\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\n\nVERSION = \"1.12.0\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. 
All for free.\"\n)\n\n# IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.\n# And if you do add one, make the required version as general as possible.\n# But include relevant lower bounds for any features we use from our dependencies.\nINSTALL_REQUIRES = [\n \"altair>=3.2.0\",\n \"blinker>=1.0.0\",\n \"cachetools>=4.0\",\n \"click>=7.0\",\n # 1.4 introduced the functionality found in python 3.8's importlib.metadata module\n \"importlib-metadata>=1.4\",\n \"numpy\",\n \"packaging>=14.1\",\n \"pandas>=0.21.0\",\n \"pillow>=6.2.0\",\n \"protobuf<4,>=3.12\",\n \"pyarrow>=4.0\",\n \"pydeck>=0.1.dev5\",\n \"pympler>=0.9\",\n \"python-dateutil\",\n \"requests>=2.4\",\n \"rich>=10.11.0\",\n \"semver\",\n \"toml\",\n # 5.0 has a fix for etag header: https://github.com/tornadoweb/tornado/issues/2262\n \"tornado>=5.0\",\n \"typing-extensions>=3.10.0.0\",\n \"tzlocal>=1.1\",\n \"validators>=0.2\",\n # Don't require watchdog on MacOS, since it'll fail without xcode tools.\n # Without watchdog, we fallback to a polling file watcher to check for app changes.\n \"watchdog; platform_system != 'Darwin'\",\n]\n\n# We want to exclude some dependencies in our internal conda distribution of\n# Streamlit.\nCONDA_OPTIONAL_DEPENDENCIES = [\n \"gitpython!=3.1.19\",\n]\n\n# NOTE: ST_CONDA_BUILD is used here (even though CONDA_BUILD is set\n# automatically when using the `conda build` command) because the\n# `load_setup_py_data()` conda build helper function does not have the\n# CONDA_BUILD environment variable set when it runs to generate our build\n# recipe from meta.yaml.\nif not os.getenv(\"ST_CONDA_BUILD\"):\n INSTALL_REQUIRES.extend(CONDA_OPTIONAL_DEPENDENCIES)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n project_urls={\n \"Source\": \"https://github.com/streamlit/streamlit\",\n },\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n license=\"Apache 2\",\n # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html\n package_data={\"streamlit\": [\"py.typed\", \"hello/**/*.py\"]},\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=INSTALL_REQUIRES,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.web.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]} | 2,374 | 199 |
gh_patches_debug_6809 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError while trying to fetch an unbounded PaginatedList's count
Accessing the `totalCount` attribute on a `PaginatedList` returned from the `get_repos` method throws a KeyError
Trace
```py
repos = github_client.get_repos()
repos.totalCount
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-42-68d28c2d7948> in <module>
----> 1 repos.totalCount
e:\software\python36\lib\site-packages\github\PaginatedList.py in totalCount(self)
164 links = self.__parseLinkHeader(headers)
165 lastUrl = links.get("last")
--> 166 self.__totalCount = int(parse_qs(lastUrl)["page"][0])
167 return self.__totalCount
168
KeyError: 'page'
```
</issue>
<code>
[start of github/PaginatedList.py]
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 AKFish <[email protected]> #
8 # Copyright 2013 Bill Mill <[email protected]> #
9 # Copyright 2013 Vincent Jacques <[email protected]> #
10 # Copyright 2013 davidbrai <[email protected]> #
11 # Copyright 2014 Thialfihar <[email protected]> #
12 # Copyright 2014 Vincent Jacques <[email protected]> #
13 # Copyright 2015 Dan Vanderkam <[email protected]> #
14 # Copyright 2015 Eliot Walker <[email protected]> #
15 # Copyright 2016 Peter Buckley <[email protected]> #
16 # Copyright 2017 Jannis Gebauer <[email protected]> #
17 # Copyright 2018 Gilad Shefer <[email protected]> #
18 # Copyright 2018 Joel Koglin <[email protected]> #
19 # Copyright 2018 Wan Liuyang <[email protected]> #
20 # Copyright 2018 sfdye <[email protected]> #
21 # #
22 # This file is part of PyGithub. #
23 # http://pygithub.readthedocs.io/ #
24 # #
25 # PyGithub is free software: you can redistribute it and/or modify it under #
26 # the terms of the GNU Lesser General Public License as published by the Free #
27 # Software Foundation, either version 3 of the License, or (at your option) #
28 # any later version. #
29 # #
30 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
31 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
32 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
33 # details. #
34 # #
35 # You should have received a copy of the GNU Lesser General Public License #
36 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
37 # #
38 ################################################################################
39
40 from urllib.parse import parse_qs
41
42
43 class PaginatedListBase:
44 def __init__(self):
45 self.__elements = list()
46
47 def __getitem__(self, index):
48 assert isinstance(index, (int, slice))
49 if isinstance(index, int):
50 self.__fetchToIndex(index)
51 return self.__elements[index]
52 else:
53 return self._Slice(self, index)
54
55 def __iter__(self):
56 for element in self.__elements:
57 yield element
58 while self._couldGrow():
59 newElements = self._grow()
60 for element in newElements:
61 yield element
62
63 def _isBiggerThan(self, index):
64 return len(self.__elements) > index or self._couldGrow()
65
66 def __fetchToIndex(self, index):
67 while len(self.__elements) <= index and self._couldGrow():
68 self._grow()
69
70 def _grow(self):
71 newElements = self._fetchNextPage()
72 self.__elements += newElements
73 return newElements
74
75 class _Slice:
76 def __init__(self, theList, theSlice):
77 self.__list = theList
78 self.__start = theSlice.start or 0
79 self.__stop = theSlice.stop
80 self.__step = theSlice.step or 1
81
82 def __iter__(self):
83 index = self.__start
84 while not self.__finished(index):
85 if self.__list._isBiggerThan(index):
86 yield self.__list[index]
87 index += self.__step
88 else:
89 return
90
91 def __finished(self, index):
92 return self.__stop is not None and index >= self.__stop
93
94
95 class PaginatedList(PaginatedListBase):
96 """
97 This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
98
99 You can simply enumerate through instances of this class::
100
101 for repo in user.get_repos():
102 print(repo.name)
103
104 If you want to know the total number of items in the list::
105
106 print(user.get_repos().totalCount)
107
108 You can also index them or take slices::
109
110 second_repo = user.get_repos()[1]
111 first_repos = user.get_repos()[:10]
112
113 If you want to iterate in reversed order, just do::
114
115 for repo in user.get_repos().reversed:
116 print(repo.name)
117
118 And if you really need it, you can explicitly access a specific page::
119
120 some_repos = user.get_repos().get_page(0)
121 some_other_repos = user.get_repos().get_page(3)
122 """
123
124 def __init__(
125 self,
126 contentClass,
127 requester,
128 firstUrl,
129 firstParams,
130 headers=None,
131 list_item="items",
132 ):
133 super().__init__()
134 self.__requester = requester
135 self.__contentClass = contentClass
136 self.__firstUrl = firstUrl
137 self.__firstParams = firstParams or ()
138 self.__nextUrl = firstUrl
139 self.__nextParams = firstParams or {}
140 self.__headers = headers
141 self.__list_item = list_item
142 if self.__requester.per_page != 30:
143 self.__nextParams["per_page"] = self.__requester.per_page
144 self._reversed = False
145 self.__totalCount = None
146
147 @property
148 def totalCount(self):
149 if not self.__totalCount:
150 params = {} if self.__nextParams is None else self.__nextParams.copy()
151 # set per_page = 1 so the totalCount is just the number of pages
152 params.update({"per_page": 1})
153 headers, data = self.__requester.requestJsonAndCheck(
154 "GET", self.__firstUrl, parameters=params, headers=self.__headers
155 )
156 if "link" not in headers:
157 if data and "total_count" in data:
158 self.__totalCount = data["total_count"]
159 elif data:
160 self.__totalCount = len(data)
161 else:
162 self.__totalCount = 0
163 else:
164 links = self.__parseLinkHeader(headers)
165 lastUrl = links.get("last")
166 self.__totalCount = int(parse_qs(lastUrl)["page"][0])
167 return self.__totalCount
168
169 def _getLastPageUrl(self):
170 headers, data = self.__requester.requestJsonAndCheck(
171 "GET", self.__firstUrl, parameters=self.__nextParams, headers=self.__headers
172 )
173 links = self.__parseLinkHeader(headers)
174 lastUrl = links.get("last")
175 return lastUrl
176
177 @property
178 def reversed(self):
179 r = PaginatedList(
180 self.__contentClass,
181 self.__requester,
182 self.__firstUrl,
183 self.__firstParams,
184 self.__headers,
185 self.__list_item,
186 )
187 r.__reverse()
188 return r
189
190 def __reverse(self):
191 self._reversed = True
192 lastUrl = self._getLastPageUrl()
193 if lastUrl:
194 self.__nextUrl = lastUrl
195
196 def _couldGrow(self):
197 return self.__nextUrl is not None
198
199 def _fetchNextPage(self):
200 headers, data = self.__requester.requestJsonAndCheck(
201 "GET", self.__nextUrl, parameters=self.__nextParams, headers=self.__headers
202 )
203 data = data if data else []
204
205 self.__nextUrl = None
206 if len(data) > 0:
207 links = self.__parseLinkHeader(headers)
208 if self._reversed:
209 if "prev" in links:
210 self.__nextUrl = links["prev"]
211 elif "next" in links:
212 self.__nextUrl = links["next"]
213 self.__nextParams = None
214
215 if self.__list_item in data:
216 self.__totalCount = data.get("total_count")
217 data = data[self.__list_item]
218
219 content = [
220 self.__contentClass(self.__requester, headers, element, completed=False)
221 for element in data
222 if element is not None
223 ]
224 if self._reversed:
225 return content[::-1]
226 return content
227
228 def __parseLinkHeader(self, headers):
229 links = {}
230 if "link" in headers:
231 linkHeaders = headers["link"].split(", ")
232 for linkHeader in linkHeaders:
233 url, rel, *rest = linkHeader.split("; ")
234 url = url[1:-1]
235 rel = rel[5:-1]
236 links[rel] = url
237 return links
238
239 def get_page(self, page):
240 params = dict(self.__firstParams)
241 if page != 0:
242 params["page"] = page + 1
243 if self.__requester.per_page != 30:
244 params["per_page"] = self.__requester.per_page
245 headers, data = self.__requester.requestJsonAndCheck(
246 "GET", self.__firstUrl, parameters=params, headers=self.__headers
247 )
248
249 if self.__list_item in data:
250 self.__totalCount = data.get("total_count")
251 data = data[self.__list_item]
252
253 return [
254 self.__contentClass(self.__requester, headers, element, completed=False)
255 for element in data
256 ]
257
[end of github/PaginatedList.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/github/PaginatedList.py b/github/PaginatedList.py
--- a/github/PaginatedList.py
+++ b/github/PaginatedList.py
@@ -163,7 +163,10 @@
else:
links = self.__parseLinkHeader(headers)
lastUrl = links.get("last")
- self.__totalCount = int(parse_qs(lastUrl)["page"][0])
+ if lastUrl:
+ self.__totalCount = int(parse_qs(lastUrl)["page"][0])
+ else:
+ self.__totalCount = 0
return self.__totalCount
def _getLastPageUrl(self):
| {"golden_diff": "diff --git a/github/PaginatedList.py b/github/PaginatedList.py\n--- a/github/PaginatedList.py\n+++ b/github/PaginatedList.py\n@@ -163,7 +163,10 @@\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n- self.__totalCount = int(parse_qs(lastUrl)[\"page\"][0])\n+ if lastUrl:\n+ self.__totalCount = int(parse_qs(lastUrl)[\"page\"][0])\n+ else:\n+ self.__totalCount = 0\n return self.__totalCount\n \n def _getLastPageUrl(self):\n", "issue": "KeyError while trying to fetch an unbounded PaginatedList's count \nAccessing the `totalCount` attribute on a `PaginatedList` returned from the `get_repos` method throws a KeyError\r\n\r\nTrace\r\n```py\r\nrepos = github_client.get_repos()\r\n\r\nrepos.totalCount\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-42-68d28c2d7948> in <module>\r\n----> 1 repos.totalCount\r\n\r\ne:\\software\\python36\\lib\\site-packages\\github\\PaginatedList.py in totalCount(self)\r\n 164 links = self.__parseLinkHeader(headers)\r\n 165 lastUrl = links.get(\"last\")\r\n--> 166 self.__totalCount = int(parse_qs(lastUrl)[\"page\"][0])\r\n 167 return self.__totalCount\r\n 168 \r\n\r\nKeyError: 'page'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Bill Mill <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 davidbrai <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Dan Vanderkam <[email protected]> #\n# Copyright 2015 Eliot Walker <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2018 Gilad Shefer <[email protected]> #\n# Copyright 2018 Joel Koglin <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nfrom urllib.parse import parse_qs\n\n\nclass PaginatedListBase:\n def __init__(self):\n self.__elements = list()\n\n def __getitem__(self, index):\n assert isinstance(index, (int, slice))\n if isinstance(index, int):\n self.__fetchToIndex(index)\n return self.__elements[index]\n else:\n return self._Slice(self, index)\n\n def __iter__(self):\n for element in self.__elements:\n yield element\n while self._couldGrow():\n newElements = self._grow()\n for element in newElements:\n yield element\n\n def _isBiggerThan(self, index):\n return len(self.__elements) > index or self._couldGrow()\n\n def __fetchToIndex(self, index):\n while len(self.__elements) <= index and self._couldGrow():\n self._grow()\n\n def _grow(self):\n newElements = self._fetchNextPage()\n self.__elements += newElements\n return newElements\n\n class _Slice:\n def __init__(self, theList, theSlice):\n self.__list = theList\n self.__start = theSlice.start or 0\n self.__stop = theSlice.stop\n self.__step = theSlice.step or 1\n\n def __iter__(self):\n index = self.__start\n while not self.__finished(index):\n if self.__list._isBiggerThan(index):\n yield self.__list[index]\n index += self.__step\n else:\n return\n\n def __finished(self, index):\n return self.__stop is not None and index >= self.__stop\n\n\nclass PaginatedList(PaginatedListBase):\n \"\"\"\n This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.\n\n You can simply enumerate through instances of this class::\n\n for repo in user.get_repos():\n print(repo.name)\n\n If you want to know the total number of items in the list::\n\n print(user.get_repos().totalCount)\n\n You can also index them or take slices::\n\n second_repo = user.get_repos()[1]\n first_repos = user.get_repos()[:10]\n\n If you want to iterate in reversed order, just do::\n\n for repo in user.get_repos().reversed:\n print(repo.name)\n\n And if you really need it, you can explicitly access a specific page::\n\n some_repos = user.get_repos().get_page(0)\n some_other_repos = user.get_repos().get_page(3)\n \"\"\"\n\n def __init__(\n self,\n contentClass,\n requester,\n firstUrl,\n firstParams,\n headers=None,\n list_item=\"items\",\n ):\n super().__init__()\n self.__requester = requester\n self.__contentClass = contentClass\n self.__firstUrl = firstUrl\n self.__firstParams = firstParams or ()\n self.__nextUrl = firstUrl\n self.__nextParams = firstParams or {}\n self.__headers = headers\n self.__list_item = list_item\n if self.__requester.per_page != 30:\n self.__nextParams[\"per_page\"] = self.__requester.per_page\n self._reversed = False\n self.__totalCount = None\n\n @property\n def totalCount(self):\n if not self.__totalCount:\n params = {} if self.__nextParams is None else self.__nextParams.copy()\n # set per_page = 1 so the totalCount is just the number of pages\n params.update({\"per_page\": 1})\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\", self.__firstUrl, parameters=params, headers=self.__headers\n )\n if \"link\" not in headers:\n if data and \"total_count\" in data:\n self.__totalCount = data[\"total_count\"]\n elif data:\n self.__totalCount = len(data)\n else:\n self.__totalCount = 0\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n self.__totalCount = int(parse_qs(lastUrl)[\"page\"][0])\n return self.__totalCount\n\n def _getLastPageUrl(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\", 
self.__firstUrl, parameters=self.__nextParams, headers=self.__headers\n )\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n return lastUrl\n\n @property\n def reversed(self):\n r = PaginatedList(\n self.__contentClass,\n self.__requester,\n self.__firstUrl,\n self.__firstParams,\n self.__headers,\n self.__list_item,\n )\n r.__reverse()\n return r\n\n def __reverse(self):\n self._reversed = True\n lastUrl = self._getLastPageUrl()\n if lastUrl:\n self.__nextUrl = lastUrl\n\n def _couldGrow(self):\n return self.__nextUrl is not None\n\n def _fetchNextPage(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\", self.__nextUrl, parameters=self.__nextParams, headers=self.__headers\n )\n data = data if data else []\n\n self.__nextUrl = None\n if len(data) > 0:\n links = self.__parseLinkHeader(headers)\n if self._reversed:\n if \"prev\" in links:\n self.__nextUrl = links[\"prev\"]\n elif \"next\" in links:\n self.__nextUrl = links[\"next\"]\n self.__nextParams = None\n\n if self.__list_item in data:\n self.__totalCount = data.get(\"total_count\")\n data = data[self.__list_item]\n\n content = [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n if element is not None\n ]\n if self._reversed:\n return content[::-1]\n return content\n\n def __parseLinkHeader(self, headers):\n links = {}\n if \"link\" in headers:\n linkHeaders = headers[\"link\"].split(\", \")\n for linkHeader in linkHeaders:\n url, rel, *rest = linkHeader.split(\"; \")\n url = url[1:-1]\n rel = rel[5:-1]\n links[rel] = url\n return links\n\n def get_page(self, page):\n params = dict(self.__firstParams)\n if page != 0:\n params[\"page\"] = page + 1\n if self.__requester.per_page != 30:\n params[\"per_page\"] = self.__requester.per_page\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\", self.__firstUrl, parameters=params, headers=self.__headers\n )\n\n if self.__list_item in data:\n self.__totalCount = data.get(\"total_count\")\n data = data[self.__list_item]\n\n return [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n ]\n", "path": "github/PaginatedList.py"}]} | 3,511 | 138 |
gh_patches_debug_4453 | rasdani/github-patches | git_diff | translate__pootle-4350 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Editor templates broken in non-debug mode
Since switching to Django 1.8 in master, the [scripts section of the editor templates](https://github.com/translate/pootle/blob/master/pootle/templates/editor/_scripts.html) doesn't render when `DEBUG = False`.
I might be doing something wrong, but I tried removing any template customizations we have, also clearing out caches, trying different browsers, always with the same result. I can reproduce this locally and in our staging server.
</issue>
<code>
[start of pootle/apps/pootle_store/templatetags/store_tags.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import re
11
12 from diff_match_patch import diff_match_patch
13 from translate.misc.multistring import multistring
14 from translate.storage.placeables import general
15
16 from django import template
17 from django.core.exceptions import ObjectDoesNotExist
18 from django.template.defaultfilters import stringfilter
19 from django.template.loader import get_template
20 from django.utils.safestring import mark_safe
21 from django.utils.translation import ugettext as _
22
23 from pootle_store.fields import list_empty
24
25
26 register = template.Library()
27
28
29 IMAGE_URL_RE = re.compile("(https?://[^\s]+\.(png|jpe?g|gif))", re.IGNORECASE)
30
31
32 @register.filter
33 def image_urls(text):
34 """Return a list of image URLs extracted from `text`."""
35 return map(lambda x: x[0], IMAGE_URL_RE.findall(text))
36
37
38 ESCAPE_RE = re.compile('<[^<]*?>|\\\\|\r\n|[\r\n\t&<>]')
39
40
41 def fancy_escape(text):
42 """Replace special chars with entities, and highlight XML tags and
43 whitespaces.
44 """
45 def replace(match):
46 escape_highlight = ('<span class="highlight-escape '
47 'js-editor-copytext">%s</span>')
48 submap = {
49 '\r\n': (escape_highlight % '\\r\\n') + '<br/>\n',
50 '\r': (escape_highlight % '\\r') + '<br/>\n',
51 '\n': (escape_highlight % '\\n') + '<br/>\n',
52 '\t': (escape_highlight % '\\t'),
53 '&': '&',
54 '<': '<',
55 '>': '>',
56 '\\': (escape_highlight % '\\\\'),
57 }
58 try:
59 return submap[match.group()]
60 except KeyError:
61 html_highlight = ('<span class="highlight-html '
62 'js-editor-copytext"><%s></span>')
63 return html_highlight % fancy_escape(match.group()[1:-1])
64
65 return ESCAPE_RE.sub(replace, text)
66
67
68 WHITESPACE_RE = re.compile('^ +| +$|[\r\n\t] +| {2,}')
69
70
71 def fancy_spaces(text):
72 """Highlight spaces to make them easily visible."""
73 def replace(match):
74 fancy_space = '<span class="translation-space"> </span>'
75 if match.group().startswith(' '):
76 return fancy_space * len(match.group())
77 return match.group()[0] + fancy_space * (len(match.group()) - 1)
78 return WHITESPACE_RE.sub(replace, text)
79
80
81 PUNCTUATION_RE = general.PunctuationPlaceable().regex
82
83
84 def fancy_punctuation_chars(text):
85 """Wrap punctuation chars found in the ``text`` around tags."""
86 def replace(match):
87 fancy_special_char = ('<span class="highlight-punctuation '
88 'js-editor-copytext">%s</span>')
89 return fancy_special_char % match.group()
90
91 return PUNCTUATION_RE.sub(replace, text)
92
93
94 @register.filter
95 @stringfilter
96 def fancy_highlight(text):
97 return mark_safe(fancy_punctuation_chars(fancy_spaces(fancy_escape(text))))
98
99
100 def call_highlight(old, new):
101 """Calls diff highlighting code only if the target is set.
102 Otherwise, highlight as a normal unit.
103 """
104 if isinstance(old, multistring):
105 old_value = old.strings
106 else:
107 old_value = old
108
109 if list_empty(old_value):
110 return fancy_highlight(new)
111
112 return highlight_diffs(old, new)
113
114
115 differencer = diff_match_patch()
116
117
118 def highlight_diffs(old, new):
119 """Highlight the differences between old and new."""
120
121 textdiff = u"" # to store the final result
122 removed = u"" # the removed text that we might still want to add
123 diff = differencer.diff_main(old, new)
124 differencer.diff_cleanupSemantic(diff)
125 for op, text in diff:
126 if op == 0: # equality
127 if removed:
128 textdiff += '<span class="diff-delete">%s</span>' % \
129 fancy_escape(removed)
130 removed = u""
131 textdiff += fancy_escape(text)
132 elif op == 1: # insertion
133 if removed:
134 # this is part of a substitution, not a plain insertion. We
135 # will format this differently.
136 textdiff += '<span class="diff-replace">%s</span>' % \
137 fancy_escape(text)
138 removed = u""
139 else:
140 textdiff += '<span class="diff-insert">%s</span>' % \
141 fancy_escape(text)
142 elif op == -1: # deletion
143 removed = text
144 if removed:
145 textdiff += '<span class="diff-delete">%s</span>' % \
146 fancy_escape(removed)
147 return mark_safe(textdiff)
148
149
150 @register.filter('pluralize_source')
151 def pluralize_source(unit):
152 if not unit.hasplural():
153 return [(0, unit.source, None)]
154
155 count = len(unit.source.strings)
156 if count == 1:
157 return [(0, unit.source.strings[0], "%s+%s" % (_('Singular'),
158 _('Plural')))]
159
160 if count == 2:
161 return [(0, unit.source.strings[0], _('Singular')),
162 (1, unit.source.strings[1], _('Plural'))]
163
164 forms = []
165 for i, source in enumerate(unit.source.strings):
166 forms.append((i, source, _('Plural Form %d', i)))
167 return forms
168
169
170 @register.filter('pluralize_target')
171 def pluralize_target(unit, nplurals=None):
172 if not unit.hasplural():
173 return [(0, unit.target, None)]
174
175 if nplurals is None:
176 try:
177 nplurals = unit.store.translation_project.language.nplurals
178 except ObjectDoesNotExist:
179 pass
180 forms = []
181 if nplurals is None:
182 for i, target in enumerate(unit.target.strings):
183 forms.append((i, target, _('Plural Form %d', i)))
184 else:
185 for i in range(nplurals):
186 try:
187 target = unit.target.strings[i]
188 except IndexError:
189 target = ''
190 forms.append((i, target, _('Plural Form %d', i)))
191
192 return forms
193
194
195 @register.filter('pluralize_diff_sugg')
196 def pluralize_diff_sugg(sugg):
197 unit = sugg.unit
198 if not unit.hasplural():
199 return [
200 (0, sugg.target, call_highlight(unit.target, sugg.target), None)
201 ]
202
203 forms = []
204 for i, target in enumerate(sugg.target.strings):
205 if i < len(unit.target.strings):
206 sugg_text = unit.target.strings[i]
207 else:
208 sugg_text = ''
209
210 forms.append((
211 i, target, call_highlight(sugg_text, target),
212 _('Plural Form %d', i)
213 ))
214
215 return forms
216
217
218 @register.tag(name="include_raw")
219 def do_include_raw(parser, token):
220 """
221 Performs a template include without parsing the context, just dumps
222 the template in.
223 Source: http://djangosnippets.org/snippets/1684/
224 """
225 bits = token.split_contents()
226 if len(bits) != 2:
227 raise template.TemplateSyntaxError(
228 "%r tag takes one argument: the name of the template "
229 "to be included" % bits[0]
230 )
231
232 template_name = bits[1]
233 if (template_name[0] in ('"', "'") and
234 template_name[-1] == template_name[0]):
235 template_name = template_name[1:-1]
236
237 source, path = get_template(
238 template_name).origin.loader(template_name)
239
240 return template.base.TextNode(source)
241
[end of pootle/apps/pootle_store/templatetags/store_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_store/templatetags/store_tags.py b/pootle/apps/pootle_store/templatetags/store_tags.py
--- a/pootle/apps/pootle_store/templatetags/store_tags.py
+++ b/pootle/apps/pootle_store/templatetags/store_tags.py
@@ -234,7 +234,8 @@
template_name[-1] == template_name[0]):
template_name = template_name[1:-1]
- source, path = get_template(
- template_name).origin.loader(template_name)
-
- return template.base.TextNode(source)
+ return template.base.TextNode(
+ u"\n".join(
+ [x.s
+ for x
+ in get_template(template_name).template.nodelist]))
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/templatetags/store_tags.py b/pootle/apps/pootle_store/templatetags/store_tags.py\n--- a/pootle/apps/pootle_store/templatetags/store_tags.py\n+++ b/pootle/apps/pootle_store/templatetags/store_tags.py\n@@ -234,7 +234,8 @@\n template_name[-1] == template_name[0]):\n template_name = template_name[1:-1]\n \n- source, path = get_template(\n- template_name).origin.loader(template_name)\n-\n- return template.base.TextNode(source)\n+ return template.base.TextNode(\n+ u\"\\n\".join(\n+ [x.s\n+ for x\n+ in get_template(template_name).template.nodelist]))\n", "issue": "Editor templates broken in non-debug mode\nSince switching to Django 1.8 in master, the [scripts section of the editor templates](https://github.com/translate/pootle/blob/master/pootle/templates/editor/_scripts.html) doesn't render when `DEBUG = False`.\n\nI might be doing something wrong, but I tried removing any template customizations we have, also clearing out caches, trying different browsers, always with the same result. I can reproduce this locally and in our staging server.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\n\nfrom diff_match_patch import diff_match_patch\nfrom translate.misc.multistring import multistring\nfrom translate.storage.placeables import general\n\nfrom django import template\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.defaultfilters import stringfilter\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\n\nfrom pootle_store.fields import list_empty\n\n\nregister = template.Library()\n\n\nIMAGE_URL_RE = re.compile(\"(https?://[^\\s]+\\.(png|jpe?g|gif))\", re.IGNORECASE)\n\n\[email protected]\ndef image_urls(text):\n \"\"\"Return a list of image URLs extracted from `text`.\"\"\"\n return map(lambda x: x[0], IMAGE_URL_RE.findall(text))\n\n\nESCAPE_RE = re.compile('<[^<]*?>|\\\\\\\\|\\r\\n|[\\r\\n\\t&<>]')\n\n\ndef fancy_escape(text):\n \"\"\"Replace special chars with entities, and highlight XML tags and\n whitespaces.\n \"\"\"\n def replace(match):\n escape_highlight = ('<span class=\"highlight-escape '\n 'js-editor-copytext\">%s</span>')\n submap = {\n '\\r\\n': (escape_highlight % '\\\\r\\\\n') + '<br/>\\n',\n '\\r': (escape_highlight % '\\\\r') + '<br/>\\n',\n '\\n': (escape_highlight % '\\\\n') + '<br/>\\n',\n '\\t': (escape_highlight % '\\\\t'),\n '&': '&',\n '<': '<',\n '>': '>',\n '\\\\': (escape_highlight % '\\\\\\\\'),\n }\n try:\n return submap[match.group()]\n except KeyError:\n html_highlight = ('<span class=\"highlight-html '\n 'js-editor-copytext\"><%s></span>')\n return html_highlight % fancy_escape(match.group()[1:-1])\n\n return ESCAPE_RE.sub(replace, text)\n\n\nWHITESPACE_RE = re.compile('^ +| +$|[\\r\\n\\t] +| {2,}')\n\n\ndef fancy_spaces(text):\n \"\"\"Highlight spaces to make them easily visible.\"\"\"\n def replace(match):\n fancy_space = '<span class=\"translation-space\"> </span>'\n if match.group().startswith(' '):\n return fancy_space * len(match.group())\n return match.group()[0] + fancy_space * (len(match.group()) - 1)\n return WHITESPACE_RE.sub(replace, text)\n\n\nPUNCTUATION_RE = 
general.PunctuationPlaceable().regex\n\n\ndef fancy_punctuation_chars(text):\n \"\"\"Wrap punctuation chars found in the ``text`` around tags.\"\"\"\n def replace(match):\n fancy_special_char = ('<span class=\"highlight-punctuation '\n 'js-editor-copytext\">%s</span>')\n return fancy_special_char % match.group()\n\n return PUNCTUATION_RE.sub(replace, text)\n\n\[email protected]\n@stringfilter\ndef fancy_highlight(text):\n return mark_safe(fancy_punctuation_chars(fancy_spaces(fancy_escape(text))))\n\n\ndef call_highlight(old, new):\n \"\"\"Calls diff highlighting code only if the target is set.\n Otherwise, highlight as a normal unit.\n \"\"\"\n if isinstance(old, multistring):\n old_value = old.strings\n else:\n old_value = old\n\n if list_empty(old_value):\n return fancy_highlight(new)\n\n return highlight_diffs(old, new)\n\n\ndifferencer = diff_match_patch()\n\n\ndef highlight_diffs(old, new):\n \"\"\"Highlight the differences between old and new.\"\"\"\n\n textdiff = u\"\" # to store the final result\n removed = u\"\" # the removed text that we might still want to add\n diff = differencer.diff_main(old, new)\n differencer.diff_cleanupSemantic(diff)\n for op, text in diff:\n if op == 0: # equality\n if removed:\n textdiff += '<span class=\"diff-delete\">%s</span>' % \\\n fancy_escape(removed)\n removed = u\"\"\n textdiff += fancy_escape(text)\n elif op == 1: # insertion\n if removed:\n # this is part of a substitution, not a plain insertion. We\n # will format this differently.\n textdiff += '<span class=\"diff-replace\">%s</span>' % \\\n fancy_escape(text)\n removed = u\"\"\n else:\n textdiff += '<span class=\"diff-insert\">%s</span>' % \\\n fancy_escape(text)\n elif op == -1: # deletion\n removed = text\n if removed:\n textdiff += '<span class=\"diff-delete\">%s</span>' % \\\n fancy_escape(removed)\n return mark_safe(textdiff)\n\n\[email protected]('pluralize_source')\ndef pluralize_source(unit):\n if not unit.hasplural():\n return [(0, unit.source, None)]\n\n count = len(unit.source.strings)\n if count == 1:\n return [(0, unit.source.strings[0], \"%s+%s\" % (_('Singular'),\n _('Plural')))]\n\n if count == 2:\n return [(0, unit.source.strings[0], _('Singular')),\n (1, unit.source.strings[1], _('Plural'))]\n\n forms = []\n for i, source in enumerate(unit.source.strings):\n forms.append((i, source, _('Plural Form %d', i)))\n return forms\n\n\[email protected]('pluralize_target')\ndef pluralize_target(unit, nplurals=None):\n if not unit.hasplural():\n return [(0, unit.target, None)]\n\n if nplurals is None:\n try:\n nplurals = unit.store.translation_project.language.nplurals\n except ObjectDoesNotExist:\n pass\n forms = []\n if nplurals is None:\n for i, target in enumerate(unit.target.strings):\n forms.append((i, target, _('Plural Form %d', i)))\n else:\n for i in range(nplurals):\n try:\n target = unit.target.strings[i]\n except IndexError:\n target = ''\n forms.append((i, target, _('Plural Form %d', i)))\n\n return forms\n\n\[email protected]('pluralize_diff_sugg')\ndef pluralize_diff_sugg(sugg):\n unit = sugg.unit\n if not unit.hasplural():\n return [\n (0, sugg.target, call_highlight(unit.target, sugg.target), None)\n ]\n\n forms = []\n for i, target in enumerate(sugg.target.strings):\n if i < len(unit.target.strings):\n sugg_text = unit.target.strings[i]\n else:\n sugg_text = ''\n\n forms.append((\n i, target, call_highlight(sugg_text, target),\n _('Plural Form %d', i)\n ))\n\n return forms\n\n\[email protected](name=\"include_raw\")\ndef do_include_raw(parser, token):\n \"\"\"\n 
Performs a template include without parsing the context, just dumps\n the template in.\n Source: http://djangosnippets.org/snippets/1684/\n \"\"\"\n bits = token.split_contents()\n if len(bits) != 2:\n raise template.TemplateSyntaxError(\n \"%r tag takes one argument: the name of the template \"\n \"to be included\" % bits[0]\n )\n\n template_name = bits[1]\n if (template_name[0] in ('\"', \"'\") and\n template_name[-1] == template_name[0]):\n template_name = template_name[1:-1]\n\n source, path = get_template(\n template_name).origin.loader(template_name)\n\n return template.base.TextNode(source)\n", "path": "pootle/apps/pootle_store/templatetags/store_tags.py"}]} | 3,014 | 182 |
gh_patches_debug_21408 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1063 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
String misinterpreted as an int results in error on E2015
```
cfn-lint --version
cfn-lint 0.19.1
```
*Description of issue.*
The following template
```
Parameters:
CentralAccountId:
Default: 112233445566
MaxLength: 12
MinLength: 12
Type: String
```
results in the error:
```
E0002 Unknown exception while processing rule E2015: object of type 'int' has no len()
application-account-initial-setup.yaml:1:1
```
It is solved by putting quotes around the default value. However, it is valid not to put the quotes.
</issue>
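For illustration, a minimal sketch of the coercion idea behind the fix (normalising a YAML-parsed integer default to its string form before any length check). The helper names here are hypothetical, not taken from cfn-lint:
```python
def _coerce_to_string(default_value):
    # YAML loads an unquoted 112233445566 as an int, so normalise it before calling len()
    return default_value if isinstance(default_value, str) else str(default_value)

def length_within_bounds(default_value, min_length, max_length):
    value = _coerce_to_string(default_value)
    return min_length <= len(value) <= max_length

assert length_within_bounds(112233445566, 12, 12)  # the unquoted default from the template above
```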
<code>
[start of src/cfnlint/rules/parameters/Default.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 import six
19 from cfnlint import CloudFormationLintRule
20 from cfnlint import RuleMatch
21
22
23 class Default(CloudFormationLintRule):
24 """Check if Parameters are configured correctly"""
25 id = 'E2015'
26 shortdesc = 'Default value is within parameter constraints'
27 description = 'Making sure the parameters have a default value inside AllowedValues, MinValue, MaxValue, AllowedPattern'
28 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
29 tags = ['parameters']
30
31 def check_allowed_pattern(self, allowed_value, allowed_pattern, path):
32 """
33 Check allowed value against allowed pattern
34 """
35 message = 'Default should be allowed by AllowedPattern'
36 try:
37 if not re.match(allowed_pattern, str(allowed_value)):
38 return([RuleMatch(path, message)])
39 except re.error as ex:
40 self.logger.debug('Regex pattern "%s" isn\'t supported by Python: %s', allowed_pattern, ex)
41
42 return []
43
44 def check_min_value(self, allowed_value, min_value, path):
45 """
46 Check allowed value against min value
47 """
48 message = 'Default should be equal to or higher than MinValue'
49
50 if isinstance(allowed_value, six.integer_types) and isinstance(min_value, six.integer_types):
51 if allowed_value < min_value:
52 return([RuleMatch(path, message)])
53
54 return []
55
56 def check_max_value(self, allowed_value, max_value, path):
57 """
58 Check allowed value against max value
59 """
60 message = 'Default should be less than or equal to MaxValue'
61
62 if isinstance(allowed_value, six.integer_types) and isinstance(max_value, six.integer_types):
63 if allowed_value > max_value:
64 return([RuleMatch(path, message)])
65
66 return []
67
68 def check_allowed_values(self, allowed_value, allowed_values, path):
69 """
70 Check allowed value against allowed values
71 """
72 message = 'Default should be a value within AllowedValues'
73
74 if allowed_value not in allowed_values:
75 return([RuleMatch(path, message)])
76
77 return []
78
79 def check_min_length(self, allowed_value, min_length, path):
80 """
81 Check allowed value against MinLength
82 """
83 message = 'Default should have a length above or equal to MinLength'
84
85 if isinstance(min_length, six.integer_types):
86 if len(allowed_value) < min_length:
87 return([RuleMatch(path, message)])
88
89 return []
90
91 def check_max_length(self, allowed_value, max_length, path):
92 """
93 Check allowed value against MaxLength
94 """
95 message = 'Default should have a length below or equal to MaxLength'
96
97 if isinstance(max_length, six.integer_types):
98 if len(allowed_value) > max_length:
99 return([RuleMatch(path, message)])
100
101 return []
102
103 def match(self, cfn):
104 """Check CloudFormation Parameters"""
105
106 matches = []
107
108 for paramname, paramvalue in cfn.get_parameters().items():
109 default_value = paramvalue.get('Default')
110 if default_value is not None:
111 path = ['Parameters', paramname, 'Default']
112 allowed_pattern = paramvalue.get('AllowedPattern')
113 if allowed_pattern:
114 matches.extend(
115 self.check_allowed_pattern(
116 default_value, allowed_pattern, path
117 )
118 )
119 min_value = paramvalue.get('MinValue')
120 if min_value:
121 matches.extend(
122 self.check_min_value(
123 default_value, min_value, path
124 )
125 )
126 max_value = paramvalue.get('MaxValue')
127 if max_value is not None:
128 matches.extend(
129 self.check_max_value(
130 default_value, max_value, path
131 )
132 )
133 allowed_values = paramvalue.get('AllowedValues')
134 if allowed_values:
135 matches.extend(
136 self.check_allowed_values(
137 default_value, allowed_values, path
138 )
139 )
140 min_length = paramvalue.get('MinLength')
141 if min_length is not None:
142 matches.extend(
143 self.check_min_length(
144 default_value, min_length, path
145 )
146 )
147 max_length = paramvalue.get('MaxLength')
148 if max_length is not None:
149 matches.extend(
150 self.check_max_length(
151 default_value, max_length, path
152 )
153 )
154
155 return matches
156
[end of src/cfnlint/rules/parameters/Default.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/parameters/Default.py b/src/cfnlint/rules/parameters/Default.py
--- a/src/cfnlint/rules/parameters/Default.py
+++ b/src/cfnlint/rules/parameters/Default.py
@@ -82,8 +82,9 @@
"""
message = 'Default should have a length above or equal to MinLength'
+ value = allowed_value if isinstance(allowed_value, six.string_types) else str(allowed_value)
if isinstance(min_length, six.integer_types):
- if len(allowed_value) < min_length:
+ if len(value) < min_length:
return([RuleMatch(path, message)])
return []
@@ -94,8 +95,9 @@
"""
message = 'Default should have a length below or equal to MaxLength'
+ value = allowed_value if isinstance(allowed_value, six.string_types) else str(allowed_value)
if isinstance(max_length, six.integer_types):
- if len(allowed_value) > max_length:
+ if len(value) > max_length:
return([RuleMatch(path, message)])
return []
| {"golden_diff": "diff --git a/src/cfnlint/rules/parameters/Default.py b/src/cfnlint/rules/parameters/Default.py\n--- a/src/cfnlint/rules/parameters/Default.py\n+++ b/src/cfnlint/rules/parameters/Default.py\n@@ -82,8 +82,9 @@\n \"\"\"\n message = 'Default should have a length above or equal to MinLength'\n \n+ value = allowed_value if isinstance(allowed_value, six.string_types) else str(allowed_value)\n if isinstance(min_length, six.integer_types):\n- if len(allowed_value) < min_length:\n+ if len(value) < min_length:\n return([RuleMatch(path, message)])\n \n return []\n@@ -94,8 +95,9 @@\n \"\"\"\n message = 'Default should have a length below or equal to MaxLength'\n \n+ value = allowed_value if isinstance(allowed_value, six.string_types) else str(allowed_value)\n if isinstance(max_length, six.integer_types):\n- if len(allowed_value) > max_length:\n+ if len(value) > max_length:\n return([RuleMatch(path, message)])\n \n return []\n", "issue": "String misinterpreted as an int results in error on E2015\n```\r\ncfn-lint --version\r\ncfn-lint 0.19.1\r\n```\r\n\r\n*Description of issue.*\r\nThe following template\r\n```\r\nParameters:\r\n CentralAccountId:\r\n Default: 112233445566\r\n MaxLength: 12\r\n MinLength: 12\r\n Type: String\r\n```\r\nresult in the error:\r\n```\r\nE0002 Unknown exception while processing rule E2015: object of type 'int' has no len()\r\napplication-account-initial-setup.yaml:1:1\r\n```\r\n\r\nIt is solved by putting quotes on the default value. However it is valid to not putting the quotes.\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Default(CloudFormationLintRule):\n \"\"\"Check if Parameters are configured correctly\"\"\"\n id = 'E2015'\n shortdesc = 'Default value is within parameter constraints'\n description = 'Making sure the parameters have a default value inside AllowedValues, MinValue, MaxValue, AllowedPattern'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'\n tags = ['parameters']\n\n def check_allowed_pattern(self, allowed_value, allowed_pattern, path):\n \"\"\"\n Check allowed value against allowed pattern\n \"\"\"\n message = 'Default should be allowed by AllowedPattern'\n try:\n if not re.match(allowed_pattern, str(allowed_value)):\n return([RuleMatch(path, message)])\n except re.error as ex:\n self.logger.debug('Regex pattern \"%s\" isn\\'t supported by Python: %s', allowed_pattern, ex)\n\n return []\n\n def check_min_value(self, allowed_value, min_value, path):\n \"\"\"\n Check allowed value against min value\n \"\"\"\n message = 'Default should be equal to or higher than MinValue'\n\n if isinstance(allowed_value, six.integer_types) and isinstance(min_value, six.integer_types):\n if allowed_value < min_value:\n return([RuleMatch(path, message)])\n\n return []\n\n def check_max_value(self, allowed_value, max_value, path):\n \"\"\"\n Check allowed value against max value\n \"\"\"\n message = 'Default should be less than or equal to MaxValue'\n\n if isinstance(allowed_value, six.integer_types) and isinstance(max_value, six.integer_types):\n if allowed_value > max_value:\n return([RuleMatch(path, message)])\n\n return []\n\n def check_allowed_values(self, allowed_value, allowed_values, path):\n \"\"\"\n Check allowed value against allowed values\n \"\"\"\n message = 'Default should be a value within AllowedValues'\n\n if allowed_value not in allowed_values:\n return([RuleMatch(path, message)])\n\n return []\n\n def check_min_length(self, allowed_value, min_length, path):\n \"\"\"\n Check allowed value against MinLength\n \"\"\"\n message = 'Default should have a length above or equal to MinLength'\n\n if isinstance(min_length, six.integer_types):\n if len(allowed_value) < min_length:\n return([RuleMatch(path, message)])\n\n return []\n\n def check_max_length(self, allowed_value, max_length, path):\n \"\"\"\n Check allowed value against MaxLength\n \"\"\"\n message = 'Default should have a length below or equal to MaxLength'\n\n if isinstance(max_length, six.integer_types):\n if len(allowed_value) > max_length:\n return([RuleMatch(path, message)])\n\n return []\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n for paramname, paramvalue in cfn.get_parameters().items():\n default_value = paramvalue.get('Default')\n if default_value is not None:\n path = ['Parameters', paramname, 'Default']\n allowed_pattern = paramvalue.get('AllowedPattern')\n if allowed_pattern:\n matches.extend(\n self.check_allowed_pattern(\n default_value, allowed_pattern, path\n )\n )\n min_value = paramvalue.get('MinValue')\n if min_value:\n matches.extend(\n self.check_min_value(\n default_value, min_value, path\n )\n )\n max_value = 
paramvalue.get('MaxValue')\n if max_value is not None:\n matches.extend(\n self.check_max_value(\n default_value, max_value, path\n )\n )\n allowed_values = paramvalue.get('AllowedValues')\n if allowed_values:\n matches.extend(\n self.check_allowed_values(\n default_value, allowed_values, path\n )\n )\n min_length = paramvalue.get('MinLength')\n if min_length is not None:\n matches.extend(\n self.check_min_length(\n default_value, min_length, path\n )\n )\n max_length = paramvalue.get('MaxLength')\n if max_length is not None:\n matches.extend(\n self.check_max_length(\n default_value, max_length, path\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/parameters/Default.py"}]} | 2,204 | 248 |
gh_patches_debug_29460 | rasdani/github-patches | git_diff | aimhubio__aim-2671 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend `aim.ext.tensorboard_tracker.run.Run` to also allow system stats, parameters, and stdout capture.
## 🚀 Feature
Allow capturing of system parameters and terminal logs by the `aim.ext.tensorboard_tracker.run.Run`, as this great feature shouldn't be available only to the default `Run`.
### Motivation
The new feature of allowing continuous syncing from `tensorboard` files to `aim` is really nice, but because `aim.ext.tensorboard_tracker.run.Run` inherits from `BasicRun` rather than `Run`, it misses out on the ability to log the standard out, system stats and system parameters. Since `aim.ext.tensorboard_tracker.run.Run` should be a possible replacement for `Run`, I don't see a reason why this behaviour shouldn't be allowed.
It has been highlighted in Discord by @mihran113:
> The reason behind inheriting from basic run is exactly to avoid terminal log tracking and system param tracking actually, cause we don’t want to add anything else rather than what’s tracked via tensorboard. Cause there can be times when live tracking is done from a different process, and catching that process’s terminal logs and system params won’t make any sense I guess. If you’re interested you can open a PR to address those points, cause adding the possibility to enable those won’t make any harm as well.
so I believe the *default* arguments should *not* do this extra logging, but still optionally allow this behaviour.
### Pitch
Have `aim.ext.tensorboard_tracker.run.Run` inherit from `aim.sdk.run.Run` instead of `aim.sdk.run.BasicRun`, so that it can utilise its extra capabilities.
### Alternatives
Instead of inheritance, we could change the system resource tracking to be a mixin?
</issue>
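For illustration, a hedged usage sketch of the pitched interface. The keyword names mirror the patch shown further down; treating `None` as "system stats disabled" is an assumption here rather than something stated in the issue:
```python
from aim.ext.tensorboard_tracker.run import Run

# Extra logging stays off unless explicitly requested, matching the issue's ask
# that the default arguments add nothing beyond the tensorboard sync itself.
run = Run(
    sync_tensorboard_log_dir="./tb_logs",
    experiment="tb-sync",
    system_tracking_interval=10,   # assumed to be seconds; None would leave stats collection off
    log_system_params=True,
    capture_terminal_logs=True,
)
```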
<code>
[start of aim/ext/tensorboard_tracker/run.py]
1 from typing import Optional, Union
2
3 from aim.sdk.run import BasicRun
4 from aim.ext.tensorboard_tracker.tracker import TensorboardTracker
5
6 from typing import TYPE_CHECKING
7
8 if TYPE_CHECKING:
9 from aim.sdk.repo import Repo
10
11
12 class Run(BasicRun):
13 def __init__(self, run_hash: Optional[str] = None, *,
14 sync_tensorboard_log_dir: str,
15 repo: Optional[Union[str, 'Repo']] = None,
16 experiment: Optional[str] = None,
17 force_resume: Optional[bool] = False,
18 ):
19 super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)
20 self['tb_log_directory'] = sync_tensorboard_log_dir
21 self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)
22 self._tensorboard_tracker.start()
23 self._resources.add_extra_resource(self._tensorboard_tracker)
24
[end of aim/ext/tensorboard_tracker/run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aim/ext/tensorboard_tracker/run.py b/aim/ext/tensorboard_tracker/run.py
--- a/aim/ext/tensorboard_tracker/run.py
+++ b/aim/ext/tensorboard_tracker/run.py
@@ -1,6 +1,6 @@
from typing import Optional, Union
-from aim.sdk.run import BasicRun
+from aim.sdk.run import Run as SdkRun
from aim.ext.tensorboard_tracker.tracker import TensorboardTracker
from typing import TYPE_CHECKING
@@ -9,14 +9,23 @@
from aim.sdk.repo import Repo
-class Run(BasicRun):
- def __init__(self, run_hash: Optional[str] = None, *,
- sync_tensorboard_log_dir: str,
- repo: Optional[Union[str, 'Repo']] = None,
- experiment: Optional[str] = None,
- force_resume: Optional[bool] = False,
- ):
- super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)
+class Run(SdkRun):
+ def __init__(
+ self, run_hash: Optional[str] = None, *,
+ sync_tensorboard_log_dir: str,
+ repo: Optional[Union[str, 'Repo']] = None,
+ experiment: Optional[str] = None,
+ force_resume: Optional[bool] = False,
+ system_tracking_interval: Optional[Union[int, float]] = None,
+ log_system_params: Optional[bool] = False,
+ capture_terminal_logs: Optional[bool] = False,
+ ):
+ super().__init__(
+ run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume,
+ system_tracking_interval=system_tracking_interval, log_system_params=log_system_params,
+ capture_terminal_logs=capture_terminal_logs
+ )
+
self['tb_log_directory'] = sync_tensorboard_log_dir
self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)
self._tensorboard_tracker.start()
| {"golden_diff": "diff --git a/aim/ext/tensorboard_tracker/run.py b/aim/ext/tensorboard_tracker/run.py\n--- a/aim/ext/tensorboard_tracker/run.py\n+++ b/aim/ext/tensorboard_tracker/run.py\n@@ -1,6 +1,6 @@\n from typing import Optional, Union\n \n-from aim.sdk.run import BasicRun\n+from aim.sdk.run import Run as SdkRun\n from aim.ext.tensorboard_tracker.tracker import TensorboardTracker\n \n from typing import TYPE_CHECKING\n@@ -9,14 +9,23 @@\n from aim.sdk.repo import Repo\n \n \n-class Run(BasicRun):\n- def __init__(self, run_hash: Optional[str] = None, *,\n- sync_tensorboard_log_dir: str,\n- repo: Optional[Union[str, 'Repo']] = None,\n- experiment: Optional[str] = None,\n- force_resume: Optional[bool] = False,\n- ):\n- super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)\n+class Run(SdkRun):\n+ def __init__(\n+ self, run_hash: Optional[str] = None, *,\n+ sync_tensorboard_log_dir: str,\n+ repo: Optional[Union[str, 'Repo']] = None,\n+ experiment: Optional[str] = None,\n+ force_resume: Optional[bool] = False,\n+ system_tracking_interval: Optional[Union[int, float]] = None,\n+ log_system_params: Optional[bool] = False,\n+ capture_terminal_logs: Optional[bool] = False,\n+ ):\n+ super().__init__(\n+ run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume,\n+ system_tracking_interval=system_tracking_interval, log_system_params=log_system_params,\n+ capture_terminal_logs=capture_terminal_logs\n+ )\n+\n self['tb_log_directory'] = sync_tensorboard_log_dir\n self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)\n self._tensorboard_tracker.start()\n", "issue": "Extend `aim.ext.tensorboard_tracker.run.Run` to also allow system stats, parameters, and stdout capture.\n## \ud83d\ude80 Feature\r\n\r\nAllow capturing of system parameters and terminal logs by the `aim.ext.tensorboard_tracker.run.Run`, as this is great feature shouldn't be only available to the default `Run`.\r\n\r\n### Motivation\r\n\r\nThe new feature of allowing continuous syncing from `tensorboard` files to `aim` is really nice, but because `aim.ext.tensorboard_tracker.run.Run` inherits from `BasicRun` rather than `Run`, it misses out on the ability to log the standard out, system stats and system parameters. Since `aim.ext.tensorboard_tracker.run.Run` should be a possible replacement for `Run`, I don't see a reason why this behaviour shouldn't be allowed.\r\n\r\nIt has been highlighted in Discord by @mihran113:\r\n\r\n> The reason behind inheriting from basic run is exactly to avoid terminal log tracking and system param tracking actually, cause we don\u2019t want to add anything else rather than what\u2019s tracked via tensorboard. Cause there can be times when live tracking is done from a different process, and catching that process\u2019s terminal logs and system params won\u2019t make any sense I guess. If you\u2019re interested you can open a PR to address those points, cause adding the possibility to enable those won\u2019t make any harm as well.\r\n\r\nso I believe the *default* arguments should *not* do this extra logging, but still optionally allow this behaviour. \r\n\r\n### Pitch\r\n\r\nHave `aim.ext.tensorboard_tracker.run.Run` inherit from `aim.sdk.run.Run` instead of `aim.sdk.run.BasicRun`, so that it can utilise it's extra capabilities.\r\n\r\n### Alternatives\r\n\r\nInstead of inheritance we could change the system resource tracking be a mixin? 
\r\n\nExtend `aim.ext.tensorboard_tracker.run.Run` to also allow system stats, parameters, and stdout capture.\n## \ud83d\ude80 Feature\r\n\r\nAllow capturing of system parameters and terminal logs by the `aim.ext.tensorboard_tracker.run.Run`, as this is great feature shouldn't be only available to the default `Run`.\r\n\r\n### Motivation\r\n\r\nThe new feature of allowing continuous syncing from `tensorboard` files to `aim` is really nice, but because `aim.ext.tensorboard_tracker.run.Run` inherits from `BasicRun` rather than `Run`, it misses out on the ability to log the standard out, system stats and system parameters. Since `aim.ext.tensorboard_tracker.run.Run` should be a possible replacement for `Run`, I don't see a reason why this behaviour shouldn't be allowed.\r\n\r\nIt has been highlighted in Discord by @mihran113:\r\n\r\n> The reason behind inheriting from basic run is exactly to avoid terminal log tracking and system param tracking actually, cause we don\u2019t want to add anything else rather than what\u2019s tracked via tensorboard. Cause there can be times when live tracking is done from a different process, and catching that process\u2019s terminal logs and system params won\u2019t make any sense I guess. If you\u2019re interested you can open a PR to address those points, cause adding the possibility to enable those won\u2019t make any harm as well.\r\n\r\nso I believe the *default* arguments should *not* do this extra logging, but still optionally allow this behaviour. \r\n\r\n### Pitch\r\n\r\nHave `aim.ext.tensorboard_tracker.run.Run` inherit from `aim.sdk.run.Run` instead of `aim.sdk.run.BasicRun`, so that it can utilise it's extra capabilities.\r\n\r\n### Alternatives\r\n\r\nInstead of inheritance we could change the system resource tracking be a mixin? \r\n\n", "before_files": [{"content": "from typing import Optional, Union\n\nfrom aim.sdk.run import BasicRun\nfrom aim.ext.tensorboard_tracker.tracker import TensorboardTracker\n\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from aim.sdk.repo import Repo\n\n\nclass Run(BasicRun):\n def __init__(self, run_hash: Optional[str] = None, *,\n sync_tensorboard_log_dir: str,\n repo: Optional[Union[str, 'Repo']] = None,\n experiment: Optional[str] = None,\n force_resume: Optional[bool] = False,\n ):\n super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)\n self['tb_log_directory'] = sync_tensorboard_log_dir\n self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)\n self._tensorboard_tracker.start()\n self._resources.add_extra_resource(self._tensorboard_tracker)\n", "path": "aim/ext/tensorboard_tracker/run.py"}]} | 1,511 | 448 |
gh_patches_debug_42129 | rasdani/github-patches | git_diff | conan-io__conan-center-index-1204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] cgal/all: review options applied
Coming from https://github.com/conan-io/conan-center-index/pull/965#issuecomment-590802910
It seems that the recipe might require some work regarding the options and flags.
</issue>
<code>
[start of recipes/cgal/all/conanfile.py]
1 import os
2 from conans import ConanFile, CMake, tools
3
4
5 class CgalConan(ConanFile):
6 name = "cgal"
7 license = "LGPL-3.0-or-later"
8 url = "https://github.com/conan-io/conan-center-index"
9 homepage = "https://github.com/CGAL/cgal"
10 description = "C++ library that aims to provide easy access to efficient and reliable algorithms"\
11 "in computational geometry."
12 topics = ("geometry", "algorithms")
13 settings = "os", "compiler", "build_type", "arch"
14 requires = "mpir/3.0.0", "mpfr/4.0.2", "boost/1.72.0", "eigen/3.3.7"
15 generators = "cmake"
16
17 _source_subfolder = "source_subfolder"
18 _cmake = None
19
20 options = {
21 "with_cgal_core": [True, False],
22 "with_cgal_qt5": [True, False],
23 "with_cgal_imageio": [True, False]
24 }
25
26 default_options = {
27 "with_cgal_core": True,
28 "with_cgal_qt5": False,
29 "with_cgal_imageio": True
30 }
31
32 def _configure_cmake(self):
33 if not self._cmake:
34 self._cmake = CMake(self)
35 self._cmake.definitions["WITH_CGAL_Core"] = self.options.with_cgal_core
36 self._cmake.definitions["WITH_CGAL_Qt5"] = self.options.with_cgal_qt5
37 self._cmake.definitions["WITH_CGAL_ImageIO"] = self.options.with_cgal_imageio
38 self._cmake.configure(source_folder=self._source_subfolder)
39 return self._cmake
40
41 def _patch_sources(self):
42 tools.replace_in_file(
43 os.path.join(self._source_subfolder, "CMakeLists.txt"),
44 "project(CGAL CXX C)", '''project(CGAL CXX C)
45 include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
46 conan_basic_setup()''')
47
48 def source(self):
49 tools.get(**self.conan_data["sources"][self.version])
50 extracted_dir = "CGAL-{}".format(self.version)
51 os.rename(extracted_dir, self._source_subfolder)
52
53 def build(self):
54 self._patch_sources()
55 cmake = self._configure_cmake()
56 cmake.build()
57
58 def package(self):
59 self.copy("LICENSE*", dst="licenses", src=self._source_subfolder)
60 cmake = self._configure_cmake()
61 cmake.install()
62 tools.rmdir(os.path.join(self.package_folder, "share"))
63 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
64 tools.rmdir(os.path.join(self.package_folder, "bin"))
65
66 def package_info(self):
67 self.cpp_info.names["cmake_find_package"] = "CGAL"
68 self.cpp_info.names["cmake_find_package_multi"] = "CGAL"
69
70 def package_id(self):
71 self.info.header_only()
72
[end of recipes/cgal/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/cgal/all/conanfile.py b/recipes/cgal/all/conanfile.py
--- a/recipes/cgal/all/conanfile.py
+++ b/recipes/cgal/all/conanfile.py
@@ -1,5 +1,6 @@
import os
from conans import ConanFile, CMake, tools
+from conans.errors import ConanInvalidConfiguration
class CgalConan(ConanFile):
@@ -13,20 +14,26 @@
settings = "os", "compiler", "build_type", "arch"
requires = "mpir/3.0.0", "mpfr/4.0.2", "boost/1.72.0", "eigen/3.3.7"
generators = "cmake"
+ exports_sources = "CMakeLists.txt"
_source_subfolder = "source_subfolder"
+ _build_subfolder = "build_subfolder"
_cmake = None
options = {
"with_cgal_core": [True, False],
"with_cgal_qt5": [True, False],
- "with_cgal_imageio": [True, False]
+ "with_cgal_imageio": [True, False],
+ "shared": [True, False],
+ "header_only": [True, False]
}
default_options = {
"with_cgal_core": True,
"with_cgal_qt5": False,
- "with_cgal_imageio": True
+ "with_cgal_imageio": True,
+ "shared": False,
+ "header_only": True
}
def _configure_cmake(self):
@@ -35,15 +42,19 @@
self._cmake.definitions["WITH_CGAL_Core"] = self.options.with_cgal_core
self._cmake.definitions["WITH_CGAL_Qt5"] = self.options.with_cgal_qt5
self._cmake.definitions["WITH_CGAL_ImageIO"] = self.options.with_cgal_imageio
- self._cmake.configure(source_folder=self._source_subfolder)
+ self._cmake.definitions["CGAL_HEADER_ONLY"] = self.options.header_only
+ self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
- tools.replace_in_file(
- os.path.join(self._source_subfolder, "CMakeLists.txt"),
- "project(CGAL CXX C)", '''project(CGAL CXX C)
-include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
-conan_basic_setup()''')
+ tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
+ "CMAKE_SOURCE_DIR", "CMAKE_CURRENT_SOURCE_DIR")
+
+ def configure(self):
+ if self.options.with_cgal_qt5:
+ raise ConanInvalidConfiguration("Qt Conan package is not available yet.")
+ if self.options.header_only:
+ del self.options.shared
def source(self):
tools.get(**self.conan_data["sources"][self.version])
@@ -61,11 +72,20 @@
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
- tools.rmdir(os.path.join(self.package_folder, "bin"))
+ if self.options.get_safe("shared"):
+ for root, _, filenames in os.walk(os.path.join(self.package_folder, "bin")):
+ for filename in filenames:
+ if not filename.endswith(".dll"):
+ os.unlink(os.path.join(root, filename))
+ else:
+ tools.rmdir(os.path.join(self.package_folder, "bin"))
def package_info(self):
+ if not self.options.header_only:
+ self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["cmake_find_package"] = "CGAL"
self.cpp_info.names["cmake_find_package_multi"] = "CGAL"
def package_id(self):
- self.info.header_only()
+ if self.options.header_only:
+ self.info.header_only()
| {"golden_diff": "diff --git a/recipes/cgal/all/conanfile.py b/recipes/cgal/all/conanfile.py\n--- a/recipes/cgal/all/conanfile.py\n+++ b/recipes/cgal/all/conanfile.py\n@@ -1,5 +1,6 @@\n import os\n from conans import ConanFile, CMake, tools\n+from conans.errors import ConanInvalidConfiguration\n \n \n class CgalConan(ConanFile):\n@@ -13,20 +14,26 @@\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n requires = \"mpir/3.0.0\", \"mpfr/4.0.2\", \"boost/1.72.0\", \"eigen/3.3.7\"\n generators = \"cmake\"\n+ exports_sources = \"CMakeLists.txt\"\n \n _source_subfolder = \"source_subfolder\"\n+ _build_subfolder = \"build_subfolder\"\n _cmake = None\n \n options = {\n \"with_cgal_core\": [True, False],\n \"with_cgal_qt5\": [True, False],\n- \"with_cgal_imageio\": [True, False]\n+ \"with_cgal_imageio\": [True, False],\n+ \"shared\": [True, False],\n+ \"header_only\": [True, False]\n }\n \n default_options = {\n \"with_cgal_core\": True,\n \"with_cgal_qt5\": False,\n- \"with_cgal_imageio\": True\n+ \"with_cgal_imageio\": True,\n+ \"shared\": False,\n+ \"header_only\": True\n }\n \n def _configure_cmake(self):\n@@ -35,15 +42,19 @@\n self._cmake.definitions[\"WITH_CGAL_Core\"] = self.options.with_cgal_core\n self._cmake.definitions[\"WITH_CGAL_Qt5\"] = self.options.with_cgal_qt5\n self._cmake.definitions[\"WITH_CGAL_ImageIO\"] = self.options.with_cgal_imageio\n- self._cmake.configure(source_folder=self._source_subfolder)\n+ self._cmake.definitions[\"CGAL_HEADER_ONLY\"] = self.options.header_only\n+ self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n \n def _patch_sources(self):\n- tools.replace_in_file(\n- os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n- \"project(CGAL CXX C)\", '''project(CGAL CXX C)\n-include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n-conan_basic_setup()''')\n+ tools.replace_in_file(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n+ \"CMAKE_SOURCE_DIR\", \"CMAKE_CURRENT_SOURCE_DIR\")\n+\n+ def configure(self):\n+ if self.options.with_cgal_qt5:\n+ raise ConanInvalidConfiguration(\"Qt Conan package is not available yet.\")\n+ if self.options.header_only:\n+ del self.options.shared\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n@@ -61,11 +72,20 @@\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n- tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n+ if self.options.get_safe(\"shared\"):\n+ for root, _, filenames in os.walk(os.path.join(self.package_folder, \"bin\")):\n+ for filename in filenames:\n+ if not filename.endswith(\".dll\"):\n+ os.unlink(os.path.join(root, filename))\n+ else:\n+ tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n \n def package_info(self):\n+ if not self.options.header_only:\n+ self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.names[\"cmake_find_package\"] = \"CGAL\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"CGAL\"\n \n def package_id(self):\n- self.info.header_only()\n+ if self.options.header_only:\n+ self.info.header_only()\n", "issue": "[package] cgal/all: review options applied\nComming from https://github.com/conan-io/conan-center-index/pull/965#issuecomment-590802910\r\n\r\nSeems that the recipe might require some work regarding the options and flags\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, CMake, tools\n\n\nclass CgalConan(ConanFile):\n name = \"cgal\"\n license = \"LGPL-3.0-or-later\"\n 
url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/CGAL/cgal\"\n description = \"C++ library that aims to provide easy access to efficient and reliable algorithms\"\\\n \"in computational geometry.\"\n topics = (\"geometry\", \"algorithms\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n requires = \"mpir/3.0.0\", \"mpfr/4.0.2\", \"boost/1.72.0\", \"eigen/3.3.7\"\n generators = \"cmake\"\n\n _source_subfolder = \"source_subfolder\"\n _cmake = None\n\n options = {\n \"with_cgal_core\": [True, False],\n \"with_cgal_qt5\": [True, False],\n \"with_cgal_imageio\": [True, False]\n }\n\n default_options = {\n \"with_cgal_core\": True,\n \"with_cgal_qt5\": False,\n \"with_cgal_imageio\": True\n }\n\n def _configure_cmake(self):\n if not self._cmake:\n self._cmake = CMake(self)\n self._cmake.definitions[\"WITH_CGAL_Core\"] = self.options.with_cgal_core\n self._cmake.definitions[\"WITH_CGAL_Qt5\"] = self.options.with_cgal_qt5\n self._cmake.definitions[\"WITH_CGAL_ImageIO\"] = self.options.with_cgal_imageio\n self._cmake.configure(source_folder=self._source_subfolder)\n return self._cmake\n\n def _patch_sources(self):\n tools.replace_in_file(\n os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n \"project(CGAL CXX C)\", '''project(CGAL CXX C)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()''')\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"CGAL-{}\".format(self.version)\n os.rename(extracted_dir, self._source_subfolder)\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"CGAL\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"CGAL\"\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/cgal/all/conanfile.py"}]} | 1,412 | 922 |
gh_patches_debug_11580 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1631 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FR: key coal has negative value -9.0
```
invalid point: {'zoneKey': 'FR', 'datetime': datetime.datetime(2018, 10, 9, 11, 15, tzinfo=tzoffset(None, 7200)), 'production': {'nuclear': 41740.0, 'coal': -9.0, 'gas': 4057.0, 'oil': 188.0, 'wind': 1158.0, 'solar': 2762.0, 'biomass': 861.0, 'hydro': 3366.0}, 'storage': {'hydro': -1024.0}, 'source': 'opendata.reseaux-energies.fr', 'schemaVersion': 1}, reason:FR: key coal has negative value -9.0
```
Probably a good idea to set small negative values to 0
</issue>
<code>
[start of parsers/FR.py]
1 #!/usr/bin/env python3
2
3 import arrow
4 import json
5 import logging
6 import os
7 import math
8
9 import pandas as pd
10 import requests
11 import xml.etree.ElementTree as ET
12
13 API_ENDPOINT = 'https://opendata.reseaux-energies.fr/api/records/1.0/search/'
14
15 MAP_GENERATION = {
16 'nucleaire': 'nuclear',
17 'charbon': 'coal',
18 'gaz': 'gas',
19 'fioul': 'oil',
20 'eolien': 'wind',
21 'solaire': 'solar',
22 'bioenergies': 'biomass'
23 }
24
25 MAP_HYDRO = [
26 'hydraulique_fil_eau_eclusee',
27 'hydraulique_lacs',
28 'hydraulique_step_turbinage',
29 'pompage'
30 ]
31
32 def is_not_nan_and_truthy(v):
33 if isinstance(v, float) and math.isnan(v):
34 return False
35 return bool(v)
36
37
38 def fetch_production(zone_key='FR', session=None, target_datetime=None,
39 logger=logging.getLogger(__name__)):
40 if target_datetime:
41 to = arrow.get(target_datetime, 'Europe/Paris')
42 else:
43 to = arrow.now(tz='Europe/Paris')
44
45 # setup request
46 r = session or requests.session()
47 formatted_from = to.shift(days=-1).format('YYYY-MM-DDTHH:mm')
48 formatted_to = to.format('YYYY-MM-DDTHH:mm')
49
50 params = {
51 'dataset': 'eco2mix-national-tr',
52 'q': 'date_heure >= {} AND date_heure <= {}'.format(
53 formatted_from, formatted_to),
54 'timezone': 'Europe/Paris',
55 'rows': 100
56 }
57
58 if 'RESEAUX_ENERGIES_TOKEN' not in os.environ:
59 raise Exception(
60 'No RESEAUX_ENERGIES_TOKEN found! Please add it into secrets.env!')
61 params['apikey'] = os.environ['RESEAUX_ENERGIES_TOKEN']
62
63 # make request and create dataframe with response
64 response = r.get(API_ENDPOINT, params=params)
65 data = json.loads(response.content)
66 data = [d['fields'] for d in data['records']]
67 df = pd.DataFrame(data)
68
69 # filter out desired columns and convert values to float
70 value_columns = list(MAP_GENERATION.keys()) + MAP_HYDRO
71 df = df[['date_heure'] + value_columns]
72 df[value_columns] = df[value_columns].astype(float)
73
74 datapoints = list()
75 for row in df.iterrows():
76 production = dict()
77 for key, value in MAP_GENERATION.items():
78 production[value] = row[1][key]
79
80 # Hydro is a special case!
81 production['hydro'] = row[1]['hydraulique_lacs'] + row[1]['hydraulique_fil_eau_eclusee']
82 storage = {
83 'hydro': row[1]['pompage'] * -1 + row[1]['hydraulique_step_turbinage'] * -1
84 }
85
86 # if all production values are null, ignore datapoint
87 if not any([is_not_nan_and_truthy(v)
88 for k, v in production.items()]):
89 continue
90
91 datapoints.append({
92 'zoneKey': zone_key,
93 'datetime': arrow.get(row[1]['date_heure']).datetime,
94 'production': production,
95 'storage': storage,
96 'source': 'opendata.reseaux-energies.fr'
97 })
98
99 return datapoints
100
101
102 def fetch_price(zone_key, session=None, target_datetime=None,
103 logger=logging.getLogger(__name__)):
104 if target_datetime:
105 now = arrow.get(target_datetime, tz='Europe/Paris')
106 else:
107 now = arrow.now(tz='Europe/Paris')
108
109 r = session or requests.session()
110 formatted_from = now.shift(days=-1).format('DD/MM/YYYY')
111 formatted_to = now.format('DD/MM/YYYY')
112
113 url = 'http://www.rte-france.com/getEco2MixXml.php?type=donneesMarche&da' \
114 'teDeb={}&dateFin={}&mode=NORM'.format(formatted_from, formatted_to)
115 response = r.get(url)
116 obj = ET.fromstring(response.content)
117 datas = {}
118
119 for donnesMarche in obj:
120 if donnesMarche.tag != 'donneesMarche':
121 continue
122
123 start_date = arrow.get(arrow.get(donnesMarche.attrib['date']).datetime, 'Europe/Paris')
124
125 for item in donnesMarche:
126 if item.get('granularite') != 'Global':
127 continue
128 country_c = item.get('perimetre')
129 if zone_key != country_c:
130 continue
131 value = None
132 for value in item:
133 if value.text == 'ND':
134 continue
135 period = int(value.attrib['periode'])
136 datetime = start_date.replace(hours=+period).datetime
137 if not datetime in datas:
138 datas[datetime] = {
139 'zoneKey': zone_key,
140 'currency': 'EUR',
141 'datetime': datetime,
142 'source': 'rte-france.com',
143 }
144 data = datas[datetime]
145 data['price'] = float(value.text)
146
147 return list(datas.values())
148
149
150 if __name__ == '__main__':
151 print(fetch_production())
152
[end of parsers/FR.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/FR.py b/parsers/FR.py
--- a/parsers/FR.py
+++ b/parsers/FR.py
@@ -75,7 +75,12 @@
for row in df.iterrows():
production = dict()
for key, value in MAP_GENERATION.items():
- production[value] = row[1][key]
+ # Set small negative values to 0
+ if row[1][key] < 0 and row[1][key] > -50:
+ logger.warning('Setting small value of %s (%s) to 0.' % (key, value))
+ production[value] = 0
+ else:
+ production[value] = row[1][key]
# Hydro is a special case!
production['hydro'] = row[1]['hydraulique_lacs'] + row[1]['hydraulique_fil_eau_eclusee']
| {"golden_diff": "diff --git a/parsers/FR.py b/parsers/FR.py\n--- a/parsers/FR.py\n+++ b/parsers/FR.py\n@@ -75,7 +75,12 @@\n for row in df.iterrows():\n production = dict()\n for key, value in MAP_GENERATION.items():\n- production[value] = row[1][key]\n+ # Set small negative values to 0\n+ if row[1][key] < 0 and row[1][key] > -50:\n+ logger.warning('Setting small value of %s (%s) to 0.' % (key, value))\n+ production[value] = 0\n+ else:\n+ production[value] = row[1][key]\n \n # Hydro is a special case!\n production['hydro'] = row[1]['hydraulique_lacs'] + row[1]['hydraulique_fil_eau_eclusee']\n", "issue": "FR: key coal has negative value -9.0\n```\r\ninvalid point: {'zoneKey': 'FR', 'datetime': datetime.datetime(2018, 10, 9, 11, 15, tzinfo=tzoffset(None, 7200)), 'production': {'nuclear': 41740.0, 'coal': -9.0, 'gas': 4057.0, 'oil': 188.0, 'wind': 1158.0, 'solar': 2762.0, 'biomass': 861.0, 'hydro': 3366.0}, 'storage': {'hydro': -1024.0}, 'source': 'opendata.reseaux-energies.fr', 'schemaVersion': 1}, reason:FR: key coal has negative value -9.0\r\n```\r\n\r\nProbably a good idea to set small negative values to 0\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport json\nimport logging\nimport os\nimport math\n\nimport pandas as pd\nimport requests\nimport xml.etree.ElementTree as ET\n\nAPI_ENDPOINT = 'https://opendata.reseaux-energies.fr/api/records/1.0/search/'\n\nMAP_GENERATION = {\n 'nucleaire': 'nuclear',\n 'charbon': 'coal',\n 'gaz': 'gas',\n 'fioul': 'oil',\n 'eolien': 'wind',\n 'solaire': 'solar',\n 'bioenergies': 'biomass'\n}\n\nMAP_HYDRO = [\n 'hydraulique_fil_eau_eclusee',\n 'hydraulique_lacs',\n 'hydraulique_step_turbinage',\n 'pompage'\n]\n\ndef is_not_nan_and_truthy(v):\n if isinstance(v, float) and math.isnan(v):\n return False\n return bool(v)\n\n\ndef fetch_production(zone_key='FR', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n to = arrow.get(target_datetime, 'Europe/Paris')\n else:\n to = arrow.now(tz='Europe/Paris')\n\n # setup request\n r = session or requests.session()\n formatted_from = to.shift(days=-1).format('YYYY-MM-DDTHH:mm')\n formatted_to = to.format('YYYY-MM-DDTHH:mm')\n\n params = {\n 'dataset': 'eco2mix-national-tr',\n 'q': 'date_heure >= {} AND date_heure <= {}'.format(\n formatted_from, formatted_to),\n 'timezone': 'Europe/Paris',\n 'rows': 100\n }\n\n if 'RESEAUX_ENERGIES_TOKEN' not in os.environ:\n raise Exception(\n 'No RESEAUX_ENERGIES_TOKEN found! 
Please add it into secrets.env!')\n params['apikey'] = os.environ['RESEAUX_ENERGIES_TOKEN']\n\n # make request and create dataframe with response\n response = r.get(API_ENDPOINT, params=params)\n data = json.loads(response.content)\n data = [d['fields'] for d in data['records']]\n df = pd.DataFrame(data)\n\n # filter out desired columns and convert values to float\n value_columns = list(MAP_GENERATION.keys()) + MAP_HYDRO\n df = df[['date_heure'] + value_columns]\n df[value_columns] = df[value_columns].astype(float)\n\n datapoints = list()\n for row in df.iterrows():\n production = dict()\n for key, value in MAP_GENERATION.items():\n production[value] = row[1][key]\n\n # Hydro is a special case!\n production['hydro'] = row[1]['hydraulique_lacs'] + row[1]['hydraulique_fil_eau_eclusee']\n storage = {\n 'hydro': row[1]['pompage'] * -1 + row[1]['hydraulique_step_turbinage'] * -1\n }\n\n # if all production values are null, ignore datapoint\n if not any([is_not_nan_and_truthy(v)\n for k, v in production.items()]):\n continue\n\n datapoints.append({\n 'zoneKey': zone_key,\n 'datetime': arrow.get(row[1]['date_heure']).datetime,\n 'production': production,\n 'storage': storage,\n 'source': 'opendata.reseaux-energies.fr'\n })\n\n return datapoints\n\n\ndef fetch_price(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n now = arrow.get(target_datetime, tz='Europe/Paris')\n else:\n now = arrow.now(tz='Europe/Paris')\n\n r = session or requests.session()\n formatted_from = now.shift(days=-1).format('DD/MM/YYYY')\n formatted_to = now.format('DD/MM/YYYY')\n\n url = 'http://www.rte-france.com/getEco2MixXml.php?type=donneesMarche&da' \\\n 'teDeb={}&dateFin={}&mode=NORM'.format(formatted_from, formatted_to)\n response = r.get(url)\n obj = ET.fromstring(response.content)\n datas = {}\n\n for donnesMarche in obj:\n if donnesMarche.tag != 'donneesMarche':\n continue\n\n start_date = arrow.get(arrow.get(donnesMarche.attrib['date']).datetime, 'Europe/Paris')\n\n for item in donnesMarche:\n if item.get('granularite') != 'Global':\n continue\n country_c = item.get('perimetre')\n if zone_key != country_c:\n continue\n value = None\n for value in item:\n if value.text == 'ND':\n continue\n period = int(value.attrib['periode'])\n datetime = start_date.replace(hours=+period).datetime\n if not datetime in datas:\n datas[datetime] = {\n 'zoneKey': zone_key,\n 'currency': 'EUR',\n 'datetime': datetime,\n 'source': 'rte-france.com',\n }\n data = datas[datetime]\n data['price'] = float(value.text)\n\n return list(datas.values())\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/FR.py"}]} | 2,277 | 206 |
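The fix recorded above amounts to clamping small negative production readings (measurement noise such as the -9.0 MW coal value) to zero before validation. A minimal standalone version of that guard; the -50 cutoff and the warning text come from the diff, the function wrapper is illustrative:

```python
import logging

logger = logging.getLogger(__name__)


def clamp_small_negative(value, key, cutoff=-50):
    """Return 0 for slightly negative readings, leaving real values untouched."""
    if cutoff < value < 0:
        logger.warning('Setting small value of %s to 0.', key)
        return 0
    return value


# e.g. clamp_small_negative(-9.0, 'coal') -> 0; clamp_small_negative(4057.0, 'gas') -> 4057.0
```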
gh_patches_debug_40863 | rasdani/github-patches | git_diff | dotkom__onlineweb4-712 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mandatory phone number in profile
It has been requested by arrkom among others, and decided in HS, that phone numbers in the user profile should be mandatory for people attending events. So we need to implement functionality similar to the one used for "prikkeregler".
If users hide this in their profile, the info could behave like allergies do and only show up when events are exported to PDF.
</issue>
<code>
[start of apps/events/views.py]
1 #-*- coding: utf-8 -*-
2
3 import datetime
4
5 from django.utils import timezone
6
7 from django.conf import settings
8 from django.contrib import messages
9 from django.contrib.auth.decorators import login_required, user_passes_test
10 from django.core.urlresolvers import reverse
11 from django.http import HttpResponseRedirect
12 from django.shortcuts import render, get_object_or_404, redirect
13 from django.utils.translation import ugettext as _
14
15 import watson
16
17 from apps.events.forms import CaptchaForm
18 from apps.events.models import Event, AttendanceEvent, Attendee
19 from apps.events.pdf_generator import EventPDF
20
21
22 def index(request):
23 return render(request, 'events/index.html', {})
24
25 def details(request, event_id, event_slug):
26 event = get_object_or_404(Event, pk=event_id)
27
28 is_attendance_event = False
29 user_anonymous = True
30 user_attending = False
31 place_on_wait_list = 0
32 will_be_on_wait_list = False
33 rules = []
34 user_status = False
35
36 try:
37 attendance_event = AttendanceEvent.objects.get(pk=event_id)
38 is_attendance_event = True
39 form = CaptchaForm(user=request.user)
40
41 if attendance_event.rule_bundles:
42 for rule_bundle in attendance_event.rule_bundles.all():
43 rules.append(rule_bundle.get_rule_strings)
44
45 if request.user.is_authenticated():
46 user_anonymous = False
47 if attendance_event.is_attendee(request.user):
48 user_attending = True
49
50
51 will_be_on_wait_list = attendance_event.will_i_be_on_wait_list
52
53 user_status = event.is_eligible_for_signup(request.user)
54
55 # Check if this user is on the waitlist
56 place_on_wait_list = event.what_place_is_user_on_wait_list(request.user)
57
58 except AttendanceEvent.DoesNotExist:
59 pass
60
61 if is_attendance_event:
62 context = {
63 'now': timezone.now(),
64 'event': event,
65 'attendance_event': attendance_event,
66 'user_anonymous': user_anonymous,
67 'user_attending': user_attending,
68 'will_be_on_wait_list': will_be_on_wait_list,
69 'rules': rules,
70 'user_status': user_status,
71 'place_on_wait_list': int(place_on_wait_list),
72 #'position_in_wait_list': position_in_wait_list,
73 'captcha_form': form,
74 }
75
76 return render(request, 'events/details.html', context)
77 else:
78 return render(request, 'events/details.html', {'event': event})
79
80
81 def get_attendee(attendee_id):
82 return get_object_or_404(Attendee, pk=attendee_id)
83
84 @login_required
85 def attendEvent(request, event_id):
86
87 event = get_object_or_404(Event, pk=event_id)
88
89 if not request.POST:
90 messages.error(request, _(u'Vennligst fyll ut skjemaet.'))
91 return redirect(event)
92 form = CaptchaForm(request.POST, user=request.user)
93
94 if not form.is_valid():
95 if not 'mark_rules' in request.POST and not request.user.mark_rules:
96 error_message = u'Du må godta prikkreglene for å melde deg på.'
97 else:
98 error_message = u'Du klarte ikke captcha-en. Er du en bot?'
99 messages.error(request, _(error_message))
100 return redirect(event)
101
102 # Check if the user is eligible to attend this event.
103 # If not, an error message will be present in the returned dict
104 attendance_event = event.attendance_event
105
106 response = event.is_eligible_for_signup(request.user);
107
108 if response['status']:
109 # First time accepting mark rules
110 if 'mark_rules' in form.cleaned_data:
111 request.user.mark_rules = True
112 request.user.save()
113 Attendee(event=attendance_event, user=request.user).save()
114 messages.success(request, _(u"Du er nå påmeldt på arrangementet!"))
115 return redirect(event)
116 else:
117 messages.error(request, response['message'])
118 return redirect(event)
119
120 @login_required
121 def unattendEvent(request, event_id):
122
123 event = get_object_or_404(Event, pk=event_id)
124 attendance_event = event.attendance_event
125
126 # Check if the deadline for unattending has passed
127 if attendance_event.unattend_deadline < timezone.now():
128 messages.error(request, _(u"Avmeldingsfristen for dette arrangementet har utløpt."))
129 return redirect(event)
130
131 event.notify_waiting_list(host=request.META['HTTP_HOST'], unattended_user=request.user)
132 Attendee.objects.get(event=attendance_event, user=request.user).delete()
133
134 messages.success(request, _(u"Du ble meldt av arrangementet."))
135 return redirect(event)
136
137 def search_events(request):
138 query = request.GET.get('query')
139 filters = {
140 'future' : request.GET.get('future'),
141 'myevents' : request.GET.get('myevents')
142 }
143 events = _search_indexed(request, query, filters)
144
145 return render(request, 'events/search.html', {'events': events})
146
147
148 def _search_indexed(request, query, filters):
149 results = []
150 kwargs = {}
151
152 if filters['future'] == 'true':
153 kwargs['event_start__gte'] = timezone.now()
154
155 if filters['myevents'] == 'true':
156 kwargs['attendance_event__attendees'] = request.user
157
158 events = Event.objects.filter(**kwargs).order_by('event_start').prefetch_related(
159 'attendance_event', 'attendance_event__attendees')
160
161 if query:
162 for result in watson.search(query, models=(events,)):
163 results.append(result.object)
164 return results[:10]
165
166 return events
167
168
169 @login_required()
170 @user_passes_test(lambda u: u.groups.filter(name='Komiteer').count() == 1)
171 def generate_pdf(request, event_id):
172
173 event = get_object_or_404(Event, pk=event_id)
174
175 groups = request.user.groups.all()
176 if not (groups.filter(name='dotKom').count() == 1 or groups.filter(name='Hovedstyret').count() == 1):
177 if event.event_type == 1 and not groups.filter(name='arrKom').count() == 1:
178 messages.error(request, _(u'Du har ikke tilgang til listen for dette arrangementet.'))
179 return redirect(event)
180
181 if event.event_type == 2 and not groups.filter(name='bedKom').count() == 1:
182 messages.error(request, _(u'Du har ikke tilgang til listen for dette arrangementet.'))
183 return redirect(event)
184
185 if event.event_type == 3 and not groups.filter(name='fagKom').count() == 1:
186 messages.error(request, _(u'Du har ikke tilgang til listen for dette arrangementet.'))
187 return redirect(event)
188
189 return EventPDF(event).render_pdf()
190
[end of apps/events/views.py]
[start of apps/events/forms.py]
1 from django import forms
2 from captcha.fields import CaptchaField
3
4 class CaptchaForm(forms.Form):
5 def __init__(self, *args, **kwargs):
6 user = kwargs.pop('user', None)
7 super(CaptchaForm, self).__init__(*args, **kwargs)
8 # Removing mark rules field if user has already accepted the rules
9 if user and user.is_authenticated() and user.mark_rules:
10 del self.fields['mark_rules']
11 mark_rules = forms.BooleanField(label=u'Jeg godtar <a href="/profile/#marks" target="_blank">prikkreglene</a>')
12 captcha = CaptchaField()
13
[end of apps/events/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/events/forms.py b/apps/events/forms.py
--- a/apps/events/forms.py
+++ b/apps/events/forms.py
@@ -1,12 +1,48 @@
+#-*- coding: utf-8 -*-
+
from django import forms
from captcha.fields import CaptchaField
+from django.utils.translation import ugettext as _
class CaptchaForm(forms.Form):
+
+ phone_number = forms.CharField(label=_(u'Telefonnummer er påkrevd for å være påmeldt et arrangement.'),
+ error_messages={'required' : _(u'Telefonnummer er påkrevd!')})
+ mark_rules = forms.BooleanField(label=_(u'Jeg godtar <a href="/profile/#marks" target="_blank">prikkreglene</a>'),
+ error_messages={'required' : _(u'Du må godta prikkereglene!')})
+ captcha = CaptchaField(error_messages={'required' : _(u'Du klarte ikke captchaen! Er du en bot?')})
+
def __init__(self, *args, **kwargs):
- user = kwargs.pop('user', None)
+ self.user = kwargs.pop('user', None)
super(CaptchaForm, self).__init__(*args, **kwargs)
+
# Removing mark rules field if user has already accepted the rules
- if user and user.is_authenticated() and user.mark_rules:
- del self.fields['mark_rules']
- mark_rules = forms.BooleanField(label=u'Jeg godtar <a href="/profile/#marks" target="_blank">prikkreglene</a>')
- captcha = CaptchaField()
+ if self.user and self.user.is_authenticated():
+ if self.user.mark_rules:
+ del self.fields['mark_rules']
+
+ if self.user.phone_number:
+ del self.fields['phone_number']
+
+
+ def clean(self):
+ super(CaptchaForm, self).clean()
+ cleaned_data = self.cleaned_data
+
+ if 'mark_rules' in self.fields:
+ if 'mark_rules' in cleaned_data:
+ mark_rules = cleaned_data['mark_rules']
+
+ if mark_rules:
+ self.user.mark_rules = True
+ self.user.save()
+
+ if 'phone_number' in self.fields:
+ if 'phone_number' in cleaned_data:
+ phone_number = cleaned_data['phone_number']
+
+ if phone_number:
+ self.user.phone_number = phone_number
+ self.user.save()
+
+ return cleaned_data
\ No newline at end of file
diff --git a/apps/events/views.py b/apps/events/views.py
--- a/apps/events/views.py
+++ b/apps/events/views.py
@@ -89,14 +89,14 @@
if not request.POST:
messages.error(request, _(u'Vennligst fyll ut skjemaet.'))
return redirect(event)
+
form = CaptchaForm(request.POST, user=request.user)
if not form.is_valid():
- if not 'mark_rules' in request.POST and not request.user.mark_rules:
- error_message = u'Du må godta prikkreglene for å melde deg på.'
- else:
- error_message = u'Du klarte ikke captcha-en. Er du en bot?'
- messages.error(request, _(error_message))
+ for field,errors in form.errors.items():
+ for error in errors:
+ messages.error(request, error)
+
return redirect(event)
# Check if the user is eligible to attend this event.
@@ -106,10 +106,6 @@
response = event.is_eligible_for_signup(request.user);
if response['status']:
- # First time accepting mark rules
- if 'mark_rules' in form.cleaned_data:
- request.user.mark_rules = True
- request.user.save()
Attendee(event=attendance_event, user=request.user).save()
messages.success(request, _(u"Du er nå påmeldt på arrangementet!"))
return redirect(event)
| {"golden_diff": "diff --git a/apps/events/forms.py b/apps/events/forms.py\n--- a/apps/events/forms.py\n+++ b/apps/events/forms.py\n@@ -1,12 +1,48 @@\n+#-*- coding: utf-8 -*-\n+\n from django import forms\n from captcha.fields import CaptchaField\n+from django.utils.translation import ugettext as _\n \n class CaptchaForm(forms.Form):\n+\n+ phone_number = forms.CharField(label=_(u'Telefonnummer er p\u00e5krevd for \u00e5 v\u00e6re p\u00e5meldt et arrangement.'),\n+ error_messages={'required' : _(u'Telefonnummer er p\u00e5krevd!')})\n+ mark_rules = forms.BooleanField(label=_(u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>'),\n+ error_messages={'required' : _(u'Du m\u00e5 godta prikkereglene!')})\n+ captcha = CaptchaField(error_messages={'required' : _(u'Du klarte ikke captchaen! Er du en bot?')})\n+\n def __init__(self, *args, **kwargs):\n- user = kwargs.pop('user', None)\n+ self.user = kwargs.pop('user', None)\n super(CaptchaForm, self).__init__(*args, **kwargs)\n+\n # Removing mark rules field if user has already accepted the rules\n- if user and user.is_authenticated() and user.mark_rules:\n- del self.fields['mark_rules']\n- mark_rules = forms.BooleanField(label=u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>')\n- captcha = CaptchaField()\n+ if self.user and self.user.is_authenticated():\n+ if self.user.mark_rules:\n+ del self.fields['mark_rules']\n+\n+ if self.user.phone_number:\n+ del self.fields['phone_number']\n+\n+\n+ def clean(self):\n+ super(CaptchaForm, self).clean()\n+ cleaned_data = self.cleaned_data\n+\n+ if 'mark_rules' in self.fields:\n+ if 'mark_rules' in cleaned_data:\n+ mark_rules = cleaned_data['mark_rules']\n+\n+ if mark_rules:\n+ self.user.mark_rules = True\n+ self.user.save()\n+\n+ if 'phone_number' in self.fields:\n+ if 'phone_number' in cleaned_data:\n+ phone_number = cleaned_data['phone_number']\n+\n+ if phone_number:\n+ self.user.phone_number = phone_number\n+ self.user.save()\n+\n+ return cleaned_data\n\\ No newline at end of file\ndiff --git a/apps/events/views.py b/apps/events/views.py\n--- a/apps/events/views.py\n+++ b/apps/events/views.py\n@@ -89,14 +89,14 @@\n if not request.POST:\n messages.error(request, _(u'Vennligst fyll ut skjemaet.'))\n return redirect(event)\n+\n form = CaptchaForm(request.POST, user=request.user)\n \n if not form.is_valid():\n- if not 'mark_rules' in request.POST and not request.user.mark_rules:\n- error_message = u'Du m\u00e5 godta prikkreglene for \u00e5 melde deg p\u00e5.'\n- else:\n- error_message = u'Du klarte ikke captcha-en. Er du en bot?'\n- messages.error(request, _(error_message))\n+ for field,errors in form.errors.items():\n+ for error in errors:\n+ messages.error(request, error)\n+\n return redirect(event)\n \n # Check if the user is eligible to attend this event.\n@@ -106,10 +106,6 @@\n response = event.is_eligible_for_signup(request.user);\n \n if response['status']: \n- # First time accepting mark rules\n- if 'mark_rules' in form.cleaned_data:\n- request.user.mark_rules = True\n- request.user.save()\n Attendee(event=attendance_event, user=request.user).save()\n messages.success(request, _(u\"Du er n\u00e5 p\u00e5meldt p\u00e5 arrangementet!\"))\n return redirect(event)\n", "issue": "Mandatory phone number in profile\nIt has been requested from arrkom among others and decided in HS that phone numbers in the user profile should be mandatory for people attending events. So we need to implement functionality similar to the one used for \"prikkeregler\". 
\n\nIf users hide this in their profile the info could behave as allergies and only show up when events are exported to pdf. \n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\n\nimport datetime\n\nfrom django.utils import timezone\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\n\nimport watson\n\nfrom apps.events.forms import CaptchaForm\nfrom apps.events.models import Event, AttendanceEvent, Attendee\nfrom apps.events.pdf_generator import EventPDF\n\n\ndef index(request):\n return render(request, 'events/index.html', {})\n\ndef details(request, event_id, event_slug):\n event = get_object_or_404(Event, pk=event_id)\n\n is_attendance_event = False\n user_anonymous = True\n user_attending = False\n place_on_wait_list = 0\n will_be_on_wait_list = False\n rules = []\n user_status = False\n\n try:\n attendance_event = AttendanceEvent.objects.get(pk=event_id)\n is_attendance_event = True\n form = CaptchaForm(user=request.user)\n\n if attendance_event.rule_bundles:\n for rule_bundle in attendance_event.rule_bundles.all():\n rules.append(rule_bundle.get_rule_strings)\n\n if request.user.is_authenticated():\n user_anonymous = False\n if attendance_event.is_attendee(request.user):\n user_attending = True\n\n \n will_be_on_wait_list = attendance_event.will_i_be_on_wait_list\n\n user_status = event.is_eligible_for_signup(request.user)\n\n # Check if this user is on the waitlist\n place_on_wait_list = event.what_place_is_user_on_wait_list(request.user)\n\n except AttendanceEvent.DoesNotExist:\n pass\n\n if is_attendance_event:\n context = {\n 'now': timezone.now(),\n 'event': event,\n 'attendance_event': attendance_event,\n 'user_anonymous': user_anonymous,\n 'user_attending': user_attending,\n 'will_be_on_wait_list': will_be_on_wait_list,\n 'rules': rules,\n 'user_status': user_status,\n 'place_on_wait_list': int(place_on_wait_list),\n #'position_in_wait_list': position_in_wait_list,\n 'captcha_form': form,\n }\n \n return render(request, 'events/details.html', context)\n else:\n return render(request, 'events/details.html', {'event': event})\n\n\ndef get_attendee(attendee_id):\n return get_object_or_404(Attendee, pk=attendee_id)\n\n@login_required\ndef attendEvent(request, event_id):\n \n event = get_object_or_404(Event, pk=event_id)\n\n if not request.POST:\n messages.error(request, _(u'Vennligst fyll ut skjemaet.'))\n return redirect(event)\n form = CaptchaForm(request.POST, user=request.user)\n\n if not form.is_valid():\n if not 'mark_rules' in request.POST and not request.user.mark_rules:\n error_message = u'Du m\u00e5 godta prikkreglene for \u00e5 melde deg p\u00e5.'\n else:\n error_message = u'Du klarte ikke captcha-en. 
Er du en bot?'\n messages.error(request, _(error_message))\n return redirect(event)\n\n # Check if the user is eligible to attend this event.\n # If not, an error message will be present in the returned dict\n attendance_event = event.attendance_event\n\n response = event.is_eligible_for_signup(request.user);\n\n if response['status']: \n # First time accepting mark rules\n if 'mark_rules' in form.cleaned_data:\n request.user.mark_rules = True\n request.user.save()\n Attendee(event=attendance_event, user=request.user).save()\n messages.success(request, _(u\"Du er n\u00e5 p\u00e5meldt p\u00e5 arrangementet!\"))\n return redirect(event)\n else:\n messages.error(request, response['message'])\n return redirect(event)\n\n@login_required\ndef unattendEvent(request, event_id):\n\n event = get_object_or_404(Event, pk=event_id)\n attendance_event = event.attendance_event\n\n # Check if the deadline for unattending has passed\n if attendance_event.unattend_deadline < timezone.now():\n messages.error(request, _(u\"Avmeldingsfristen for dette arrangementet har utl\u00f8pt.\"))\n return redirect(event)\n\n event.notify_waiting_list(host=request.META['HTTP_HOST'], unattended_user=request.user)\n Attendee.objects.get(event=attendance_event, user=request.user).delete()\n\n messages.success(request, _(u\"Du ble meldt av arrangementet.\"))\n return redirect(event)\n\ndef search_events(request):\n query = request.GET.get('query')\n filters = {\n 'future' : request.GET.get('future'),\n 'myevents' : request.GET.get('myevents')\n }\n events = _search_indexed(request, query, filters)\n\n return render(request, 'events/search.html', {'events': events})\n\n\ndef _search_indexed(request, query, filters):\n results = []\n kwargs = {}\n\n if filters['future'] == 'true':\n kwargs['event_start__gte'] = timezone.now()\n\n if filters['myevents'] == 'true':\n kwargs['attendance_event__attendees'] = request.user\n\n events = Event.objects.filter(**kwargs).order_by('event_start').prefetch_related(\n 'attendance_event', 'attendance_event__attendees')\n\n if query:\n for result in watson.search(query, models=(events,)):\n results.append(result.object)\n return results[:10]\n\n return events\n\n\n@login_required()\n@user_passes_test(lambda u: u.groups.filter(name='Komiteer').count() == 1)\ndef generate_pdf(request, event_id):\n\n event = get_object_or_404(Event, pk=event_id)\n\n groups = request.user.groups.all()\n if not (groups.filter(name='dotKom').count() == 1 or groups.filter(name='Hovedstyret').count() == 1):\n if event.event_type == 1 and not groups.filter(name='arrKom').count() == 1:\n messages.error(request, _(u'Du har ikke tilgang til listen for dette arrangementet.'))\n return redirect(event)\n\n if event.event_type == 2 and not groups.filter(name='bedKom').count() == 1:\n messages.error(request, _(u'Du har ikke tilgang til listen for dette arrangementet.'))\n return redirect(event)\n\n if event.event_type == 3 and not groups.filter(name='fagKom').count() == 1:\n messages.error(request, _(u'Du har ikke tilgang til listen for dette arrangementet.')) \n return redirect(event)\n\n return EventPDF(event).render_pdf()\n", "path": "apps/events/views.py"}, {"content": "from django import forms\nfrom captcha.fields import CaptchaField\n\nclass CaptchaForm(forms.Form):\n def __init__(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n super(CaptchaForm, self).__init__(*args, **kwargs)\n # Removing mark rules field if user has already accepted the rules\n if user and user.is_authenticated() and user.mark_rules:\n del 
self.fields['mark_rules']\n mark_rules = forms.BooleanField(label=u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>')\n captcha = CaptchaField()\n", "path": "apps/events/forms.py"}]} | 2,733 | 880 |
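The core of the change above is a form that asks for a phone number only when the profile lacks one, and persists it in `clean()`. A condensed Django sketch (field labels are copied from the diff; the class name and the omitted captcha/mark-rules fields are illustrative):

```python
# -*- coding: utf-8 -*-
from django import forms


class PhoneNumberForm(forms.Form):
    phone_number = forms.CharField(
        label=u'Telefonnummer er påkrevd for å være påmeldt et arrangement.',
        error_messages={'required': u'Telefonnummer er påkrevd!'})

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user', None)
        super(PhoneNumberForm, self).__init__(*args, **kwargs)
        # Users who already registered a number are not asked again.
        if self.user and self.user.is_authenticated() and self.user.phone_number:
            del self.fields['phone_number']

    def clean(self):
        cleaned_data = super(PhoneNumberForm, self).clean()
        if self.user and 'phone_number' in self.fields and cleaned_data.get('phone_number'):
            # Persist the number on the profile, mirroring the diff above.
            self.user.phone_number = cleaned_data['phone_number']
            self.user.save()
        return cleaned_data
```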
gh_patches_debug_26159 | rasdani/github-patches | git_diff | keras-team__keras-7330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
skipgram seed parameter got removed in a documentation patch; the seed parameter should be re-added
Patch 0af6b6c7f5cbad394673bc962dd248f50fd821ff removed the seed parameter from skipgrams. Having a seed parameter makes it easier to vary the results from ``skipgrams`` in a controlled way.
</issue>
<code>
[start of keras/preprocessing/sequence.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 import numpy as np
5 import random
6 from six.moves import range
7
8
9 def pad_sequences(sequences, maxlen=None, dtype='int32',
10 padding='pre', truncating='pre', value=0.):
11 """Pads each sequence to the same length (length of the longest sequence).
12
13 If maxlen is provided, any sequence longer
14 than maxlen is truncated to maxlen.
15 Truncation happens off either the beginning (default) or
16 the end of the sequence.
17
18 Supports post-padding and pre-padding (default).
19
20 # Arguments
21 sequences: list of lists where each element is a sequence
22 maxlen: int, maximum length
23 dtype: type to cast the resulting sequence.
24 padding: 'pre' or 'post', pad either before or after each sequence.
25 truncating: 'pre' or 'post', remove values from sequences larger than
26 maxlen either in the beginning or in the end of the sequence
27 value: float, value to pad the sequences to the desired value.
28
29 # Returns
30 x: numpy array with dimensions (number_of_sequences, maxlen)
31
32 # Raises
33 ValueError: in case of invalid values for `truncating` or `padding`,
34 or in case of invalid shape for a `sequences` entry.
35 """
36 if not hasattr(sequences, '__len__'):
37 raise ValueError('`sequences` must be iterable.')
38 lengths = []
39 for x in sequences:
40 if not hasattr(x, '__len__'):
41 raise ValueError('`sequences` must be a list of iterables. '
42 'Found non-iterable: ' + str(x))
43 lengths.append(len(x))
44
45 num_samples = len(sequences)
46 if maxlen is None:
47 maxlen = np.max(lengths)
48
49 # take the sample shape from the first non empty sequence
50 # checking for consistency in the main loop below.
51 sample_shape = tuple()
52 for s in sequences:
53 if len(s) > 0:
54 sample_shape = np.asarray(s).shape[1:]
55 break
56
57 x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)
58 for idx, s in enumerate(sequences):
59 if not len(s):
60 continue # empty list/array was found
61 if truncating == 'pre':
62 trunc = s[-maxlen:]
63 elif truncating == 'post':
64 trunc = s[:maxlen]
65 else:
66 raise ValueError('Truncating type "%s" not understood' % truncating)
67
68 # check `trunc` has expected shape
69 trunc = np.asarray(trunc, dtype=dtype)
70 if trunc.shape[1:] != sample_shape:
71 raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
72 (trunc.shape[1:], idx, sample_shape))
73
74 if padding == 'post':
75 x[idx, :len(trunc)] = trunc
76 elif padding == 'pre':
77 x[idx, -len(trunc):] = trunc
78 else:
79 raise ValueError('Padding type "%s" not understood' % padding)
80 return x
81
82
83 def make_sampling_table(size, sampling_factor=1e-5):
84 """Generates a word rank-based probabilistic sampling table.
85
86 This generates an array where the ith element
87 is the probability that a word of rank i would be sampled,
88 according to the sampling distribution used in word2vec.
89
90 The word2vec formula is:
91 p(word) = min(1, sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))
92
93 We assume that the word frequencies follow Zipf's law (s=1) to derive
94 a numerical approximation of frequency(rank):
95 frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
96 where gamma is the Euler-Mascheroni constant.
97
98 # Arguments
99 size: int, number of possible words to sample.
100 sampling_factor: the sampling factor in the word2vec formula.
101
102 # Returns
103 A 1D Numpy array of length `size` where the ith entry
104 is the probability that a word of rank i should be sampled.
105 """
106 gamma = 0.577
107 rank = np.arange(size)
108 rank[0] = 1
109 inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank)
110 f = sampling_factor * inv_fq
111
112 return np.minimum(1., f / np.sqrt(f))
113
114
115 def skipgrams(sequence, vocabulary_size,
116 window_size=4, negative_samples=1., shuffle=True,
117 categorical=False, sampling_table=None):
118 """Generates skipgram word pairs.
119
120 Takes a sequence (list of indexes of words),
121 returns couples of [word_index, other_word index] and labels (1s or 0s),
122 where label = 1 if 'other_word' belongs to the context of 'word',
123 and label=0 if 'other_word' is randomly sampled
124
125 # Arguments
126 sequence: a word sequence (sentence), encoded as a list
127 of word indices (integers). If using a `sampling_table`,
128 word indices are expected to match the rank
129 of the words in a reference dataset (e.g. 10 would encode
130 the 10-th most frequently occurring token).
131 Note that index 0 is expected to be a non-word and will be skipped.
132 vocabulary_size: int. maximum possible word index + 1
133 window_size: int. actually half-window.
134 The window of a word wi will be [i-window_size, i+window_size+1]
135 negative_samples: float >= 0. 0 for no negative (=random) samples.
136 1 for same number as positive samples. etc.
137 shuffle: whether to shuffle the word couples before returning them.
138 categorical: bool. if False, labels will be
139 integers (eg. [0, 1, 1 .. ]),
140 if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ]
141 sampling_table: 1D array of size `vocabulary_size` where the entry i
142 encodes the probabibily to sample a word of rank i.
143
144 # Returns
145 couples, labels: where `couples` are int pairs and
146 `labels` are either 0 or 1.
147
148 # Note
149 By convention, index 0 in the vocabulary is
150 a non-word and will be skipped.
151 """
152 couples = []
153 labels = []
154 for i, wi in enumerate(sequence):
155 if not wi:
156 continue
157 if sampling_table is not None:
158 if sampling_table[wi] < random.random():
159 continue
160
161 window_start = max(0, i - window_size)
162 window_end = min(len(sequence), i + window_size + 1)
163 for j in range(window_start, window_end):
164 if j != i:
165 wj = sequence[j]
166 if not wj:
167 continue
168 couples.append([wi, wj])
169 if categorical:
170 labels.append([0, 1])
171 else:
172 labels.append(1)
173
174 if negative_samples > 0:
175 num_negative_samples = int(len(labels) * negative_samples)
176 words = [c[0] for c in couples]
177 random.shuffle(words)
178
179 couples += [[words[i % len(words)],
180 random.randint(1, vocabulary_size - 1)] for i in range(num_negative_samples)]
181 if categorical:
182 labels += [[1, 0]] * num_negative_samples
183 else:
184 labels += [0] * num_negative_samples
185
186 if shuffle:
187 seed = random.randint(0, 10e6)
188 random.seed(seed)
189 random.shuffle(couples)
190 random.seed(seed)
191 random.shuffle(labels)
192
193 return couples, labels
194
195
196 def _remove_long_seq(maxlen, seq, label):
197 """Removes sequences that exceed the maximum length.
198
199 # Arguments
200 maxlen: int, maximum length
201 seq: list of lists where each sublist is a sequence
202 label: list where each element is an integer
203
204 # Returns
205 new_seq, new_label: shortened lists for `seq` and `label`.
206 """
207 new_seq, new_label = [], []
208 for x, y in zip(seq, label):
209 if len(x) < maxlen:
210 new_seq.append(x)
211 new_label.append(y)
212 return new_seq, new_label
213
[end of keras/preprocessing/sequence.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras/preprocessing/sequence.py b/keras/preprocessing/sequence.py
--- a/keras/preprocessing/sequence.py
+++ b/keras/preprocessing/sequence.py
@@ -114,7 +114,7 @@
def skipgrams(sequence, vocabulary_size,
window_size=4, negative_samples=1., shuffle=True,
- categorical=False, sampling_table=None):
+ categorical=False, sampling_table=None, seed=None):
"""Generates skipgram word pairs.
Takes a sequence (list of indexes of words),
@@ -140,6 +140,7 @@
if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ]
sampling_table: 1D array of size `vocabulary_size` where the entry i
encodes the probabibily to sample a word of rank i.
+ seed: random seed.
# Returns
couples, labels: where `couples` are int pairs and
@@ -184,7 +185,8 @@
labels += [0] * num_negative_samples
if shuffle:
- seed = random.randint(0, 10e6)
+ if seed is None:
+ seed = random.randint(0, 10e6)
random.seed(seed)
random.shuffle(couples)
random.seed(seed)
| {"golden_diff": "diff --git a/keras/preprocessing/sequence.py b/keras/preprocessing/sequence.py\n--- a/keras/preprocessing/sequence.py\n+++ b/keras/preprocessing/sequence.py\n@@ -114,7 +114,7 @@\n \n def skipgrams(sequence, vocabulary_size,\n window_size=4, negative_samples=1., shuffle=True,\n- categorical=False, sampling_table=None):\n+ categorical=False, sampling_table=None, seed=None):\n \"\"\"Generates skipgram word pairs.\n \n Takes a sequence (list of indexes of words),\n@@ -140,6 +140,7 @@\n if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ]\n sampling_table: 1D array of size `vocabulary_size` where the entry i\n encodes the probabibily to sample a word of rank i.\n+ seed: random seed.\n \n # Returns\n couples, labels: where `couples` are int pairs and\n@@ -184,7 +185,8 @@\n labels += [0] * num_negative_samples\n \n if shuffle:\n- seed = random.randint(0, 10e6)\n+ if seed is None:\n+ seed = random.randint(0, 10e6)\n random.seed(seed)\n random.shuffle(couples)\n random.seed(seed)\n", "issue": "skipgram seed parameter got removed in a documentation patch, seed parameter should be readded\nPatch 0af6b6c7f5cbad394673bc962dd248f50fd821ff removed the seed parameter from skipgrams. Having a seed parameter makes it easier to vary the results from``skipgram`` in a controlled way.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport random\nfrom six.moves import range\n\n\ndef pad_sequences(sequences, maxlen=None, dtype='int32',\n padding='pre', truncating='pre', value=0.):\n \"\"\"Pads each sequence to the same length (length of the longest sequence).\n\n If maxlen is provided, any sequence longer\n than maxlen is truncated to maxlen.\n Truncation happens off either the beginning (default) or\n the end of the sequence.\n\n Supports post-padding and pre-padding (default).\n\n # Arguments\n sequences: list of lists where each element is a sequence\n maxlen: int, maximum length\n dtype: type to cast the resulting sequence.\n padding: 'pre' or 'post', pad either before or after each sequence.\n truncating: 'pre' or 'post', remove values from sequences larger than\n maxlen either in the beginning or in the end of the sequence\n value: float, value to pad the sequences to the desired value.\n\n # Returns\n x: numpy array with dimensions (number_of_sequences, maxlen)\n\n # Raises\n ValueError: in case of invalid values for `truncating` or `padding`,\n or in case of invalid shape for a `sequences` entry.\n \"\"\"\n if not hasattr(sequences, '__len__'):\n raise ValueError('`sequences` must be iterable.')\n lengths = []\n for x in sequences:\n if not hasattr(x, '__len__'):\n raise ValueError('`sequences` must be a list of iterables. 
'\n 'Found non-iterable: ' + str(x))\n lengths.append(len(x))\n\n num_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0:\n sample_shape = np.asarray(s).shape[1:]\n break\n\n x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n if not len(s):\n continue # empty list/array was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError('Truncating type \"%s\" not understood' % truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %\n (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x\n\n\ndef make_sampling_table(size, sampling_factor=1e-5):\n \"\"\"Generates a word rank-based probabilistic sampling table.\n\n This generates an array where the ith element\n is the probability that a word of rank i would be sampled,\n according to the sampling distribution used in word2vec.\n\n The word2vec formula is:\n p(word) = min(1, sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))\n\n We assume that the word frequencies follow Zipf's law (s=1) to derive\n a numerical approximation of frequency(rank):\n frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))\n where gamma is the Euler-Mascheroni constant.\n\n # Arguments\n size: int, number of possible words to sample.\n sampling_factor: the sampling factor in the word2vec formula.\n\n # Returns\n A 1D Numpy array of length `size` where the ith entry\n is the probability that a word of rank i should be sampled.\n \"\"\"\n gamma = 0.577\n rank = np.arange(size)\n rank[0] = 1\n inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank)\n f = sampling_factor * inv_fq\n\n return np.minimum(1., f / np.sqrt(f))\n\n\ndef skipgrams(sequence, vocabulary_size,\n window_size=4, negative_samples=1., shuffle=True,\n categorical=False, sampling_table=None):\n \"\"\"Generates skipgram word pairs.\n\n Takes a sequence (list of indexes of words),\n returns couples of [word_index, other_word index] and labels (1s or 0s),\n where label = 1 if 'other_word' belongs to the context of 'word',\n and label=0 if 'other_word' is randomly sampled\n\n # Arguments\n sequence: a word sequence (sentence), encoded as a list\n of word indices (integers). If using a `sampling_table`,\n word indices are expected to match the rank\n of the words in a reference dataset (e.g. 10 would encode\n the 10-th most frequently occurring token).\n Note that index 0 is expected to be a non-word and will be skipped.\n vocabulary_size: int. maximum possible word index + 1\n window_size: int. actually half-window.\n The window of a word wi will be [i-window_size, i+window_size+1]\n negative_samples: float >= 0. 0 for no negative (=random) samples.\n 1 for same number as positive samples. etc.\n shuffle: whether to shuffle the word couples before returning them.\n categorical: bool. if False, labels will be\n integers (eg. [0, 1, 1 .. ]),\n if True labels will be categorical eg. 
[[1,0],[0,1],[0,1] .. ]\n sampling_table: 1D array of size `vocabulary_size` where the entry i\n encodes the probabibily to sample a word of rank i.\n\n # Returns\n couples, labels: where `couples` are int pairs and\n `labels` are either 0 or 1.\n\n # Note\n By convention, index 0 in the vocabulary is\n a non-word and will be skipped.\n \"\"\"\n couples = []\n labels = []\n for i, wi in enumerate(sequence):\n if not wi:\n continue\n if sampling_table is not None:\n if sampling_table[wi] < random.random():\n continue\n\n window_start = max(0, i - window_size)\n window_end = min(len(sequence), i + window_size + 1)\n for j in range(window_start, window_end):\n if j != i:\n wj = sequence[j]\n if not wj:\n continue\n couples.append([wi, wj])\n if categorical:\n labels.append([0, 1])\n else:\n labels.append(1)\n\n if negative_samples > 0:\n num_negative_samples = int(len(labels) * negative_samples)\n words = [c[0] for c in couples]\n random.shuffle(words)\n\n couples += [[words[i % len(words)],\n random.randint(1, vocabulary_size - 1)] for i in range(num_negative_samples)]\n if categorical:\n labels += [[1, 0]] * num_negative_samples\n else:\n labels += [0] * num_negative_samples\n\n if shuffle:\n seed = random.randint(0, 10e6)\n random.seed(seed)\n random.shuffle(couples)\n random.seed(seed)\n random.shuffle(labels)\n\n return couples, labels\n\n\ndef _remove_long_seq(maxlen, seq, label):\n \"\"\"Removes sequences that exceed the maximum length.\n\n # Arguments\n maxlen: int, maximum length\n seq: list of lists where each sublist is a sequence\n label: list where each element is an integer\n\n # Returns\n new_seq, new_label: shortened lists for `seq` and `label`.\n \"\"\"\n new_seq, new_label = [], []\n for x, y in zip(seq, label):\n if len(x) < maxlen:\n new_seq.append(x)\n new_label.append(y)\n return new_seq, new_label\n", "path": "keras/preprocessing/sequence.py"}]} | 3,020 | 306 |
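The patch above threads an optional `seed` through `skipgrams` so the final shuffle is reproducible. The essential trick, shown standalone (the function name is illustrative; reusing one seed for both shuffles is what keeps couples and labels aligned):

```python
import random


def shuffle_pairs(couples, labels, seed=None):
    """Shuffle two parallel, equal-length lists with the same permutation."""
    if seed is None:
        seed = random.randint(0, 10 ** 6)
    random.seed(seed)
    random.shuffle(couples)
    random.seed(seed)  # reset so labels get the identical permutation
    random.shuffle(labels)
    return couples, labels
```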
gh_patches_debug_31395 | rasdani/github-patches | git_diff | aio-libs__aiohttp-4058 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop aiodns < 1.1
aiodns 1.1.1 was released on Oct 14, 2016.
Let's drop support for aiodns 1.0 in aiohttp 4.0.
</issue>
<code>
[start of aiohttp/resolver.py]
1 import socket
2 from typing import Any, Dict, List
3
4 from .abc import AbstractResolver
5 from .helpers import get_running_loop
6
7 __all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')
8
9 try:
10 import aiodns
11 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
12 except ImportError: # pragma: no cover
13 aiodns = None
14
15 aiodns_default = False
16
17
18 class ThreadedResolver(AbstractResolver):
19 """Use Executor for synchronous getaddrinfo() calls, which defaults to
20 concurrent.futures.ThreadPoolExecutor.
21 """
22
23 def __init__(self) -> None:
24 self._loop = get_running_loop()
25
26 async def resolve(self, host: str, port: int=0,
27 family: int=socket.AF_INET) -> List[Dict[str, Any]]:
28 infos = await self._loop.getaddrinfo(
29 host, port, type=socket.SOCK_STREAM, family=family)
30
31 hosts = []
32 for family, _, proto, _, address in infos:
33 hosts.append(
34 {'hostname': host,
35 'host': address[0], 'port': address[1],
36 'family': family, 'proto': proto,
37 'flags': socket.AI_NUMERICHOST})
38
39 return hosts
40
41 async def close(self) -> None:
42 pass
43
44
45 class AsyncResolver(AbstractResolver):
46 """Use the `aiodns` package to make asynchronous DNS lookups"""
47
48 def __init__(self, *args: Any, **kwargs: Any) -> None:
49 if aiodns is None:
50 raise RuntimeError("Resolver requires aiodns library")
51
52 self._loop = get_running_loop()
53 self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)
54
55 if not hasattr(self._resolver, 'gethostbyname'):
56 # aiodns 1.1 is not available, fallback to DNSResolver.query
57 self.resolve = self._resolve_with_query # type: ignore
58
59 async def resolve(self, host: str, port: int=0,
60 family: int=socket.AF_INET) -> List[Dict[str, Any]]:
61 try:
62 resp = await self._resolver.gethostbyname(host, family)
63 except aiodns.error.DNSError as exc:
64 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
65 raise OSError(msg) from exc
66 hosts = []
67 for address in resp.addresses:
68 hosts.append(
69 {'hostname': host,
70 'host': address, 'port': port,
71 'family': family, 'proto': 0,
72 'flags': socket.AI_NUMERICHOST})
73
74 if not hosts:
75 raise OSError("DNS lookup failed")
76
77 return hosts
78
79 async def _resolve_with_query(
80 self, host: str, port: int=0,
81 family: int=socket.AF_INET) -> List[Dict[str, Any]]:
82 if family == socket.AF_INET6:
83 qtype = 'AAAA'
84 else:
85 qtype = 'A'
86
87 try:
88 resp = await self._resolver.query(host, qtype)
89 except aiodns.error.DNSError as exc:
90 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
91 raise OSError(msg) from exc
92
93 hosts = []
94 for rr in resp:
95 hosts.append(
96 {'hostname': host,
97 'host': rr.host, 'port': port,
98 'family': family, 'proto': 0,
99 'flags': socket.AI_NUMERICHOST})
100
101 if not hosts:
102 raise OSError("DNS lookup failed")
103
104 return hosts
105
106 async def close(self) -> None:
107 return self._resolver.cancel()
108
109
110 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
111
[end of aiohttp/resolver.py]
[start of setup.py]
1 import codecs
2 import os
3 import pathlib
4 import re
5 import sys
6 from distutils.command.build_ext import build_ext
7 from distutils.errors import (CCompilerError, DistutilsExecError,
8 DistutilsPlatformError)
9
10 from setuptools import Extension, setup
11
12
13 if sys.version_info < (3, 5, 3):
14 raise RuntimeError("aiohttp 3.x requires Python 3.5.3+")
15
16
17 NO_EXTENSIONS = bool(os.environ.get('AIOHTTP_NO_EXTENSIONS')) # type: bool
18
19 if sys.implementation.name != "cpython":
20 NO_EXTENSIONS = True
21
22
23 here = pathlib.Path(__file__).parent
24
25 if (here / '.git').exists() and not (here / 'vendor/http-parser/README.md').exists():
26 print("Install submodules when building from git clone", file=sys.stderr)
27 print("Hint:", file=sys.stderr)
28 print(" git submodule update --init", file=sys.stderr)
29 sys.exit(2)
30
31
32 # NOTE: makefile cythonizes all Cython modules
33
34 extensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket.c']),
35 Extension('aiohttp._http_parser',
36 ['aiohttp/_http_parser.c',
37 'vendor/http-parser/http_parser.c',
38 'aiohttp/_find_header.c'],
39 define_macros=[('HTTP_PARSER_STRICT', 0)],
40 ),
41 Extension('aiohttp._frozenlist',
42 ['aiohttp/_frozenlist.c']),
43 Extension('aiohttp._helpers',
44 ['aiohttp/_helpers.c']),
45 Extension('aiohttp._http_writer',
46 ['aiohttp/_http_writer.c'])]
47
48
49 class BuildFailed(Exception):
50 pass
51
52
53 class ve_build_ext(build_ext):
54 # This class allows C extension building to fail.
55
56 def run(self):
57 try:
58 build_ext.run(self)
59 except (DistutilsPlatformError, FileNotFoundError):
60 raise BuildFailed()
61
62 def build_extension(self, ext):
63 try:
64 build_ext.build_extension(self, ext)
65 except (CCompilerError, DistutilsExecError,
66 DistutilsPlatformError, ValueError):
67 raise BuildFailed()
68
69
70
71 txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')
72 try:
73 version = re.findall(r"^__version__ = '([^']+)'\r?$",
74 txt, re.M)[0]
75 except IndexError:
76 raise RuntimeError('Unable to determine version.')
77
78 install_requires = [
79 'attrs>=17.3.0',
80 'chardet>=2.0,<4.0',
81 'multidict>=4.0,<5.0',
82 'async_timeout>=3.0,<4.0',
83 'yarl>=1.0,<2.0',
84 'idna-ssl>=1.0; python_version<"3.7"',
85 'typing_extensions>=3.6.5',
86 ]
87
88
89 def read(f):
90 return (here / f).read_text('utf-8').strip()
91
92
93 args = dict(
94 name='aiohttp',
95 version=version,
96 description='Async http client/server framework (asyncio)',
97 long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
98 long_description_content_type="text/x-rst",
99 classifiers=[
100 'License :: OSI Approved :: Apache Software License',
101 'Intended Audience :: Developers',
102 'Programming Language :: Python',
103 'Programming Language :: Python :: 3',
104 'Programming Language :: Python :: 3.5',
105 'Programming Language :: Python :: 3.6',
106 'Programming Language :: Python :: 3.7',
107 'Development Status :: 5 - Production/Stable',
108 'Operating System :: POSIX',
109 'Operating System :: MacOS :: MacOS X',
110 'Operating System :: Microsoft :: Windows',
111 'Topic :: Internet :: WWW/HTTP',
112 'Framework :: AsyncIO',
113 ],
114 author='Nikolay Kim',
115 author_email='[email protected]',
116 maintainer=', '.join(('Nikolay Kim <[email protected]>',
117 'Andrew Svetlov <[email protected]>')),
118 maintainer_email='[email protected]',
119 url='https://github.com/aio-libs/aiohttp',
120 project_urls={
121 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
122 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',
123 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
124 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
125 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
126 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',
127 'Docs: RTD': 'https://docs.aiohttp.org',
128 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',
129 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',
130 },
131 license='Apache 2',
132 packages=['aiohttp'],
133 python_requires='>=3.5.3',
134 install_requires=install_requires,
135 extras_require={
136 'speedups': [
137 'aiodns',
138 'Brotli',
139 'cchardet',
140 ],
141 },
142 include_package_data=True,
143 )
144
145 if not NO_EXTENSIONS:
146 print("**********************")
147 print("* Accellerated build *")
148 print("**********************")
149 setup(ext_modules=extensions,
150 cmdclass=dict(build_ext=ve_build_ext),
151 **args)
152 else:
153 print("*********************")
154 print("* Pure Python build *")
155 print("*********************")
156 setup(**args)
157
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py
--- a/aiohttp/resolver.py
+++ b/aiohttp/resolver.py
@@ -52,10 +52,6 @@
self._loop = get_running_loop()
self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)
- if not hasattr(self._resolver, 'gethostbyname'):
- # aiodns 1.1 is not available, fallback to DNSResolver.query
- self.resolve = self._resolve_with_query # type: ignore
-
async def resolve(self, host: str, port: int=0,
family: int=socket.AF_INET) -> List[Dict[str, Any]]:
try:
@@ -76,33 +72,6 @@
return hosts
- async def _resolve_with_query(
- self, host: str, port: int=0,
- family: int=socket.AF_INET) -> List[Dict[str, Any]]:
- if family == socket.AF_INET6:
- qtype = 'AAAA'
- else:
- qtype = 'A'
-
- try:
- resp = await self._resolver.query(host, qtype)
- except aiodns.error.DNSError as exc:
- msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
- raise OSError(msg) from exc
-
- hosts = []
- for rr in resp:
- hosts.append(
- {'hostname': host,
- 'host': rr.host, 'port': port,
- 'family': family, 'proto': 0,
- 'flags': socket.AI_NUMERICHOST})
-
- if not hosts:
- raise OSError("DNS lookup failed")
-
- return hosts
-
async def close(self) -> None:
return self._resolver.cancel()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -134,7 +134,7 @@
install_requires=install_requires,
extras_require={
'speedups': [
- 'aiodns',
+ 'aiodns>=1.1',
'Brotli',
'cchardet',
],
| {"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -52,10 +52,6 @@\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n \n- if not hasattr(self._resolver, 'gethostbyname'):\n- # aiodns 1.1 is not available, fallback to DNSResolver.query\n- self.resolve = self._resolve_with_query # type: ignore\n-\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n try:\n@@ -76,33 +72,6 @@\n \n return hosts\n \n- async def _resolve_with_query(\n- self, host: str, port: int=0,\n- family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n- if family == socket.AF_INET6:\n- qtype = 'AAAA'\n- else:\n- qtype = 'A'\n-\n- try:\n- resp = await self._resolver.query(host, qtype)\n- except aiodns.error.DNSError as exc:\n- msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n- raise OSError(msg) from exc\n-\n- hosts = []\n- for rr in resp:\n- hosts.append(\n- {'hostname': host,\n- 'host': rr.host, 'port': port,\n- 'family': family, 'proto': 0,\n- 'flags': socket.AI_NUMERICHOST})\n-\n- if not hosts:\n- raise OSError(\"DNS lookup failed\")\n-\n- return hosts\n-\n async def close(self) -> None:\n return self._resolver.cancel()\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -134,7 +134,7 @@\n install_requires=install_requires,\n extras_require={\n 'speedups': [\n- 'aiodns',\n+ 'aiodns>=1.1',\n 'Brotli',\n 'cchardet',\n ],\n", "issue": "Drop aiodns < 1.1\naiodns 1.1.1 was released on Oct 14, 2016\r\nLet's drop aiodns 1.0 in aiohttp 4.0\n", "before_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')\n\ntry:\n import aiodns\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family)\n\n hosts = []\n for family, _, proto, _, address in infos:\n hosts.append(\n {'hostname': host,\n 'host': address[0], 'port': address[1],\n 'family': family, 'proto': proto,\n 'flags': socket.AI_NUMERICHOST})\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n if not hasattr(self._resolver, 'gethostbyname'):\n # aiodns 1.1 is not available, fallback to DNSResolver.query\n self.resolve = self._resolve_with_query # type: ignore\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 
else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {'hostname': host,\n 'host': address, 'port': port,\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def _resolve_with_query(\n self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n if family == socket.AF_INET6:\n qtype = 'AAAA'\n else:\n qtype = 'A'\n\n try:\n resp = await self._resolver.query(host, qtype)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n\n hosts = []\n for rr in resp:\n hosts.append(\n {'hostname': host,\n 'host': rr.host, 'port': port,\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}, {"content": "import codecs\nimport os\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\n\nNO_EXTENSIONS = bool(os.environ.get('AIOHTTP_NO_EXTENSIONS')) # type: bool\n\nif sys.implementation.name != \"cpython\":\n NO_EXTENSIONS = True\n\n\nhere = pathlib.Path(__file__).parent\n\nif (here / '.git').exists() and not (here / 'vendor/http-parser/README.md').exists():\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n sys.exit(2)\n\n\n# NOTE: makefile cythonizes all Cython modules\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket.c']),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser.c',\n 'vendor/http-parser/http_parser.c',\n 'aiohttp/_find_header.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist.c']),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers.c']),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer.c'])]\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\ninstall_requires = [\n 'attrs>=17.3.0',\n 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0',\n 'idna-ssl>=1.0; python_version<\"3.7\"',\n 'typing_extensions>=3.6.5',\n]\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server framework (asyncio)',\n 
long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n extras_require={\n 'speedups': [\n 'aiodns',\n 'Brotli',\n 'cchardet',\n ],\n },\n include_package_data=True,\n)\n\nif not NO_EXTENSIONS:\n print(\"**********************\")\n print(\"* Accellerated build *\")\n print(\"**********************\")\n setup(ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n **args)\nelse:\n print(\"*********************\")\n print(\"* Pure Python build *\")\n print(\"*********************\")\n setup(**args)\n", "path": "setup.py"}]} | 3,303 | 505 |
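As background for the row above, a minimal standalone sketch (not aiohttp code) of the aiodns >= 1.1 `gethostbyname()` path that the patched `AsyncResolver` keeps as its only resolution route; the hostname is illustrative:

```python
# Assumes aiodns >= 1.1 is installed; older releases only offered DNSResolver.query().
import asyncio
import socket

import aiodns


async def resolve(host):
    resolver = aiodns.DNSResolver()
    # gethostbyname() is the call the patched AsyncResolver now uses unconditionally.
    result = await resolver.gethostbyname(host, socket.AF_INET)
    return list(result.addresses)


if __name__ == "__main__":
    print(asyncio.run(resolve("example.com")))
```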
gh_patches_debug_4345 | rasdani/github-patches | git_diff | netbox-community__netbox-16037 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to run scripts from CLI in v4.0
### Deployment Type
Self-hosted
### NetBox Version
v4.0.0
### Python Version
3.11
### Steps to Reproduce
1. Create a script
2. Run it with `python manage.py runscript 'module.ScriptName'` inside the NetBox instance
### Expected Behavior
Script should run.
### Observed Behavior
Script fails with:
> AttributeError: 'Script' object has no attribute 'full_name'
Running the same script from the GUI works fine; I have tried multiple scripts and haven't been able to run any of them via the CLI in v4.
It seems to be this line that fails: https://github.com/netbox-community/netbox/blob/develop/netbox/extras/management/commands/runscript.py#L104
</issue>
<code>
[start of netbox/extras/management/commands/runscript.py]
1 import json
2 import logging
3 import sys
4 import traceback
5 import uuid
6
7 from django.contrib.auth import get_user_model
8 from django.core.management.base import BaseCommand, CommandError
9 from django.db import transaction
10
11 from core.choices import JobStatusChoices
12 from core.models import Job
13 from extras.context_managers import event_tracking
14 from extras.scripts import get_module_and_script
15 from extras.signals import clear_events
16 from utilities.exceptions import AbortTransaction
17 from utilities.request import NetBoxFakeRequest
18
19
20 class Command(BaseCommand):
21 help = "Run a script in NetBox"
22
23 def add_arguments(self, parser):
24 parser.add_argument(
25 '--loglevel',
26 help="Logging Level (default: info)",
27 dest='loglevel',
28 default='info',
29 choices=['debug', 'info', 'warning', 'error', 'critical'])
30 parser.add_argument('--commit', help="Commit this script to database", action='store_true')
31 parser.add_argument('--user', help="User script is running as")
32 parser.add_argument('--data', help="Data as a string encapsulated JSON blob")
33 parser.add_argument('script', help="Script to run")
34
35 def handle(self, *args, **options):
36
37 def _run_script():
38 """
39 Core script execution task. We capture this within a subfunction to allow for conditionally wrapping it with
40 the event_tracking context manager (which is bypassed if commit == False).
41 """
42 try:
43 try:
44 with transaction.atomic():
45 script.output = script.run(data=data, commit=commit)
46 if not commit:
47 raise AbortTransaction()
48 except AbortTransaction:
49 script.log_info("Database changes have been reverted automatically.")
50 clear_events.send(request)
51 job.data = script.get_job_data()
52 job.terminate()
53 except Exception as e:
54 stacktrace = traceback.format_exc()
55 script.log_failure(
56 f"An exception occurred: `{type(e).__name__}: {e}`\n```\n{stacktrace}\n```"
57 )
58 script.log_info("Database changes have been reverted due to error.")
59 logger.error(f"Exception raised during script execution: {e}")
60 clear_events.send(request)
61 job.data = script.get_job_data()
62 job.terminate(status=JobStatusChoices.STATUS_ERRORED, error=repr(e))
63
64 # Print any test method results
65 for test_name, attrs in job.data['tests'].items():
66 self.stdout.write(
67 "\t{}: {} success, {} info, {} warning, {} failure".format(
68 test_name, attrs['success'], attrs['info'], attrs['warning'], attrs['failure']
69 )
70 )
71
72 logger.info(f"Script completed in {job.duration}")
73
74 User = get_user_model()
75
76 # Params
77 script = options['script']
78 loglevel = options['loglevel']
79 commit = options['commit']
80
81 try:
82 data = json.loads(options['data'])
83 except TypeError:
84 data = {}
85
86 module_name, script_name = script.split('.', 1)
87 module, script = get_module_and_script(module_name, script_name)
88
89 # Take user from command line if provided and exists, other
90 if options['user']:
91 try:
92 user = User.objects.get(username=options['user'])
93 except User.DoesNotExist:
94 user = User.objects.filter(is_superuser=True).order_by('pk')[0]
95 else:
96 user = User.objects.filter(is_superuser=True).order_by('pk')[0]
97
98 # Setup logging to Stdout
99 formatter = logging.Formatter(f'[%(asctime)s][%(levelname)s] - %(message)s')
100 stdouthandler = logging.StreamHandler(sys.stdout)
101 stdouthandler.setLevel(logging.DEBUG)
102 stdouthandler.setFormatter(formatter)
103
104 logger = logging.getLogger(f"netbox.scripts.{script.full_name}")
105 logger.addHandler(stdouthandler)
106
107 try:
108 logger.setLevel({
109 'critical': logging.CRITICAL,
110 'debug': logging.DEBUG,
111 'error': logging.ERROR,
112 'fatal': logging.FATAL,
113 'info': logging.INFO,
114 'warning': logging.WARNING,
115 }[loglevel])
116 except KeyError:
117 raise CommandError(f"Invalid log level: {loglevel}")
118
119 # Initialize the script form
120 script = script()
121 form = script.as_form(data, None)
122
123 # Create the job
124 job = Job.objects.create(
125 object=module,
126 name=script.class_name,
127 user=User.objects.filter(is_superuser=True).order_by('pk')[0],
128 job_id=uuid.uuid4()
129 )
130
131 request = NetBoxFakeRequest({
132 'META': {},
133 'POST': data,
134 'GET': {},
135 'FILES': {},
136 'user': user,
137 'path': '',
138 'id': job.job_id
139 })
140
141 if form.is_valid():
142 job.status = JobStatusChoices.STATUS_RUNNING
143 job.save()
144
145 logger.info(f"Running script (commit={commit})")
146 script.request = request
147
148 # Execute the script. If commit is True, wrap it with the event_tracking context manager to ensure we process
149 # change logging, webhooks, etc.
150 with event_tracking(request):
151 _run_script()
152 else:
153 logger.error('Data is not valid:')
154 for field, errors in form.errors.get_json_data().items():
155 for error in errors:
156 logger.error(f'\t{field}: {error.get("message")}')
157 job.status = JobStatusChoices.STATUS_ERRORED
158 job.save()
159
[end of netbox/extras/management/commands/runscript.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/extras/management/commands/runscript.py b/netbox/extras/management/commands/runscript.py
--- a/netbox/extras/management/commands/runscript.py
+++ b/netbox/extras/management/commands/runscript.py
@@ -85,6 +85,7 @@
module_name, script_name = script.split('.', 1)
module, script = get_module_and_script(module_name, script_name)
+ script = script.python_class
# Take user from command line if provided and exists, other
if options['user']:
| {"golden_diff": "diff --git a/netbox/extras/management/commands/runscript.py b/netbox/extras/management/commands/runscript.py\n--- a/netbox/extras/management/commands/runscript.py\n+++ b/netbox/extras/management/commands/runscript.py\n@@ -85,6 +85,7 @@\n \n module_name, script_name = script.split('.', 1)\n module, script = get_module_and_script(module_name, script_name)\n+ script = script.python_class\n \n # Take user from command line if provided and exists, other\n if options['user']:\n", "issue": "Unable to run scripts from CLI in v4.0\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv4.0.0\n\n### Python Version\n\n3.11\n\n### Steps to Reproduce\n\n1. Create a script\r\n2. Run it with `python manage.py runscript 'module.ScriptName' inside the NetBox instance\r\n\n\n### Expected Behavior\n\nScript should run.\n\n### Observed Behavior\n\nScript fails with:\r\n> AttributeError: 'Script' object has no attribute 'full_name'\r\n\r\nRunning the same script from GUI works fine, have tried multiple scripts, and haven't been able to run any via CLI in v4. \r\n\r\nSeems to be this line that fails: https://github.com/netbox-community/netbox/blob/develop/netbox/extras/management/commands/runscript.py#L104\n", "before_files": [{"content": "import json\nimport logging\nimport sys\nimport traceback\nimport uuid\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db import transaction\n\nfrom core.choices import JobStatusChoices\nfrom core.models import Job\nfrom extras.context_managers import event_tracking\nfrom extras.scripts import get_module_and_script\nfrom extras.signals import clear_events\nfrom utilities.exceptions import AbortTransaction\nfrom utilities.request import NetBoxFakeRequest\n\n\nclass Command(BaseCommand):\n help = \"Run a script in NetBox\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--loglevel',\n help=\"Logging Level (default: info)\",\n dest='loglevel',\n default='info',\n choices=['debug', 'info', 'warning', 'error', 'critical'])\n parser.add_argument('--commit', help=\"Commit this script to database\", action='store_true')\n parser.add_argument('--user', help=\"User script is running as\")\n parser.add_argument('--data', help=\"Data as a string encapsulated JSON blob\")\n parser.add_argument('script', help=\"Script to run\")\n\n def handle(self, *args, **options):\n\n def _run_script():\n \"\"\"\n Core script execution task. 
We capture this within a subfunction to allow for conditionally wrapping it with\n the event_tracking context manager (which is bypassed if commit == False).\n \"\"\"\n try:\n try:\n with transaction.atomic():\n script.output = script.run(data=data, commit=commit)\n if not commit:\n raise AbortTransaction()\n except AbortTransaction:\n script.log_info(\"Database changes have been reverted automatically.\")\n clear_events.send(request)\n job.data = script.get_job_data()\n job.terminate()\n except Exception as e:\n stacktrace = traceback.format_exc()\n script.log_failure(\n f\"An exception occurred: `{type(e).__name__}: {e}`\\n```\\n{stacktrace}\\n```\"\n )\n script.log_info(\"Database changes have been reverted due to error.\")\n logger.error(f\"Exception raised during script execution: {e}\")\n clear_events.send(request)\n job.data = script.get_job_data()\n job.terminate(status=JobStatusChoices.STATUS_ERRORED, error=repr(e))\n\n # Print any test method results\n for test_name, attrs in job.data['tests'].items():\n self.stdout.write(\n \"\\t{}: {} success, {} info, {} warning, {} failure\".format(\n test_name, attrs['success'], attrs['info'], attrs['warning'], attrs['failure']\n )\n )\n\n logger.info(f\"Script completed in {job.duration}\")\n\n User = get_user_model()\n\n # Params\n script = options['script']\n loglevel = options['loglevel']\n commit = options['commit']\n\n try:\n data = json.loads(options['data'])\n except TypeError:\n data = {}\n\n module_name, script_name = script.split('.', 1)\n module, script = get_module_and_script(module_name, script_name)\n\n # Take user from command line if provided and exists, other\n if options['user']:\n try:\n user = User.objects.get(username=options['user'])\n except User.DoesNotExist:\n user = User.objects.filter(is_superuser=True).order_by('pk')[0]\n else:\n user = User.objects.filter(is_superuser=True).order_by('pk')[0]\n\n # Setup logging to Stdout\n formatter = logging.Formatter(f'[%(asctime)s][%(levelname)s] - %(message)s')\n stdouthandler = logging.StreamHandler(sys.stdout)\n stdouthandler.setLevel(logging.DEBUG)\n stdouthandler.setFormatter(formatter)\n\n logger = logging.getLogger(f\"netbox.scripts.{script.full_name}\")\n logger.addHandler(stdouthandler)\n\n try:\n logger.setLevel({\n 'critical': logging.CRITICAL,\n 'debug': logging.DEBUG,\n 'error': logging.ERROR,\n 'fatal': logging.FATAL,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n }[loglevel])\n except KeyError:\n raise CommandError(f\"Invalid log level: {loglevel}\")\n\n # Initialize the script form\n script = script()\n form = script.as_form(data, None)\n\n # Create the job\n job = Job.objects.create(\n object=module,\n name=script.class_name,\n user=User.objects.filter(is_superuser=True).order_by('pk')[0],\n job_id=uuid.uuid4()\n )\n\n request = NetBoxFakeRequest({\n 'META': {},\n 'POST': data,\n 'GET': {},\n 'FILES': {},\n 'user': user,\n 'path': '',\n 'id': job.job_id\n })\n\n if form.is_valid():\n job.status = JobStatusChoices.STATUS_RUNNING\n job.save()\n\n logger.info(f\"Running script (commit={commit})\")\n script.request = request\n\n # Execute the script. 
If commit is True, wrap it with the event_tracking context manager to ensure we process\n # change logging, webhooks, etc.\n with event_tracking(request):\n _run_script()\n else:\n logger.error('Data is not valid:')\n for field, errors in form.errors.get_json_data().items():\n for error in errors:\n logger.error(f'\\t{field}: {error.get(\"message\")}')\n job.status = JobStatusChoices.STATUS_ERRORED\n job.save()\n", "path": "netbox/extras/management/commands/runscript.py"}]} | 2,252 | 125 |
gh_patches_debug_63273 | rasdani/github-patches | git_diff | weecology__retriever-400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't download and extract Gentry dataset
When trying to download the "Gentry Forest Transect Dataset", the retriever seems to download the data but gets stuck when it comes to extracting AVALANCH.xls.
Moreover, force quit seems to be the only way to close the program.
OS: OS X El Capitan Version 10.11.3 (15D21)
Machine: Macbook Pro Early 2015 13"
</issue>
<code>
[start of app/download_manager.py]
1 """This class manages dataset downloads concurrently and processes progress
2 output."""
3
4 import wx
5 from retriever.lib.download import DownloadThread
6
7
8 class DownloadManager:
9 def __init__(self, parent):
10 self.dialog = None
11 self.worker = None
12 self.queue = []
13 self.downloaded = set()
14 self.errors = set()
15 self.warnings = set()
16 self.Parent = parent
17 self.timer = wx.Timer(parent, -1)
18 self.timer.interval = 10
19 parent.Bind(wx.EVT_TIMER, self.update, self.timer)
20
21 def Download(self, script):
22 if not script in self.queue and not (self.worker and self.worker.script == script):
23 self.queue.append(script)
24 self.downloaded.add(script)
25 if script in self.errors:
26 self.errors.remove(script)
27 self.warnings.remove(script)
28 self.Parent.script_list.RefreshMe(None)
29 if not self.timer.IsRunning() and not self.worker and len(self.queue) < 2:
30 self.timer.Start(self.timer.interval)
31 return True
32 return False
33
34 def update(self, evt):
35 self.timer.Stop()
36 terminate = False
37 if self.worker:
38 script = self.worker.script
39 if self.worker.finished() and len(self.worker.output) == 0:
40 if hasattr(script, 'warnings') and script.warnings:
41 self.warnings.add(script)
42 self.Parent.SetStatusText('\n'.join(str(w) for w in script.warnings))
43 else:
44 self.Parent.SetStatusText("")
45 self.worker = None
46 self.Parent.script_list.RefreshMe(None)
47 self.timer.Start(self.timer.interval)
48 else:
49 self.worker.output_lock.acquire()
50 while len(self.worker.output) > 0 and not terminate:
51 if "Error:" in self.worker.output[0] and script in self.downloaded:
52 self.downloaded.remove(script)
53 self.errors.add(script)
54 if self.write(self.worker) == False:
55 terminate = True
56 self.worker.output = self.worker.output[1:]
57 #self.gauge.SetValue(100 * ((self.worker.scriptnum) /
58 # (self.worker.progress_max + 1.0)))
59 self.worker.output_lock.release()
60 if terminate:
61 self.Parent.Quit(None)
62 else:
63 self.timer.Start(self.timer.interval)
64 elif self.queue:
65 script = self.queue[0]
66 self.queue = self.queue[1:]
67 self.worker = DownloadThread(self.Parent.engine, script)
68 self.worker.parent = self
69 self.worker.start()
70 self.timer.Start(10)
71
72 def flush(self):
73 pass
74
75 def write(self, worker):
76 s = worker.output[0]
77
78 if '\b' in s:
79 s = s.replace('\b', '')
80 if not self.dialog:
81 wx.GetApp().Yield()
82 self.dialog = wx.ProgressDialog("Download Progress",
83 "Downloading datasets . . .\n"
84 + " " * len(s),
85 maximum=1000,
86 parent=None,
87 style=wx.PD_SMOOTH
88 | wx.DIALOG_NO_PARENT
89 | wx.PD_CAN_ABORT
90 | wx.PD_AUTO_HIDE
91 | wx.PD_REMAINING_TIME
92 )
93 def progress(s):
94 if ' / ' in s:
95 s = s.split(' / ')
96 total = float(s[1])
97 current = float(s[0].split(': ')[1])
98 progress = int((current / total) * 1000)
99 return (progress if progress > 1 else 1)
100 else:
101 return None
102
103 current_progress = progress(s)
104 if current_progress:
105 (keepgoing, skip) = self.dialog.Update(current_progress, s)
106 else:
107 (keepgoing, skip) = self.dialog.Pulse(s)
108
109 if not keepgoing:
110 return False
111 else:
112 if self.dialog:
113 self.dialog.Update(1000, "")
114 self.dialog.Destroy()
115 self.dialog = None
116
117 if '...' in s:
118 self.Parent.SetStatusText(s)
119 else:
120 self.Parent.script_list.SetStatus(worker.script.name, s)
121
122 wx.GetApp().Yield()
123 return True
124
[end of app/download_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/download_manager.py b/app/download_manager.py
--- a/app/download_manager.py
+++ b/app/download_manager.py
@@ -102,8 +102,9 @@
current_progress = progress(s)
if current_progress:
- (keepgoing, skip) = self.dialog.Update(current_progress, s)
- else:
+ # download progress remaining-time disabled. causes bottle neck on Gentry ref: #396.
+ # (keepgoing, skip) = self.dialog.Update(current_progress, s)
+ # else:
(keepgoing, skip) = self.dialog.Pulse(s)
if not keepgoing:
| {"golden_diff": "diff --git a/app/download_manager.py b/app/download_manager.py\n--- a/app/download_manager.py\n+++ b/app/download_manager.py\n@@ -102,8 +102,9 @@\n \n current_progress = progress(s)\n if current_progress:\n- (keepgoing, skip) = self.dialog.Update(current_progress, s)\n- else:\n+ # download progress remaining-time disabled. causes bottle neck on Gentry ref: #396.\n+ # (keepgoing, skip) = self.dialog.Update(current_progress, s)\n+ # else:\n (keepgoing, skip) = self.dialog.Pulse(s)\n \n if not keepgoing:\n", "issue": "Can't download and extract Gentry dataset\nIf trying to download \"Gentry Forest Transect Dataset\" the retriever seems to download the data, but gets stuck when it comes in extracting AVALANCH.xls\nMoreover force quit seems the only way to close the program. \nOS: OS X El Capitan Version 10.11.3 (15D21)\nMachine: Macbook Pro Early 2015 13\"\n\n", "before_files": [{"content": "\"\"\"This class manages dataset downloads concurrently and processes progress\noutput.\"\"\"\n\nimport wx\nfrom retriever.lib.download import DownloadThread\n\n\nclass DownloadManager:\n def __init__(self, parent):\n self.dialog = None\n self.worker = None\n self.queue = []\n self.downloaded = set()\n self.errors = set()\n self.warnings = set()\n self.Parent = parent\n self.timer = wx.Timer(parent, -1)\n self.timer.interval = 10\n parent.Bind(wx.EVT_TIMER, self.update, self.timer)\n\n def Download(self, script):\n if not script in self.queue and not (self.worker and self.worker.script == script):\n self.queue.append(script)\n self.downloaded.add(script)\n if script in self.errors:\n self.errors.remove(script)\n self.warnings.remove(script)\n self.Parent.script_list.RefreshMe(None)\n if not self.timer.IsRunning() and not self.worker and len(self.queue) < 2:\n self.timer.Start(self.timer.interval)\n return True\n return False\n\n def update(self, evt):\n self.timer.Stop()\n terminate = False\n if self.worker:\n script = self.worker.script\n if self.worker.finished() and len(self.worker.output) == 0:\n if hasattr(script, 'warnings') and script.warnings:\n self.warnings.add(script)\n self.Parent.SetStatusText('\\n'.join(str(w) for w in script.warnings))\n else:\n self.Parent.SetStatusText(\"\")\n self.worker = None\n self.Parent.script_list.RefreshMe(None)\n self.timer.Start(self.timer.interval)\n else:\n self.worker.output_lock.acquire()\n while len(self.worker.output) > 0 and not terminate:\n if \"Error:\" in self.worker.output[0] and script in self.downloaded:\n self.downloaded.remove(script)\n self.errors.add(script)\n if self.write(self.worker) == False:\n terminate = True\n self.worker.output = self.worker.output[1:]\n #self.gauge.SetValue(100 * ((self.worker.scriptnum) /\n # (self.worker.progress_max + 1.0)))\n self.worker.output_lock.release()\n if terminate:\n self.Parent.Quit(None)\n else:\n self.timer.Start(self.timer.interval)\n elif self.queue:\n script = self.queue[0]\n self.queue = self.queue[1:]\n self.worker = DownloadThread(self.Parent.engine, script)\n self.worker.parent = self\n self.worker.start()\n self.timer.Start(10)\n\n def flush(self):\n pass\n\n def write(self, worker):\n s = worker.output[0]\n\n if '\\b' in s:\n s = s.replace('\\b', '')\n if not self.dialog:\n wx.GetApp().Yield()\n self.dialog = wx.ProgressDialog(\"Download Progress\",\n \"Downloading datasets . . 
.\\n\"\n + \" \" * len(s),\n maximum=1000,\n parent=None,\n style=wx.PD_SMOOTH\n | wx.DIALOG_NO_PARENT\n | wx.PD_CAN_ABORT\n | wx.PD_AUTO_HIDE\n | wx.PD_REMAINING_TIME\n )\n def progress(s):\n if ' / ' in s:\n s = s.split(' / ')\n total = float(s[1])\n current = float(s[0].split(': ')[1])\n progress = int((current / total) * 1000)\n return (progress if progress > 1 else 1)\n else:\n return None\n\n current_progress = progress(s)\n if current_progress:\n (keepgoing, skip) = self.dialog.Update(current_progress, s)\n else:\n (keepgoing, skip) = self.dialog.Pulse(s)\n\n if not keepgoing:\n return False\n else:\n if self.dialog:\n self.dialog.Update(1000, \"\")\n self.dialog.Destroy()\n self.dialog = None\n\n if '...' in s:\n self.Parent.SetStatusText(s)\n else:\n self.Parent.script_list.SetStatus(worker.script.name, s)\n\n wx.GetApp().Yield()\n return True\n", "path": "app/download_manager.py"}]} | 1,782 | 143 |
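As background for the row above, a minimal wxPython sketch (independent of the retriever) of the `Pulse()`-only progress pattern the golden diff falls back to; the loop and messages are illustrative:

```python
import wx

# PD_REMAINING_TIME is deliberately left out, mirroring the intent of the patch above.
app = wx.App(False)
dialog = wx.ProgressDialog(
    "Download Progress",
    "Downloading datasets . . .",
    maximum=1000,
    style=wx.PD_SMOOTH | wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE,
)

for chunk in range(100):
    # Pulse() animates the bar without recomputing a position/ETA on every call.
    keepgoing, _skip = dialog.Pulse("chunk %d" % chunk)
    if not keepgoing:  # user pressed Abort
        break

dialog.Destroy()
```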
gh_patches_debug_22153 | rasdani/github-patches | git_diff | svthalia__concrexit-1794 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Display warning in admin for age-restricted orders for underage members
https://github.com/svthalia/concrexit/blob/8244e4bd50db6e64a63aa1605756acc2fb413094/website/sales/admin/order_admin.py#L334
</issue>
<code>
[start of website/sales/admin/order_admin.py]
1 from functools import partial
2
3 from admin_auto_filters.filters import AutocompleteFilter
4 from django.contrib import admin, messages
5 from django.contrib.admin import register, SimpleListFilter
6 from django.forms import Field
7 from django.http import HttpRequest
8 from django.urls import resolve
9 from django.utils import timezone
10
11 from django.utils.translation import gettext_lazy as _
12
13 from payments.widgets import PaymentWidget
14 from sales.models.order import Order, OrderItem
15 from sales.models.shift import Shift
16 from sales.services import is_manager
17
18
19 class OrderItemInline(admin.TabularInline):
20 model = OrderItem
21 extra = 0
22
23 fields = ("product", "amount", "total")
24
25 def get_readonly_fields(self, request: HttpRequest, obj: Order = None):
26 default_fields = self.readonly_fields
27
28 if not (request.member and request.member.has_perm("sales.custom_prices")):
29 default_fields += ("total",)
30
31 return default_fields
32
33 def get_queryset(self, request):
34 queryset = super().get_queryset(request)
35 queryset = queryset.prefetch_related("product", "product__product")
36 return queryset
37
38 def has_add_permission(self, request, obj):
39 if obj and obj.shift.locked:
40 return False
41
42 if obj and obj.payment:
43 return False
44
45 parent = self.get_parent_object_from_request(request)
46 if not parent:
47 return False
48
49 return super().has_add_permission(request, obj)
50
51 def has_change_permission(self, request, obj=None):
52 if obj and obj.payment:
53 return False
54 if obj and obj.shift.locked:
55 return False
56 if obj and not is_manager(request.member, obj.shift):
57 return False
58 return True
59
60 def has_delete_permission(self, request, obj=None):
61 if obj and obj.payment:
62 return False
63 if obj and obj.shift.locked:
64 return False
65 if obj and not is_manager(request.member, obj.shift):
66 return False
67 return True
68
69 def get_parent_object_from_request(self, request):
70 """Get parent object to determine product list."""
71 resolved = resolve(request.path_info)
72 if resolved.kwargs:
73 parent = self.parent_model.objects.get(pk=resolved.kwargs["object_id"])
74 return parent
75 return None
76
77 def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
78 """Limit product list items to items of order's shift."""
79 field = super().formfield_for_foreignkey(db_field, request, **kwargs)
80
81 if db_field.name == "product":
82 if request is not None:
83 parent = self.get_parent_object_from_request(request)
84 if parent:
85 field.queryset = parent.shift.product_list.product_items
86 else:
87 field.queryset = field.queryset.none()
88
89 return field
90
91
92 class OrderShiftFilter(AutocompleteFilter):
93 title = _("shift")
94 field_name = "shift"
95 rel_model = Order
96
97 def queryset(self, request, queryset):
98 if self.value():
99 return queryset.filter(shift=self.value())
100 return queryset
101
102
103 class OrderMemberFilter(AutocompleteFilter):
104 title = _("member")
105 field_name = "payer"
106 rel_model = Order
107
108 def queryset(self, request, queryset):
109 if self.value():
110 return queryset.filter(payer=self.value())
111 return queryset
112
113
114 class OrderPaymentFilter(SimpleListFilter):
115 title = _("payment")
116 parameter_name = "payment"
117
118 def lookups(self, request, model_admin):
119 return (
120 ("not_required", _("No payment required")),
121 ("paid", _("Paid")),
122 ("unpaid", _("Unpaid")),
123 )
124
125 def queryset(self, request, queryset):
126 if self.value() is None:
127 return queryset
128 if self.value() == "paid":
129 return queryset.filter(payment__isnull=False)
130 if self.value() == "unpaid":
131 return queryset.filter(payment__isnull=True, total_amount__gt=0)
132 return queryset.filter(total_amount__exact=0)
133
134
135 class OrderProductFilter(SimpleListFilter):
136 title = _("product")
137 parameter_name = "product"
138
139 def lookups(self, request, model_admin):
140 qs = model_admin.get_queryset(request)
141 types = qs.filter(order_items__product__product__isnull=False).values_list(
142 "order_items__product__product__id", "order_items__product__product__name"
143 )
144 return list(types.order_by("order_items__product__product__id").distinct())
145
146 def queryset(self, request, queryset):
147 if self.value() is None:
148 return queryset
149 return queryset.filter(order_items__product__product__id__contains=self.value())
150
151
152 @register(Order)
153 class OrderAdmin(admin.ModelAdmin):
154 class Media:
155 pass
156
157 inlines = [
158 OrderItemInline,
159 ]
160 ordering = ("-created_at",)
161 date_hierarchy = "created_at"
162 search_fields = (
163 "id",
164 "payer__username",
165 "payer__first_name",
166 "payer__last_name",
167 "payer__profile__nickname",
168 )
169
170 list_display = (
171 "id",
172 "shift",
173 "created_at",
174 "order_description",
175 "num_items",
176 "discount",
177 "total_amount",
178 "paid",
179 "payer",
180 )
181 list_filter = [
182 OrderShiftFilter,
183 OrderMemberFilter,
184 OrderPaymentFilter,
185 OrderProductFilter,
186 ]
187
188 fields = (
189 "shift",
190 "created_at",
191 "order_description",
192 "num_items",
193 "age_restricted",
194 "subtotal",
195 "discount",
196 "total_amount",
197 "payer",
198 "payment",
199 "payment_url",
200 )
201
202 readonly_fields = (
203 "created_at",
204 "order_description",
205 "num_items",
206 "subtotal",
207 "total_amount",
208 "age_restricted",
209 "payment_url",
210 )
211
212 def get_readonly_fields(self, request: HttpRequest, obj: Order = None):
213 """Disallow changing shift when selected."""
214 default_fields = self.readonly_fields
215
216 if not (request.member and request.member.has_perm("sales.custom_prices")):
217 default_fields += ("discount",)
218
219 if obj and obj.shift:
220 default_fields += ("shift",)
221
222 return default_fields
223
224 def get_queryset(self, request):
225 queryset = super().get_queryset(request)
226
227 if not request.member:
228 queryset = queryset.none()
229 elif not request.member.has_perm("sales.override_manager"):
230 queryset = queryset.filter(
231 shift__managers__in=request.member.get_member_groups()
232 ).distinct()
233
234 queryset = queryset.select_properties(
235 "total_amount", "subtotal", "num_items", "age_restricted"
236 )
237 queryset = queryset.prefetch_related(
238 "shift", "shift__event", "shift__product_list"
239 )
240 queryset = queryset.prefetch_related(
241 "order_items", "order_items__product", "order_items__product__product"
242 )
243 queryset = queryset.prefetch_related("payment")
244 queryset = queryset.prefetch_related("payer")
245 return queryset
246
247 def has_add_permission(self, request):
248 if not request.member:
249 return False
250 elif not request.member.has_perm("sales.override_manager"):
251 if (
252 Shift.objects.filter(
253 start__lte=timezone.now(),
254 locked=False,
255 managers__in=request.member.get_member_groups(),
256 ).count()
257 == 0
258 ):
259 return False
260 return super().has_view_permission(request)
261
262 def has_view_permission(self, request, obj=None):
263 if obj and not is_manager(request.member, obj.shift):
264 return False
265 return super().has_view_permission(request, obj)
266
267 def has_change_permission(self, request, obj=None):
268 if obj and obj.shift.locked:
269 return False
270 if obj and obj.payment:
271 return False
272
273 if obj and not is_manager(request.member, obj.shift):
274 return False
275
276 return super().has_change_permission(request, obj)
277
278 def has_delete_permission(self, request, obj=None):
279 if obj and obj.shift.locked:
280 return False
281 if obj and obj.payment:
282 return False
283
284 if obj and not is_manager(request.member, obj.shift):
285 return False
286
287 return super().has_delete_permission(request, obj)
288
289 def get_form(self, request, obj=None, **kwargs):
290 """Override get form to use payment widget."""
291 return super().get_form(
292 request,
293 obj,
294 formfield_callback=partial(
295 self.formfield_for_dbfield, request=request, obj=obj
296 ),
297 **kwargs,
298 )
299
300 def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):
301 """Use payment widget for payments."""
302 field = super().formfield_for_dbfield(db_field, request, **kwargs)
303 if db_field.name == "payment":
304 return Field(
305 widget=PaymentWidget(obj=obj), initial=field.initial, required=False
306 )
307 if db_field.name == "shift":
308 field.queryset = Shift.objects.filter(locked=False)
309 if not request.member:
310 field.queryset = field.queryset.none()
311 elif not request.member.has_perm("sales.override_manager"):
312 field.queryset = field.queryset.filter(
313 managers__in=request.member.get_member_groups()
314 )
315 return field
316
317 def changelist_view(self, request, extra_context=None):
318 if not (request.member and request.member.has_perm("sales.override_manager")):
319 self.message_user(
320 request,
321 _("You are only seeing orders that are relevant to you."),
322 messages.WARNING,
323 )
324 return super().changelist_view(request, extra_context)
325
326 def change_view(self, request, object_id, form_url="", extra_context=None):
327 object_id
328 return super().change_view(request, object_id, form_url, extra_context)
329
330 def order_description(self, obj):
331 if obj.order_description:
332 return obj.order_description
333 return "-"
334
335 def num_items(self, obj):
336 return obj.num_items
337
338 def subtotal(self, obj):
339 if obj.subtotal:
340 return f"€{obj.subtotal:.2f}"
341 return "-"
342
343 def discount(self, obj):
344 if obj.discount:
345 return f"€{obj.discount:.2f}"
346 return "-"
347
348 def total_amount(self, obj):
349 if obj.total_amount:
350 return f"€{obj.total_amount:.2f}"
351 return "-"
352
353 def paid(self, obj):
354 if obj.total_amount is None or obj.total_amount == 0:
355 return None
356 return obj.payment is not None
357
358 paid.boolean = True
359
360 def age_restricted(self, obj):
361 return bool(obj.age_restricted) if obj else None
362
363 age_restricted.boolean = True
364
[end of website/sales/admin/order_admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/sales/admin/order_admin.py b/website/sales/admin/order_admin.py
--- a/website/sales/admin/order_admin.py
+++ b/website/sales/admin/order_admin.py
@@ -11,6 +11,7 @@
from django.utils.translation import gettext_lazy as _
from payments.widgets import PaymentWidget
+from sales import services
from sales.models.order import Order, OrderItem
from sales.models.shift import Shift
from sales.services import is_manager
@@ -221,6 +222,19 @@
return default_fields
+ def changeform_view(self, request, object_id=None, form_url="", extra_context=None):
+ if object_id:
+ obj = self.model.objects.get(pk=object_id)
+ if obj.age_restricted and obj.payer and not services.is_adult(obj.payer):
+ self.message_user(
+ request,
+ _(
+ "The payer for this order is under-age while the order is age restricted!"
+ ),
+ messages.WARNING,
+ )
+ return super().changeform_view(request, object_id, form_url, extra_context)
+
def get_queryset(self, request):
queryset = super().get_queryset(request)
| {"golden_diff": "diff --git a/website/sales/admin/order_admin.py b/website/sales/admin/order_admin.py\n--- a/website/sales/admin/order_admin.py\n+++ b/website/sales/admin/order_admin.py\n@@ -11,6 +11,7 @@\n from django.utils.translation import gettext_lazy as _\n \n from payments.widgets import PaymentWidget\n+from sales import services\n from sales.models.order import Order, OrderItem\n from sales.models.shift import Shift\n from sales.services import is_manager\n@@ -221,6 +222,19 @@\n \n return default_fields\n \n+ def changeform_view(self, request, object_id=None, form_url=\"\", extra_context=None):\n+ if object_id:\n+ obj = self.model.objects.get(pk=object_id)\n+ if obj.age_restricted and obj.payer and not services.is_adult(obj.payer):\n+ self.message_user(\n+ request,\n+ _(\n+ \"The payer for this order is under-age while the order is age restricted!\"\n+ ),\n+ messages.WARNING,\n+ )\n+ return super().changeform_view(request, object_id, form_url, extra_context)\n+\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n", "issue": "Display warning in admin for age restricted orders for under age members\nhttps://github.com/svthalia/concrexit/blob/8244e4bd50db6e64a63aa1605756acc2fb413094/website/sales/admin/order_admin.py#L334\n", "before_files": [{"content": "from functools import partial\n\nfrom admin_auto_filters.filters import AutocompleteFilter\nfrom django.contrib import admin, messages\nfrom django.contrib.admin import register, SimpleListFilter\nfrom django.forms import Field\nfrom django.http import HttpRequest\nfrom django.urls import resolve\nfrom django.utils import timezone\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom payments.widgets import PaymentWidget\nfrom sales.models.order import Order, OrderItem\nfrom sales.models.shift import Shift\nfrom sales.services import is_manager\n\n\nclass OrderItemInline(admin.TabularInline):\n model = OrderItem\n extra = 0\n\n fields = (\"product\", \"amount\", \"total\")\n\n def get_readonly_fields(self, request: HttpRequest, obj: Order = None):\n default_fields = self.readonly_fields\n\n if not (request.member and request.member.has_perm(\"sales.custom_prices\")):\n default_fields += (\"total\",)\n\n return default_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n queryset = queryset.prefetch_related(\"product\", \"product__product\")\n return queryset\n\n def has_add_permission(self, request, obj):\n if obj and obj.shift.locked:\n return False\n\n if obj and obj.payment:\n return False\n\n parent = self.get_parent_object_from_request(request)\n if not parent:\n return False\n\n return super().has_add_permission(request, obj)\n\n def has_change_permission(self, request, obj=None):\n if obj and obj.payment:\n return False\n if obj and obj.shift.locked:\n return False\n if obj and not is_manager(request.member, obj.shift):\n return False\n return True\n\n def has_delete_permission(self, request, obj=None):\n if obj and obj.payment:\n return False\n if obj and obj.shift.locked:\n return False\n if obj and not is_manager(request.member, obj.shift):\n return False\n return True\n\n def get_parent_object_from_request(self, request):\n \"\"\"Get parent object to determine product list.\"\"\"\n resolved = resolve(request.path_info)\n if resolved.kwargs:\n parent = self.parent_model.objects.get(pk=resolved.kwargs[\"object_id\"])\n return parent\n return None\n\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n \"\"\"Limit product list 
items to items of order's shift.\"\"\"\n field = super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n if db_field.name == \"product\":\n if request is not None:\n parent = self.get_parent_object_from_request(request)\n if parent:\n field.queryset = parent.shift.product_list.product_items\n else:\n field.queryset = field.queryset.none()\n\n return field\n\n\nclass OrderShiftFilter(AutocompleteFilter):\n title = _(\"shift\")\n field_name = \"shift\"\n rel_model = Order\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(shift=self.value())\n return queryset\n\n\nclass OrderMemberFilter(AutocompleteFilter):\n title = _(\"member\")\n field_name = \"payer\"\n rel_model = Order\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(payer=self.value())\n return queryset\n\n\nclass OrderPaymentFilter(SimpleListFilter):\n title = _(\"payment\")\n parameter_name = \"payment\"\n\n def lookups(self, request, model_admin):\n return (\n (\"not_required\", _(\"No payment required\")),\n (\"paid\", _(\"Paid\")),\n (\"unpaid\", _(\"Unpaid\")),\n )\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n if self.value() == \"paid\":\n return queryset.filter(payment__isnull=False)\n if self.value() == \"unpaid\":\n return queryset.filter(payment__isnull=True, total_amount__gt=0)\n return queryset.filter(total_amount__exact=0)\n\n\nclass OrderProductFilter(SimpleListFilter):\n title = _(\"product\")\n parameter_name = \"product\"\n\n def lookups(self, request, model_admin):\n qs = model_admin.get_queryset(request)\n types = qs.filter(order_items__product__product__isnull=False).values_list(\n \"order_items__product__product__id\", \"order_items__product__product__name\"\n )\n return list(types.order_by(\"order_items__product__product__id\").distinct())\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n return queryset.filter(order_items__product__product__id__contains=self.value())\n\n\n@register(Order)\nclass OrderAdmin(admin.ModelAdmin):\n class Media:\n pass\n\n inlines = [\n OrderItemInline,\n ]\n ordering = (\"-created_at\",)\n date_hierarchy = \"created_at\"\n search_fields = (\n \"id\",\n \"payer__username\",\n \"payer__first_name\",\n \"payer__last_name\",\n \"payer__profile__nickname\",\n )\n\n list_display = (\n \"id\",\n \"shift\",\n \"created_at\",\n \"order_description\",\n \"num_items\",\n \"discount\",\n \"total_amount\",\n \"paid\",\n \"payer\",\n )\n list_filter = [\n OrderShiftFilter,\n OrderMemberFilter,\n OrderPaymentFilter,\n OrderProductFilter,\n ]\n\n fields = (\n \"shift\",\n \"created_at\",\n \"order_description\",\n \"num_items\",\n \"age_restricted\",\n \"subtotal\",\n \"discount\",\n \"total_amount\",\n \"payer\",\n \"payment\",\n \"payment_url\",\n )\n\n readonly_fields = (\n \"created_at\",\n \"order_description\",\n \"num_items\",\n \"subtotal\",\n \"total_amount\",\n \"age_restricted\",\n \"payment_url\",\n )\n\n def get_readonly_fields(self, request: HttpRequest, obj: Order = None):\n \"\"\"Disallow changing shift when selected.\"\"\"\n default_fields = self.readonly_fields\n\n if not (request.member and request.member.has_perm(\"sales.custom_prices\")):\n default_fields += (\"discount\",)\n\n if obj and obj.shift:\n default_fields += (\"shift\",)\n\n return default_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n\n if not request.member:\n queryset = queryset.none()\n elif not 
request.member.has_perm(\"sales.override_manager\"):\n queryset = queryset.filter(\n shift__managers__in=request.member.get_member_groups()\n ).distinct()\n\n queryset = queryset.select_properties(\n \"total_amount\", \"subtotal\", \"num_items\", \"age_restricted\"\n )\n queryset = queryset.prefetch_related(\n \"shift\", \"shift__event\", \"shift__product_list\"\n )\n queryset = queryset.prefetch_related(\n \"order_items\", \"order_items__product\", \"order_items__product__product\"\n )\n queryset = queryset.prefetch_related(\"payment\")\n queryset = queryset.prefetch_related(\"payer\")\n return queryset\n\n def has_add_permission(self, request):\n if not request.member:\n return False\n elif not request.member.has_perm(\"sales.override_manager\"):\n if (\n Shift.objects.filter(\n start__lte=timezone.now(),\n locked=False,\n managers__in=request.member.get_member_groups(),\n ).count()\n == 0\n ):\n return False\n return super().has_view_permission(request)\n\n def has_view_permission(self, request, obj=None):\n if obj and not is_manager(request.member, obj.shift):\n return False\n return super().has_view_permission(request, obj)\n\n def has_change_permission(self, request, obj=None):\n if obj and obj.shift.locked:\n return False\n if obj and obj.payment:\n return False\n\n if obj and not is_manager(request.member, obj.shift):\n return False\n\n return super().has_change_permission(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n if obj and obj.shift.locked:\n return False\n if obj and obj.payment:\n return False\n\n if obj and not is_manager(request.member, obj.shift):\n return False\n\n return super().has_delete_permission(request, obj)\n\n def get_form(self, request, obj=None, **kwargs):\n \"\"\"Override get form to use payment widget.\"\"\"\n return super().get_form(\n request,\n obj,\n formfield_callback=partial(\n self.formfield_for_dbfield, request=request, obj=obj\n ),\n **kwargs,\n )\n\n def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):\n \"\"\"Use payment widget for payments.\"\"\"\n field = super().formfield_for_dbfield(db_field, request, **kwargs)\n if db_field.name == \"payment\":\n return Field(\n widget=PaymentWidget(obj=obj), initial=field.initial, required=False\n )\n if db_field.name == \"shift\":\n field.queryset = Shift.objects.filter(locked=False)\n if not request.member:\n field.queryset = field.queryset.none()\n elif not request.member.has_perm(\"sales.override_manager\"):\n field.queryset = field.queryset.filter(\n managers__in=request.member.get_member_groups()\n )\n return field\n\n def changelist_view(self, request, extra_context=None):\n if not (request.member and request.member.has_perm(\"sales.override_manager\")):\n self.message_user(\n request,\n _(\"You are only seeing orders that are relevant to you.\"),\n messages.WARNING,\n )\n return super().changelist_view(request, extra_context)\n\n def change_view(self, request, object_id, form_url=\"\", extra_context=None):\n object_id\n return super().change_view(request, object_id, form_url, extra_context)\n\n def order_description(self, obj):\n if obj.order_description:\n return obj.order_description\n return \"-\"\n\n def num_items(self, obj):\n return obj.num_items\n\n def subtotal(self, obj):\n if obj.subtotal:\n return f\"\u20ac{obj.subtotal:.2f}\"\n return \"-\"\n\n def discount(self, obj):\n if obj.discount:\n return f\"\u20ac{obj.discount:.2f}\"\n return \"-\"\n\n def total_amount(self, obj):\n if obj.total_amount:\n return f\"\u20ac{obj.total_amount:.2f}\"\n 
return \"-\"\n\n def paid(self, obj):\n if obj.total_amount is None or obj.total_amount == 0:\n return None\n return obj.payment is not None\n\n paid.boolean = True\n\n def age_restricted(self, obj):\n return bool(obj.age_restricted) if obj else None\n\n age_restricted.boolean = True\n", "path": "website/sales/admin/order_admin.py"}]} | 3,905 | 265 |
gh_patches_debug_30074 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-266 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicate schema creation
**Describe the bug**
We are currently able to create a new schema with an existing schema name, creating duplicates on our mathesar_schema table.
**Expected behavior**
* Schema name should be unique per db in mathesar_schema table.
* If a new schema creation is attempted with the same name as an existing schema, a 400 should be thrown with proper error message.
</issue>
<code>
[start of mathesar/views/api.py]
1 import logging
2 from rest_framework import status, viewsets
3 from rest_framework.exceptions import NotFound, ValidationError
4 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin
5 from rest_framework.response import Response
6 from django.core.cache import cache
7 from django_filters import rest_framework as filters
8
9
10 from mathesar.database.utils import get_non_default_database_keys
11 from mathesar.models import Table, Schema, DataFile
12 from mathesar.pagination import DefaultLimitOffsetPagination, TableLimitOffsetPagination
13 from mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer
14 from mathesar.utils.schemas import create_schema_and_object, reflect_schemas_from_database
15 from mathesar.utils.tables import reflect_tables_from_schema
16 from mathesar.utils.api import create_table_from_datafile, create_datafile
17 from mathesar.filters import SchemaFilter, TableFilter
18
19 logger = logging.getLogger(__name__)
20
21 DB_REFLECTION_KEY = 'database_reflected_recently'
22 DB_REFLECTION_INTERVAL = 60 * 5 # we reflect DB changes every 5 minutes
23
24
25 def reflect_db_objects():
26 if not cache.get(DB_REFLECTION_KEY):
27 for database_key in get_non_default_database_keys():
28 reflect_schemas_from_database(database_key)
29 for schema in Schema.objects.all():
30 reflect_tables_from_schema(schema)
31 cache.set(DB_REFLECTION_KEY, True, DB_REFLECTION_INTERVAL)
32
33
34 class SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
35 def get_queryset(self):
36 reflect_db_objects()
37 return Schema.objects.all().order_by('-created_at')
38
39 serializer_class = SchemaSerializer
40 pagination_class = DefaultLimitOffsetPagination
41 filter_backends = (filters.DjangoFilterBackend,)
42 filterset_class = SchemaFilter
43
44 def create(self, request):
45 schema = create_schema_and_object(request.data['name'], request.data['database'])
46 serializer = SchemaSerializer(schema)
47 return Response(serializer.data, status=status.HTTP_201_CREATED)
48
49
50 class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin,
51 CreateModelMixin):
52 def get_queryset(self):
53 reflect_db_objects()
54 return Table.objects.all().order_by('-created_at')
55
56 serializer_class = TableSerializer
57 pagination_class = DefaultLimitOffsetPagination
58 filter_backends = (filters.DjangoFilterBackend,)
59 filterset_class = TableFilter
60
61 def create(self, request):
62 serializer = TableSerializer(data=request.data, context={'request': request})
63 if serializer.is_valid():
64 return create_table_from_datafile(request, serializer.validated_data)
65 else:
66 raise ValidationError(serializer.errors)
67
68
69 class RecordViewSet(viewsets.ViewSet):
70 # There is no "update" method.
71 # We're not supporting PUT requests because there aren't a lot of use cases
72 # where the entire record needs to be replaced, PATCH suffices for updates.
73 queryset = Table.objects.all().order_by('-created_at')
74
75 def list(self, request, table_pk=None):
76 paginator = TableLimitOffsetPagination()
77 records = paginator.paginate_queryset(self.queryset, request, table_pk)
78 serializer = RecordSerializer(records, many=True)
79 return paginator.get_paginated_response(serializer.data)
80
81 def retrieve(self, request, pk=None, table_pk=None):
82 table = Table.objects.get(id=table_pk)
83 record = table.get_record(pk)
84 if not record:
85 raise NotFound
86 serializer = RecordSerializer(record)
87 return Response(serializer.data)
88
89 def create(self, request, table_pk=None):
90 table = Table.objects.get(id=table_pk)
91 # We only support adding a single record through the API.
92 assert isinstance((request.data), dict)
93 record = table.create_record_or_records(request.data)
94 serializer = RecordSerializer(record)
95 return Response(serializer.data, status=status.HTTP_201_CREATED)
96
97 def partial_update(self, request, pk=None, table_pk=None):
98 table = Table.objects.get(id=table_pk)
99 record = table.update_record(pk, request.data)
100 serializer = RecordSerializer(record)
101 return Response(serializer.data)
102
103 def destroy(self, request, pk=None, table_pk=None):
104 table = Table.objects.get(id=table_pk)
105 table.delete_record(pk)
106 return Response(status=status.HTTP_204_NO_CONTENT)
107
108
109 class DatabaseKeyViewSet(viewsets.ViewSet):
110 def list(self, request):
111 return Response(get_non_default_database_keys())
112
113
114 class DataFileViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, CreateModelMixin):
115 queryset = DataFile.objects.all().order_by('-created_at')
116 serializer_class = DataFileSerializer
117 pagination_class = DefaultLimitOffsetPagination
118
119 def create(self, request):
120 serializer = DataFileSerializer(data=request.data, context={'request': request})
121 if serializer.is_valid():
122 return create_datafile(request, serializer.validated_data['file'])
123 else:
124 raise ValidationError(serializer.errors)
125
[end of mathesar/views/api.py]
[start of mathesar/utils/schemas.py]
1 from db.schemas import (
2 create_schema, get_schema_oid_from_name, get_mathesar_schemas_with_oids
3 )
4 from mathesar.database.base import create_mathesar_engine
5 from mathesar.models import Schema
6
7
8 def create_schema_and_object(name, database):
9 engine = create_mathesar_engine(database)
10 create_schema(name, engine)
11 schema_oid = get_schema_oid_from_name(name, engine)
12 schema = Schema.objects.create(oid=schema_oid, database=database)
13 return schema
14
15
16 def reflect_schemas_from_database(database):
17 engine = create_mathesar_engine(database)
18 db_schema_oids = {
19 schema["oid"] for schema in get_mathesar_schemas_with_oids(engine)
20 }
21 schemas = [
22 Schema.objects.get_or_create(oid=oid, database=database)
23 for oid in db_schema_oids
24 ]
25 for schema in Schema.objects.all():
26 if schema.oid not in db_schema_oids:
27 schema.delete()
28 return schemas
29
[end of mathesar/utils/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/utils/schemas.py b/mathesar/utils/schemas.py
--- a/mathesar/utils/schemas.py
+++ b/mathesar/utils/schemas.py
@@ -1,5 +1,8 @@
+from rest_framework.exceptions import ValidationError
+
from db.schemas import (
- create_schema, get_schema_oid_from_name, get_mathesar_schemas_with_oids
+ create_schema, get_schema_oid_from_name, get_mathesar_schemas,
+ get_mathesar_schemas_with_oids
)
from mathesar.database.base import create_mathesar_engine
from mathesar.models import Schema
@@ -7,6 +10,11 @@
def create_schema_and_object(name, database):
engine = create_mathesar_engine(database)
+
+ all_schemas = get_mathesar_schemas(engine)
+ if name in all_schemas:
+ raise ValidationError({"name": "Schema name is not unique"})
+
create_schema(name, engine)
schema_oid = get_schema_oid_from_name(name, engine)
schema = Schema.objects.create(oid=schema_oid, database=database)
diff --git a/mathesar/views/api.py b/mathesar/views/api.py
--- a/mathesar/views/api.py
+++ b/mathesar/views/api.py
@@ -42,9 +42,13 @@
filterset_class = SchemaFilter
def create(self, request):
- schema = create_schema_and_object(request.data['name'], request.data['database'])
- serializer = SchemaSerializer(schema)
- return Response(serializer.data, status=status.HTTP_201_CREATED)
+ serializer = SchemaSerializer(data=request.data)
+ if serializer.is_valid():
+ schema = create_schema_and_object(request.data['name'], request.data['database'])
+ serializer = SchemaSerializer(schema)
+ return Response(serializer.data, status=status.HTTP_201_CREATED)
+ else:
+ raise ValidationError(serializer.errors)
class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin,
| {"golden_diff": "diff --git a/mathesar/utils/schemas.py b/mathesar/utils/schemas.py\n--- a/mathesar/utils/schemas.py\n+++ b/mathesar/utils/schemas.py\n@@ -1,5 +1,8 @@\n+from rest_framework.exceptions import ValidationError\n+\n from db.schemas import (\n- create_schema, get_schema_oid_from_name, get_mathesar_schemas_with_oids\n+ create_schema, get_schema_oid_from_name, get_mathesar_schemas,\n+ get_mathesar_schemas_with_oids\n )\n from mathesar.database.base import create_mathesar_engine\n from mathesar.models import Schema\n@@ -7,6 +10,11 @@\n \n def create_schema_and_object(name, database):\n engine = create_mathesar_engine(database)\n+\n+ all_schemas = get_mathesar_schemas(engine)\n+ if name in all_schemas:\n+ raise ValidationError({\"name\": \"Schema name is not unique\"})\n+\n create_schema(name, engine)\n schema_oid = get_schema_oid_from_name(name, engine)\n schema = Schema.objects.create(oid=schema_oid, database=database)\ndiff --git a/mathesar/views/api.py b/mathesar/views/api.py\n--- a/mathesar/views/api.py\n+++ b/mathesar/views/api.py\n@@ -42,9 +42,13 @@\n filterset_class = SchemaFilter\n \n def create(self, request):\n- schema = create_schema_and_object(request.data['name'], request.data['database'])\n- serializer = SchemaSerializer(schema)\n- return Response(serializer.data, status=status.HTTP_201_CREATED)\n+ serializer = SchemaSerializer(data=request.data)\n+ if serializer.is_valid():\n+ schema = create_schema_and_object(request.data['name'], request.data['database'])\n+ serializer = SchemaSerializer(schema)\n+ return Response(serializer.data, status=status.HTTP_201_CREATED)\n+ else:\n+ raise ValidationError(serializer.errors)\n \n \n class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin,\n", "issue": "Duplicate schema creation\n**Describe the bug**\r\nWe are currently able to create a new schema with an existing schema name, creating duplicates on our mathesar_schema table.\r\n\r\n**Expected behavior**\r\n* Schema name should be unique per db in mathesar_schema table.\r\n* If a new schema creation is attempted with the same name as an existing schema, a 400 should be thrown with proper error message.\n", "before_files": [{"content": "import logging\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin\nfrom rest_framework.response import Response\nfrom django.core.cache import cache\nfrom django_filters import rest_framework as filters\n\n\nfrom mathesar.database.utils import get_non_default_database_keys\nfrom mathesar.models import Table, Schema, DataFile\nfrom mathesar.pagination import DefaultLimitOffsetPagination, TableLimitOffsetPagination\nfrom mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer\nfrom mathesar.utils.schemas import create_schema_and_object, reflect_schemas_from_database\nfrom mathesar.utils.tables import reflect_tables_from_schema\nfrom mathesar.utils.api import create_table_from_datafile, create_datafile\nfrom mathesar.filters import SchemaFilter, TableFilter\n\nlogger = logging.getLogger(__name__)\n\nDB_REFLECTION_KEY = 'database_reflected_recently'\nDB_REFLECTION_INTERVAL = 60 * 5 # we reflect DB changes every 5 minutes\n\n\ndef reflect_db_objects():\n if not cache.get(DB_REFLECTION_KEY):\n for database_key in get_non_default_database_keys():\n reflect_schemas_from_database(database_key)\n for schema in Schema.objects.all():\n 
reflect_tables_from_schema(schema)\n cache.set(DB_REFLECTION_KEY, True, DB_REFLECTION_INTERVAL)\n\n\nclass SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n def get_queryset(self):\n reflect_db_objects()\n return Schema.objects.all().order_by('-created_at')\n\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n\n def create(self, request):\n schema = create_schema_and_object(request.data['name'], request.data['database'])\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin,\n CreateModelMixin):\n def get_queryset(self):\n reflect_db_objects()\n return Table.objects.all().order_by('-created_at')\n\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def create(self, request):\n serializer = TableSerializer(data=request.data, context={'request': request})\n if serializer.is_valid():\n return create_table_from_datafile(request, serializer.validated_data)\n else:\n raise ValidationError(serializer.errors)\n\n\nclass RecordViewSet(viewsets.ViewSet):\n # There is no \"update\" method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n queryset = Table.objects.all().order_by('-created_at')\n\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n records = paginator.paginate_queryset(self.queryset, request, table_pk)\n serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.get_record(pk)\n if not record:\n raise NotFound\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = Table.objects.get(id=table_pk)\n # We only support adding a single record through the API.\n assert isinstance((request.data), dict)\n record = table.create_record_or_records(request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.update_record(pk, request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n table.delete_record(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass DatabaseKeyViewSet(viewsets.ViewSet):\n def list(self, request):\n return Response(get_non_default_database_keys())\n\n\nclass DataFileViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, CreateModelMixin):\n queryset = DataFile.objects.all().order_by('-created_at')\n serializer_class = DataFileSerializer\n pagination_class = DefaultLimitOffsetPagination\n\n def create(self, request):\n serializer = DataFileSerializer(data=request.data, context={'request': request})\n if serializer.is_valid():\n return create_datafile(request, serializer.validated_data['file'])\n else:\n raise ValidationError(serializer.errors)\n", "path": 
"mathesar/views/api.py"}, {"content": "from db.schemas import (\n create_schema, get_schema_oid_from_name, get_mathesar_schemas_with_oids\n)\nfrom mathesar.database.base import create_mathesar_engine\nfrom mathesar.models import Schema\n\n\ndef create_schema_and_object(name, database):\n engine = create_mathesar_engine(database)\n create_schema(name, engine)\n schema_oid = get_schema_oid_from_name(name, engine)\n schema = Schema.objects.create(oid=schema_oid, database=database)\n return schema\n\n\ndef reflect_schemas_from_database(database):\n engine = create_mathesar_engine(database)\n db_schema_oids = {\n schema[\"oid\"] for schema in get_mathesar_schemas_with_oids(engine)\n }\n schemas = [\n Schema.objects.get_or_create(oid=oid, database=database)\n for oid in db_schema_oids\n ]\n for schema in Schema.objects.all():\n if schema.oid not in db_schema_oids:\n schema.delete()\n return schemas\n", "path": "mathesar/utils/schemas.py"}]} | 2,195 | 422 |
gh_patches_debug_27609 | rasdani/github-patches | git_diff | fossasia__open-event-server-6473 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add celery, redis and SQLAlchemy integrations in sentry
- [ ] Celery integration
- [ ] Redis Integration
- [ ] SQLAlchemy Integration
</issue>
<code>
[start of app/__init__.py]
1 from celery.signals import after_task_publish
2 import logging
3 import os.path
4 from envparse import env
5
6 import sys
7 from flask import Flask, json, make_response
8 from flask_celeryext import FlaskCeleryExt
9 from app.settings import get_settings, get_setts
10 from flask_migrate import Migrate, MigrateCommand
11 from flask_script import Manager
12 from flask_login import current_user
13 from flask_jwt_extended import JWTManager
14 from flask_limiter import Limiter
15 from datetime import timedelta
16 from flask_cors import CORS
17 from flask_rest_jsonapi.errors import jsonapi_errors
18 from flask_rest_jsonapi.exceptions import JsonApiException
19 from healthcheck import HealthCheck
20 from apscheduler.schedulers.background import BackgroundScheduler
21 from elasticsearch_dsl.connections import connections
22 from pytz import utc
23
24 import sqlalchemy as sa
25
26 import stripe
27 from app.settings import get_settings
28 from app.models import db
29 from app.api.helpers.jwt import jwt_user_loader
30 from app.api.helpers.cache import cache
31 from werkzeug.middleware.profiler import ProfilerMiddleware
32 from app.views import BlueprintsManager
33 from app.api.helpers.auth import AuthManager, is_token_blacklisted
34 from app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \
35 send_event_fee_notification_followup, change_session_state_on_event_completion, \
36 expire_pending_tickets, send_monthly_event_invoice, event_invoices_mark_due
37 from app.models.event import Event
38 from app.models.role_invite import RoleInvite
39 from app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations
40 from app.views.elastic_search import client
41 from app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch
42 from app.views.redis_store import redis_store
43 from app.views.celery_ import celery
44 from app.templates.flask_ext.jinja.filters import init_filters
45 import sentry_sdk
46 from sentry_sdk.integrations.flask import FlaskIntegration
47
48
49 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
50
51 static_dir = os.path.dirname(os.path.dirname(__file__)) + "/static"
52 template_dir = os.path.dirname(__file__) + "/templates"
53 app = Flask(__name__, static_folder=static_dir, template_folder=template_dir)
54 limiter = Limiter(app)
55 env.read_envfile()
56
57
58 class ReverseProxied:
59 """
60 ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel
61 """
62
63 def __init__(self, app):
64 self.app = app
65
66 def __call__(self, environ, start_response):
67 scheme = environ.get('HTTP_X_FORWARDED_PROTO')
68 if scheme:
69 environ['wsgi.url_scheme'] = scheme
70 if os.getenv('FORCE_SSL', 'no') == 'yes':
71 environ['wsgi.url_scheme'] = 'https'
72 return self.app(environ, start_response)
73
74
75 app.wsgi_app = ReverseProxied(app.wsgi_app)
76
77 app_created = False
78
79
80 def create_app():
81 global app_created
82 if not app_created:
83 BlueprintsManager.register(app)
84 Migrate(app, db)
85
86 app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))
87 db.init_app(app)
88 _manager = Manager(app)
89 _manager.add_command('db', MigrateCommand)
90
91 if app.config['CACHING']:
92 cache.init_app(app, config={'CACHE_TYPE': 'simple'})
93 else:
94 cache.init_app(app, config={'CACHE_TYPE': 'null'})
95
96 stripe.api_key = 'SomeStripeKey'
97 app.secret_key = 'super secret key'
98 app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
99 app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'
100
101 app.logger.addHandler(logging.StreamHandler(sys.stdout))
102 app.logger.setLevel(logging.ERROR)
103
104 # set up jwt
105 app.config['JWT_HEADER_TYPE'] = 'JWT'
106 app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=1)
107 app.config['JWT_REFRESH_TOKEN_EXPIRES'] = timedelta(days=365)
108 app.config['JWT_ERROR_MESSAGE_KEY'] = 'error'
109 app.config['JWT_TOKEN_LOCATION'] = ['cookies', 'headers']
110 app.config['JWT_REFRESH_COOKIE_PATH'] = '/v1/auth/token/refresh'
111 app.config['JWT_SESSION_COOKIE'] = False
112 app.config['JWT_BLACKLIST_ENABLED'] = True
113 app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['refresh']
114 _jwt = JWTManager(app)
115 _jwt.user_loader_callback_loader(jwt_user_loader)
116 _jwt.token_in_blacklist_loader(is_token_blacklisted)
117
118 # setup celery
119 app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']
120 app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']
121 app.config['CELERY_ACCEPT_CONTENT'] = ['json', 'application/text']
122
123 CORS(app, resources={r"/*": {"origins": "*"}})
124 AuthManager.init_login(app)
125
126 if app.config['TESTING'] and app.config['PROFILE']:
127 # Profiling
128 app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
129
130 # development api
131 with app.app_context():
132 from app.api.admin_statistics_api.events import event_statistics
133 from app.api.auth import auth_routes
134 from app.api.attendees import attendee_misc_routes
135 from app.api.bootstrap import api_v1
136 from app.api.celery_tasks import celery_routes
137 from app.api.event_copy import event_copy
138 from app.api.exports import export_routes
139 from app.api.imports import import_routes
140 from app.api.uploads import upload_routes
141 from app.api.users import user_misc_routes
142 from app.api.orders import order_misc_routes
143 from app.api.role_invites import role_invites_misc_routes
144 from app.api.auth import ticket_blueprint, authorised_blueprint
145 from app.api.admin_translations import admin_blueprint
146 from app.api.orders import alipay_blueprint
147 from app.api.settings import admin_misc_routes
148
149 app.register_blueprint(api_v1)
150 app.register_blueprint(event_copy)
151 app.register_blueprint(upload_routes)
152 app.register_blueprint(export_routes)
153 app.register_blueprint(import_routes)
154 app.register_blueprint(celery_routes)
155 app.register_blueprint(auth_routes)
156 app.register_blueprint(event_statistics)
157 app.register_blueprint(user_misc_routes)
158 app.register_blueprint(attendee_misc_routes)
159 app.register_blueprint(order_misc_routes)
160 app.register_blueprint(role_invites_misc_routes)
161 app.register_blueprint(ticket_blueprint)
162 app.register_blueprint(authorised_blueprint)
163 app.register_blueprint(admin_blueprint)
164 app.register_blueprint(alipay_blueprint)
165 app.register_blueprint(admin_misc_routes)
166
167 sa.orm.configure_mappers()
168
169 if app.config['SERVE_STATIC']:
170 app.add_url_rule('/static/<path:filename>',
171 endpoint='static',
172 view_func=app.send_static_file)
173
174 # sentry
175 if not app_created and 'SENTRY_DSN' in app.config:
176 sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])
177
178 # redis
179 redis_store.init_app(app)
180
181 # elasticsearch
182 if app.config['ENABLE_ELASTICSEARCH']:
183 client.init_app(app)
184 connections.add_connection('default', client.elasticsearch)
185 with app.app_context():
186 try:
187 cron_rebuild_events_elasticsearch.delay()
188 except Exception:
189 pass
190
191 app_created = True
192 return app, _manager, db, _jwt
193
194
195 current_app, manager, database, jwt = create_app()
196 init_filters(app)
197
198
199 # http://stackoverflow.com/questions/26724623/
200 @app.before_request
201 def track_user():
202 if current_user.is_authenticated:
203 current_user.update_lat()
204
205
206 def make_celery(app=None):
207 app = app or create_app()[0]
208 celery.conf.update(app.config)
209 ext = FlaskCeleryExt(app)
210 return ext.celery
211
212
213 # Health-check
214 health = HealthCheck(current_app, "/health-check")
215 health.add_check(health_check_celery)
216 health.add_check(health_check_db)
217 with current_app.app_context():
218 current_app.config['MIGRATION_STATUS'] = check_migrations()
219 health.add_check(health_check_migrations)
220
221
222 # http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists
223 @after_task_publish.connect
224 def update_sent_state(sender=None, headers=None, **kwargs):
225 # the task may not exist if sent using `send_task` which
226 # sends tasks by name, so fall back to the default result backend
227 # if that is the case.
228 task = celery.tasks.get(sender)
229 backend = task.backend if task else celery.backend
230 backend.store_result(headers['id'], None, 'WAITING')
231
232
233 # register celery tasks. removing them will cause the tasks to not function. so don't remove them
234 # it is important to register them after celery is defined to resolve circular imports
235
236 from .api.helpers import tasks
237
238 # import helpers.tasks
239
240
241 scheduler = BackgroundScheduler(timezone=utc)
242 # scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)
243 # scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)
244 if app.config['ENABLE_ELASTICSEARCH']:
245 scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)
246 scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)
247
248 scheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)
249 scheduler.add_job(send_event_fee_notification, 'cron', day=1)
250 scheduler.add_job(send_event_fee_notification_followup, 'cron', day=1, month='1-12')
251 scheduler.add_job(change_session_state_on_event_completion, 'cron', hour=5, minute=30)
252 scheduler.add_job(expire_pending_tickets, 'cron', minute=45)
253 scheduler.add_job(send_monthly_event_invoice, 'cron', day=1, month='1-12')
254 scheduler.add_job(event_invoices_mark_due, 'cron', hour=5)
255 scheduler.start()
256
257
258 @app.errorhandler(500)
259 def internal_server_error(error):
260 if current_app.config['PROPOGATE_ERROR'] is True:
261 exc = JsonApiException({'pointer': ''}, str(error))
262 else:
263 exc = JsonApiException({'pointer': ''}, 'Unknown error')
264 return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,
265 {'Content-Type': 'application/vnd.api+json'})
266
267
268 if __name__ == '__main__':
269 current_app.run()
270
[end of app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/__init__.py b/app/__init__.py
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -6,6 +6,12 @@
import sys
from flask import Flask, json, make_response
from flask_celeryext import FlaskCeleryExt
+import sentry_sdk
+from sentry_sdk.integrations.flask import FlaskIntegration
+from sentry_sdk.integrations.celery import CeleryIntegration
+from sentry_sdk.integrations.redis import RedisIntegration
+from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
+
from app.settings import get_settings, get_setts
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
@@ -42,8 +48,6 @@
from app.views.redis_store import redis_store
from app.views.celery_ import celery
from app.templates.flask_ext.jinja.filters import init_filters
-import sentry_sdk
-from sentry_sdk.integrations.flask import FlaskIntegration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -173,7 +177,8 @@
# sentry
if not app_created and 'SENTRY_DSN' in app.config:
- sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])
+ sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration(), RedisIntegration(),
+ CeleryIntegration(), SqlalchemyIntegration()])
# redis
redis_store.init_app(app)
| {"golden_diff": "diff --git a/app/__init__.py b/app/__init__.py\n--- a/app/__init__.py\n+++ b/app/__init__.py\n@@ -6,6 +6,12 @@\n import sys\n from flask import Flask, json, make_response\n from flask_celeryext import FlaskCeleryExt\n+import sentry_sdk\n+from sentry_sdk.integrations.flask import FlaskIntegration\n+from sentry_sdk.integrations.celery import CeleryIntegration\n+from sentry_sdk.integrations.redis import RedisIntegration\n+from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration\n+\n from app.settings import get_settings, get_setts\n from flask_migrate import Migrate, MigrateCommand\n from flask_script import Manager\n@@ -42,8 +48,6 @@\n from app.views.redis_store import redis_store\n from app.views.celery_ import celery\n from app.templates.flask_ext.jinja.filters import init_filters\n-import sentry_sdk\n-from sentry_sdk.integrations.flask import FlaskIntegration\n \n \n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n@@ -173,7 +177,8 @@\n \n # sentry\n if not app_created and 'SENTRY_DSN' in app.config:\n- sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])\n+ sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration(), RedisIntegration(),\n+ CeleryIntegration(), SqlalchemyIntegration()])\n \n # redis\n redis_store.init_app(app)\n", "issue": "Add celery, redis and SQLAlchemy integrations in sentry\n- [ ] Celery integration\r\n- [ ] Redis Integration\r\n- [ ] SQLAlchemy Integration\n", "before_files": [{"content": "from celery.signals import after_task_publish\nimport logging\nimport os.path\nfrom envparse import env\n\nimport sys\nfrom flask import Flask, json, make_response\nfrom flask_celeryext import FlaskCeleryExt\nfrom app.settings import get_settings, get_setts\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import current_user\nfrom flask_jwt_extended import JWTManager\nfrom flask_limiter import Limiter\nfrom datetime import timedelta\nfrom flask_cors import CORS\nfrom flask_rest_jsonapi.errors import jsonapi_errors\nfrom flask_rest_jsonapi.exceptions import JsonApiException\nfrom healthcheck import HealthCheck\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom elasticsearch_dsl.connections import connections\nfrom pytz import utc\n\nimport sqlalchemy as sa\n\nimport stripe\nfrom app.settings import get_settings\nfrom app.models import db\nfrom app.api.helpers.jwt import jwt_user_loader\nfrom app.api.helpers.cache import cache\nfrom werkzeug.middleware.profiler import ProfilerMiddleware\nfrom app.views import BlueprintsManager\nfrom app.api.helpers.auth import AuthManager, is_token_blacklisted\nfrom app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \\\n send_event_fee_notification_followup, change_session_state_on_event_completion, \\\n expire_pending_tickets, send_monthly_event_invoice, event_invoices_mark_due\nfrom app.models.event import Event\nfrom app.models.role_invite import RoleInvite\nfrom app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations\nfrom app.views.elastic_search import client\nfrom app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch\nfrom app.views.redis_store import redis_store\nfrom app.views.celery_ import celery\nfrom app.templates.flask_ext.jinja.filters import init_filters\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\n\nBASE_DIR = 
os.path.dirname(os.path.abspath(__file__))\n\nstatic_dir = os.path.dirname(os.path.dirname(__file__)) + \"/static\"\ntemplate_dir = os.path.dirname(__file__) + \"/templates\"\napp = Flask(__name__, static_folder=static_dir, template_folder=template_dir)\nlimiter = Limiter(app)\nenv.read_envfile()\n\n\nclass ReverseProxied:\n \"\"\"\n ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n if os.getenv('FORCE_SSL', 'no') == 'yes':\n environ['wsgi.url_scheme'] = 'https'\n return self.app(environ, start_response)\n\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\napp_created = False\n\n\ndef create_app():\n global app_created\n if not app_created:\n BlueprintsManager.register(app)\n Migrate(app, db)\n\n app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))\n db.init_app(app)\n _manager = Manager(app)\n _manager.add_command('db', MigrateCommand)\n\n if app.config['CACHING']:\n cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n else:\n cache.init_app(app, config={'CACHE_TYPE': 'null'})\n\n stripe.api_key = 'SomeStripeKey'\n app.secret_key = 'super secret key'\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.ERROR)\n\n # set up jwt\n app.config['JWT_HEADER_TYPE'] = 'JWT'\n app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=1)\n app.config['JWT_REFRESH_TOKEN_EXPIRES'] = timedelta(days=365)\n app.config['JWT_ERROR_MESSAGE_KEY'] = 'error'\n app.config['JWT_TOKEN_LOCATION'] = ['cookies', 'headers']\n app.config['JWT_REFRESH_COOKIE_PATH'] = '/v1/auth/token/refresh'\n app.config['JWT_SESSION_COOKIE'] = False\n app.config['JWT_BLACKLIST_ENABLED'] = True\n app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['refresh']\n _jwt = JWTManager(app)\n _jwt.user_loader_callback_loader(jwt_user_loader)\n _jwt.token_in_blacklist_loader(is_token_blacklisted)\n\n # setup celery\n app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n app.config['CELERY_ACCEPT_CONTENT'] = ['json', 'application/text']\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n\n if app.config['TESTING'] and app.config['PROFILE']:\n # Profiling\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n # development api\n with app.app_context():\n from app.api.admin_statistics_api.events import event_statistics\n from app.api.auth import auth_routes\n from app.api.attendees import attendee_misc_routes\n from app.api.bootstrap import api_v1\n from app.api.celery_tasks import celery_routes\n from app.api.event_copy import event_copy\n from app.api.exports import export_routes\n from app.api.imports import import_routes\n from app.api.uploads import upload_routes\n from app.api.users import user_misc_routes\n from app.api.orders import order_misc_routes\n from app.api.role_invites import role_invites_misc_routes\n from app.api.auth import ticket_blueprint, authorised_blueprint\n from app.api.admin_translations import admin_blueprint\n from app.api.orders import alipay_blueprint\n from app.api.settings import admin_misc_routes\n\n app.register_blueprint(api_v1)\n app.register_blueprint(event_copy)\n 
app.register_blueprint(upload_routes)\n app.register_blueprint(export_routes)\n app.register_blueprint(import_routes)\n app.register_blueprint(celery_routes)\n app.register_blueprint(auth_routes)\n app.register_blueprint(event_statistics)\n app.register_blueprint(user_misc_routes)\n app.register_blueprint(attendee_misc_routes)\n app.register_blueprint(order_misc_routes)\n app.register_blueprint(role_invites_misc_routes)\n app.register_blueprint(ticket_blueprint)\n app.register_blueprint(authorised_blueprint)\n app.register_blueprint(admin_blueprint)\n app.register_blueprint(alipay_blueprint)\n app.register_blueprint(admin_misc_routes)\n\n sa.orm.configure_mappers()\n\n if app.config['SERVE_STATIC']:\n app.add_url_rule('/static/<path:filename>',\n endpoint='static',\n view_func=app.send_static_file)\n\n # sentry\n if not app_created and 'SENTRY_DSN' in app.config:\n sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])\n\n # redis\n redis_store.init_app(app)\n\n # elasticsearch\n if app.config['ENABLE_ELASTICSEARCH']:\n client.init_app(app)\n connections.add_connection('default', client.elasticsearch)\n with app.app_context():\n try:\n cron_rebuild_events_elasticsearch.delay()\n except Exception:\n pass\n\n app_created = True\n return app, _manager, db, _jwt\n\n\ncurrent_app, manager, database, jwt = create_app()\ninit_filters(app)\n\n\n# http://stackoverflow.com/questions/26724623/\[email protected]_request\ndef track_user():\n if current_user.is_authenticated:\n current_user.update_lat()\n\n\ndef make_celery(app=None):\n app = app or create_app()[0]\n celery.conf.update(app.config)\n ext = FlaskCeleryExt(app)\n return ext.celery\n\n\n# Health-check\nhealth = HealthCheck(current_app, \"/health-check\")\nhealth.add_check(health_check_celery)\nhealth.add_check(health_check_db)\nwith current_app.app_context():\n current_app.config['MIGRATION_STATUS'] = check_migrations()\nhealth.add_check(health_check_migrations)\n\n\n# http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists\n@after_task_publish.connect\ndef update_sent_state(sender=None, headers=None, **kwargs):\n # the task may not exist if sent using `send_task` which\n # sends tasks by name, so fall back to the default result backend\n # if that is the case.\n task = celery.tasks.get(sender)\n backend = task.backend if task else celery.backend\n backend.store_result(headers['id'], None, 'WAITING')\n\n\n# register celery tasks. removing them will cause the tasks to not function. 
so don't remove them\n# it is important to register them after celery is defined to resolve circular imports\n\nfrom .api.helpers import tasks\n\n# import helpers.tasks\n\n\nscheduler = BackgroundScheduler(timezone=utc)\n# scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)\n# scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)\nif app.config['ENABLE_ELASTICSEARCH']:\n scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)\n scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)\n\nscheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)\nscheduler.add_job(send_event_fee_notification, 'cron', day=1)\nscheduler.add_job(send_event_fee_notification_followup, 'cron', day=1, month='1-12')\nscheduler.add_job(change_session_state_on_event_completion, 'cron', hour=5, minute=30)\nscheduler.add_job(expire_pending_tickets, 'cron', minute=45)\nscheduler.add_job(send_monthly_event_invoice, 'cron', day=1, month='1-12')\nscheduler.add_job(event_invoices_mark_due, 'cron', hour=5)\nscheduler.start()\n\n\[email protected](500)\ndef internal_server_error(error):\n if current_app.config['PROPOGATE_ERROR'] is True:\n exc = JsonApiException({'pointer': ''}, str(error))\n else:\n exc = JsonApiException({'pointer': ''}, 'Unknown error')\n return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,\n {'Content-Type': 'application/vnd.api+json'})\n\n\nif __name__ == '__main__':\n current_app.run()\n", "path": "app/__init__.py"}]} | 3,569 | 340 |
gh_patches_debug_36849 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1024 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_correlations return value should be immutable
According to the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/correlationcontext/api.md#get-correlations):
> the returned value can be either an immutable collection or an immutable iterator
Currently, we return a `dict` ([link](https://github.com/open-telemetry/opentelemetry-python/blob/3cae0775ba12a2f7b4214b8b8c062c5e81002a19/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py#L34-L37)):
```python
correlations = get_value(_CORRELATION_CONTEXT_KEY, context=context)
if isinstance(correlations, dict):
return correlations.copy()
return {}
```
This was mentioned in the PR but not definitively addressed https://github.com/open-telemetry/opentelemetry-python/pull/471#discussion_r392369812, so I thought it might be worth bringing up again before GA.
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import typing
16 import urllib.parse
17
18 from opentelemetry import correlationcontext
19 from opentelemetry.context import get_current
20 from opentelemetry.context.context import Context
21 from opentelemetry.trace.propagation import httptextformat
22
23
24 class CorrelationContextPropagator(httptextformat.HTTPTextFormat):
25 MAX_HEADER_LENGTH = 8192
26 MAX_PAIR_LENGTH = 4096
27 MAX_PAIRS = 180
28 _CORRELATION_CONTEXT_HEADER_NAME = "otcorrelationcontext"
29
30 def extract(
31 self,
32 get_from_carrier: httptextformat.Getter[
33 httptextformat.HTTPTextFormatT
34 ],
35 carrier: httptextformat.HTTPTextFormatT,
36 context: typing.Optional[Context] = None,
37 ) -> Context:
38 """Extract CorrelationContext from the carrier.
39
40 See
41 `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.extract`
42 """
43
44 if context is None:
45 context = get_current()
46
47 header = _extract_first_element(
48 get_from_carrier(carrier, self._CORRELATION_CONTEXT_HEADER_NAME)
49 )
50
51 if not header or len(header) > self.MAX_HEADER_LENGTH:
52 return context
53
54 correlations = header.split(",")
55 total_correlations = self.MAX_PAIRS
56 for correlation in correlations:
57 if total_correlations <= 0:
58 return context
59 total_correlations -= 1
60 if len(correlation) > self.MAX_PAIR_LENGTH:
61 continue
62 try:
63 name, value = correlation.split("=", 1)
64 except Exception: # pylint: disable=broad-except
65 continue
66 context = correlationcontext.set_correlation(
67 urllib.parse.unquote(name).strip(),
68 urllib.parse.unquote(value).strip(),
69 context=context,
70 )
71
72 return context
73
74 def inject(
75 self,
76 set_in_carrier: httptextformat.Setter[httptextformat.HTTPTextFormatT],
77 carrier: httptextformat.HTTPTextFormatT,
78 context: typing.Optional[Context] = None,
79 ) -> None:
80 """Injects CorrelationContext into the carrier.
81
82 See
83 `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.inject`
84 """
85 correlations = correlationcontext.get_correlations(context=context)
86 if not correlations:
87 return
88
89 correlation_context_string = _format_correlations(correlations)
90 set_in_carrier(
91 carrier,
92 self._CORRELATION_CONTEXT_HEADER_NAME,
93 correlation_context_string,
94 )
95
96
97 def _format_correlations(correlations: typing.Dict[str, object]) -> str:
98 return ",".join(
99 key + "=" + urllib.parse.quote_plus(str(value))
100 for key, value in correlations.items()
101 )
102
103
104 def _extract_first_element(
105 items: typing.Iterable[httptextformat.HTTPTextFormatT],
106 ) -> typing.Optional[httptextformat.HTTPTextFormatT]:
107 if items is None:
108 return None
109 return next(iter(items), None)
110
[end of opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py]
[start of opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import typing
16
17 from opentelemetry.context import get_value, set_value
18 from opentelemetry.context.context import Context
19
20 _CORRELATION_CONTEXT_KEY = "correlation-context"
21
22
23 def get_correlations(
24 context: typing.Optional[Context] = None,
25 ) -> typing.Dict[str, object]:
26 """Returns the name/value pairs in the CorrelationContext
27
28 Args:
29 context: The Context to use. If not set, uses current Context
30
31 Returns:
32 Name/value pairs in the CorrelationContext
33 """
34 correlations = get_value(_CORRELATION_CONTEXT_KEY, context=context)
35 if isinstance(correlations, dict):
36 return correlations.copy()
37 return {}
38
39
40 def get_correlation(
41 name: str, context: typing.Optional[Context] = None
42 ) -> typing.Optional[object]:
43 """Provides access to the value for a name/value pair in the
44 CorrelationContext
45
46 Args:
47 name: The name of the value to retrieve
48 context: The Context to use. If not set, uses current Context
49
50 Returns:
51 The value associated with the given name, or null if the given name is
52 not present.
53 """
54 return get_correlations(context=context).get(name)
55
56
57 def set_correlation(
58 name: str, value: object, context: typing.Optional[Context] = None
59 ) -> Context:
60 """Sets a value in the CorrelationContext
61
62 Args:
63 name: The name of the value to set
64 value: The value to set
65 context: The Context to use. If not set, uses current Context
66
67 Returns:
68 A Context with the value updated
69 """
70 correlations = get_correlations(context=context)
71 correlations[name] = value
72 return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)
73
74
75 def remove_correlation(
76 name: str, context: typing.Optional[Context] = None
77 ) -> Context:
78 """Removes a value from the CorrelationContext
79
80 Args:
81 name: The name of the value to remove
82 context: The Context to use. If not set, uses current Context
83
84 Returns:
85 A Context with the name/value removed
86 """
87 correlations = get_correlations(context=context)
88 correlations.pop(name, None)
89
90 return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)
91
92
93 def clear_correlations(context: typing.Optional[Context] = None) -> Context:
94 """Removes all values from the CorrelationContext
95
96 Args:
97 context: The Context to use. If not set, uses current Context
98
99 Returns:
100 A Context with all correlations removed
101 """
102 return set_value(_CORRELATION_CONTEXT_KEY, {}, context=context)
103
[end of opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py b/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py
--- a/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py
@@ -13,6 +13,7 @@
# limitations under the License.
import typing
+from types import MappingProxyType
from opentelemetry.context import get_value, set_value
from opentelemetry.context.context import Context
@@ -22,7 +23,7 @@
def get_correlations(
context: typing.Optional[Context] = None,
-) -> typing.Dict[str, object]:
+) -> typing.Mapping[str, object]:
"""Returns the name/value pairs in the CorrelationContext
Args:
@@ -33,8 +34,8 @@
"""
correlations = get_value(_CORRELATION_CONTEXT_KEY, context=context)
if isinstance(correlations, dict):
- return correlations.copy()
- return {}
+ return MappingProxyType(correlations.copy())
+ return MappingProxyType({})
def get_correlation(
@@ -67,7 +68,7 @@
Returns:
A Context with the value updated
"""
- correlations = get_correlations(context=context)
+ correlations = dict(get_correlations(context=context))
correlations[name] = value
return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)
@@ -84,7 +85,7 @@
Returns:
A Context with the name/value removed
"""
- correlations = get_correlations(context=context)
+ correlations = dict(get_correlations(context=context))
correlations.pop(name, None)
return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)
diff --git a/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py
--- a/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py
@@ -94,7 +94,7 @@
)
-def _format_correlations(correlations: typing.Dict[str, object]) -> str:
+def _format_correlations(correlations: typing.Mapping[str, object]) -> str:
return ",".join(
key + "=" + urllib.parse.quote_plus(str(value))
for key, value in correlations.items()
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py b/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import typing\n+from types import MappingProxyType\n \n from opentelemetry.context import get_value, set_value\n from opentelemetry.context.context import Context\n@@ -22,7 +23,7 @@\n \n def get_correlations(\n context: typing.Optional[Context] = None,\n-) -> typing.Dict[str, object]:\n+) -> typing.Mapping[str, object]:\n \"\"\"Returns the name/value pairs in the CorrelationContext\n \n Args:\n@@ -33,8 +34,8 @@\n \"\"\"\n correlations = get_value(_CORRELATION_CONTEXT_KEY, context=context)\n if isinstance(correlations, dict):\n- return correlations.copy()\n- return {}\n+ return MappingProxyType(correlations.copy())\n+ return MappingProxyType({})\n \n \n def get_correlation(\n@@ -67,7 +68,7 @@\n Returns:\n A Context with the value updated\n \"\"\"\n- correlations = get_correlations(context=context)\n+ correlations = dict(get_correlations(context=context))\n correlations[name] = value\n return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)\n \n@@ -84,7 +85,7 @@\n Returns:\n A Context with the name/value removed\n \"\"\"\n- correlations = get_correlations(context=context)\n+ correlations = dict(get_correlations(context=context))\n correlations.pop(name, None)\n \n return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)\ndiff --git a/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py\n@@ -94,7 +94,7 @@\n )\n \n \n-def _format_correlations(correlations: typing.Dict[str, object]) -> str:\n+def _format_correlations(correlations: typing.Mapping[str, object]) -> str:\n return \",\".join(\n key + \"=\" + urllib.parse.quote_plus(str(value))\n for key, value in correlations.items()\n", "issue": "get_correlations return value should be immutable\nAccording to the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/correlationcontext/api.md#get-correlations):\r\n\r\n> the returned value can be either an immutable collection or an immutable iterator\r\n\r\nCurrently, we return a `dict` ([link](https://github.com/open-telemetry/opentelemetry-python/blob/3cae0775ba12a2f7b4214b8b8c062c5e81002a19/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py#L34-L37)): \r\n```python\r\ncorrelations = get_value(_CORRELATION_CONTEXT_KEY, context=context)\r\nif isinstance(correlations, dict):\r\n return correlations.copy()\r\nreturn {}\r\n```\r\n\r\nThis was mentioned in the PR but not definitively addressed https://github.com/open-telemetry/opentelemetry-python/pull/471#discussion_r392369812, so I thought it might be worth bringing up again before GA.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nimport urllib.parse\n\nfrom opentelemetry import correlationcontext\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.trace.propagation import httptextformat\n\n\nclass CorrelationContextPropagator(httptextformat.HTTPTextFormat):\n MAX_HEADER_LENGTH = 8192\n MAX_PAIR_LENGTH = 4096\n MAX_PAIRS = 180\n _CORRELATION_CONTEXT_HEADER_NAME = \"otcorrelationcontext\"\n\n def extract(\n self,\n get_from_carrier: httptextformat.Getter[\n httptextformat.HTTPTextFormatT\n ],\n carrier: httptextformat.HTTPTextFormatT,\n context: typing.Optional[Context] = None,\n ) -> Context:\n \"\"\"Extract CorrelationContext from the carrier.\n\n See\n `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n get_from_carrier(carrier, self._CORRELATION_CONTEXT_HEADER_NAME)\n )\n\n if not header or len(header) > self.MAX_HEADER_LENGTH:\n return context\n\n correlations = header.split(\",\")\n total_correlations = self.MAX_PAIRS\n for correlation in correlations:\n if total_correlations <= 0:\n return context\n total_correlations -= 1\n if len(correlation) > self.MAX_PAIR_LENGTH:\n continue\n try:\n name, value = correlation.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n context = correlationcontext.set_correlation(\n urllib.parse.unquote(name).strip(),\n urllib.parse.unquote(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n set_in_carrier: httptextformat.Setter[httptextformat.HTTPTextFormatT],\n carrier: httptextformat.HTTPTextFormatT,\n context: typing.Optional[Context] = None,\n ) -> None:\n \"\"\"Injects CorrelationContext into the carrier.\n\n See\n `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.inject`\n \"\"\"\n correlations = correlationcontext.get_correlations(context=context)\n if not correlations:\n return\n\n correlation_context_string = _format_correlations(correlations)\n set_in_carrier(\n carrier,\n self._CORRELATION_CONTEXT_HEADER_NAME,\n correlation_context_string,\n )\n\n\ndef _format_correlations(correlations: typing.Dict[str, object]) -> str:\n return \",\".join(\n key + \"=\" + urllib.parse.quote_plus(str(value))\n for key, value in correlations.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Iterable[httptextformat.HTTPTextFormatT],\n) -> typing.Optional[httptextformat.HTTPTextFormatT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n\nimport typing\n\nfrom opentelemetry.context import get_value, set_value\nfrom opentelemetry.context.context import Context\n\n_CORRELATION_CONTEXT_KEY = \"correlation-context\"\n\n\ndef get_correlations(\n context: typing.Optional[Context] = None,\n) -> typing.Dict[str, object]:\n \"\"\"Returns the name/value pairs in the CorrelationContext\n\n Args:\n context: The Context to use. If not set, uses current Context\n\n Returns:\n Name/value pairs in the CorrelationContext\n \"\"\"\n correlations = get_value(_CORRELATION_CONTEXT_KEY, context=context)\n if isinstance(correlations, dict):\n return correlations.copy()\n return {}\n\n\ndef get_correlation(\n name: str, context: typing.Optional[Context] = None\n) -> typing.Optional[object]:\n \"\"\"Provides access to the value for a name/value pair in the\n CorrelationContext\n\n Args:\n name: The name of the value to retrieve\n context: The Context to use. If not set, uses current Context\n\n Returns:\n The value associated with the given name, or null if the given name is\n not present.\n \"\"\"\n return get_correlations(context=context).get(name)\n\n\ndef set_correlation(\n name: str, value: object, context: typing.Optional[Context] = None\n) -> Context:\n \"\"\"Sets a value in the CorrelationContext\n\n Args:\n name: The name of the value to set\n value: The value to set\n context: The Context to use. If not set, uses current Context\n\n Returns:\n A Context with the value updated\n \"\"\"\n correlations = get_correlations(context=context)\n correlations[name] = value\n return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)\n\n\ndef remove_correlation(\n name: str, context: typing.Optional[Context] = None\n) -> Context:\n \"\"\"Removes a value from the CorrelationContext\n\n Args:\n name: The name of the value to remove\n context: The Context to use. If not set, uses current Context\n\n Returns:\n A Context with the name/value removed\n \"\"\"\n correlations = get_correlations(context=context)\n correlations.pop(name, None)\n\n return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)\n\n\ndef clear_correlations(context: typing.Optional[Context] = None) -> Context:\n \"\"\"Removes all values from the CorrelationContext\n\n Args:\n context: The Context to use. If not set, uses current Context\n\n Returns:\n A Context with all correlations removed\n \"\"\"\n return set_value(_CORRELATION_CONTEXT_KEY, {}, context=context)\n", "path": "opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py"}]} | 2,747 | 564 |
gh_patches_debug_137 | rasdani/github-patches | git_diff | google__flax-3089 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incompatibility with the Flax official ImageNet example with jax version >= 0.4.7
Hi,
I was testing the [official flax example](https://github.com/google/flax/tree/main/examples/imagenet/) with jax and jaxlib versions >= 0.4.7 in a Colab Pro+ environment with a V100. After installing the requirements with `pip install -r requirements.txt` and running `python main.py --workdir=./imagenet --config=configs/v100_x8.py`, the error is
```
File "/content/FlaxImageNet/main.py", line 29, in <module>
import train
File "/content/FlaxImageNet/train.py", line 30, in <module>
from flax.training import checkpoints
File "/usr/local/lib/python3.10/dist-packages/flax/training/checkpoints.py", line 34,
in <module>
from jax.experimental.global_device_array import GlobalDeviceArray
ModuleNotFoundError: No module named 'jax.experimental.global_device_array'
```
According to [this StackOverflow answer](https://stackoverflow.com/questions/76191911/no-module-named-jax-experimental-global-device-array-when-running-the-official/76192120#76192120), `jax.experimental.global_device_array` has been removed in recent jax releases.

Therefore, it would be great if the official example could be fixed so that it works on newer versions of jax.
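
For illustration only, a hypothetical compatibility shim (not the project's actual fix) could guard the removed import so the affected module still loads on newer jax releases:

```python
# Hypothetical shim, not Flax's real fix: tolerate the module removed in
# jax >= 0.4.7 while keeping older jax versions working.
try:
    from jax.experimental.global_device_array import GlobalDeviceArray  # jax < 0.4.7
except ImportError:
    # On newer jax the class no longer exists; code paths that relied on it
    # would have to be skipped or rewritten around jax.Array.
    GlobalDeviceArray = None
```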
Unable to import checkpoints
Provide as much information as possible. At least, this should include a description of your issue and steps to reproduce the problem. If possible also provide a summary of what steps or workarounds you have already tried.
### System information
- Flax, jax, jaxlib versions (obtained with `pip show flax jax jaxlib`): all at their latest releases, plus orbax
Name: flax
Version: 0.6.9
Summary: Flax: A neural network library for JAX designed for flexibility
Home-page:
Author:
Author-email: Flax team <[email protected]>
License:
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: jax, msgpack, numpy, optax, orbax-checkpoint, PyYAML, rich, tensorstore, typing-extensions
Required-by:
---
Name: jax
Version: 0.4.8
Summary: Differentiate, compile, and transform Numpy code.
Home-page: https://github.com/google/jax
Author: JAX team
Author-email: [email protected]
License: Apache-2.0
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: ml-dtypes, numpy, opt-einsum, scipy
Required-by: chex, diffrax, equinox, flax, optax, orbax, orbax-checkpoint, richmol
---
Name: jaxlib
Version: 0.4.7
Summary: XLA library for JAX
Home-page: https://github.com/google/jax
Author: JAX team
Author-email: [email protected]
License: Apache-2.0
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: ml-dtypes, numpy, scipy
Required-by: chex, optax, orbax, orbax-checkpoint
---
Name: orbax
Version: 0.1.7
Summary: Orbax
Home-page:
Author:
Author-email: Orbax Authors <[email protected]>
License:
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: absl-py, cached_property, etils, importlib_resources, jax, jaxlib, msgpack, nest_asyncio, numpy, pyyaml, tensorstore, typing_extensions
- Python version: 3.8
### Problem you have encountered:
When importing checkpoints, I get the following error:
"""
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-1-0eac7b685376> in <module>
11 config.update("jax_enable_x64", True)
12 from flax import serialization
---> 13 from flax.training import checkpoints
14 from jax import numpy as jnp
15 import jax
/gpfs/cfel/group/cmi/common/psi4/psi4conda/lib//python3.8/site-packages/flax/training/checkpoints.py in <module>
37 from jax import process_index
38 from jax import sharding
---> 39 from jax.experimental.global_device_array import GlobalDeviceArray
40 from jax.experimental.multihost_utils import sync_global_devices
41 import orbax.checkpoint as orbax
ModuleNotFoundError: No module named 'jax.experimental.global_device_array'
"""
I guess it is a compatibility problem between jax and flax.
### What you expected to happen:
The import should succeed as usual.
</issue>
<code>
[start of flax/version.py]
1 # Copyright 2023 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Current Flax version at head on Github."""
16 __version__ = "0.6.9"
17
18
[end of flax/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/version.py b/flax/version.py
--- a/flax/version.py
+++ b/flax/version.py
@@ -13,5 +13,5 @@
# limitations under the License.
"""Current Flax version at head on Github."""
-__version__ = "0.6.9"
+__version__ = "0.6.10"
| {"golden_diff": "diff --git a/flax/version.py b/flax/version.py\n--- a/flax/version.py\n+++ b/flax/version.py\n@@ -13,5 +13,5 @@\n # limitations under the License.\n \n \"\"\"Current Flax version at head on Github.\"\"\"\n-__version__ = \"0.6.9\"\n+__version__ = \"0.6.10\"\n", "issue": "Imcompatibility with Flax Official ImageNet example with jax version >= 0.4.7\nHi, \r\n\r\nI was testing the [official flax example](https://github.com/google/flax/tree/main/examples/imagenet/) on Colab with jax and jaxlib version >= 0.4.7 on the colab pro+ environment with V100. After installing the requirements with `pip install -r requirements.txt` and with the following command `python main.py --workdir=./imagenet --config=configs/v100_x8.py`, the error is \r\n\r\n```\r\nFile \"/content/FlaxImageNet/main.py\", line 29, in <module>\r\nimport train\r\nFile \"/content/FlaxImageNet/train.py\", line 30, in <module>\r\nfrom flax.training import checkpoints\r\nFile \"/usr/local/lib/python3.10/dist-packages/flax/training/checkpoints.py\", line 34, \r\nin <module>\r\nfrom jax.experimental.global_device_array import GlobalDeviceArray\r\nModuleNotFoundError: No module named 'jax.experimental.global_device_array'\r\n```\r\n\r\nAccording to [this StackOverflow answer](https://stackoverflow.com/questions/76191911/no-module-named-jax-experimental-global-device-array-when-running-the-official/76192120#76192120), it seems that 'jax.experimental.global_device_array' is removed. \r\n\r\nTherefore, it would be great if one can fix the official example so that it works on newer version of jax. \nUnavailable to import checkpoints\nProvide as much information as possible. At least, this should include a description of your issue and steps to reproduce the problem. If possible also provide a summary of what steps or workarounds you have already tried.\r\n\r\n### System information\r\n- Flax, jax, jaxlib versions (obtain with `pip show flax jax jaxlib`: All to its latest, also orbitax\r\n\r\nName: flax\r\nVersion: 0.6.9\r\nSummary: Flax: A neural network library for JAX designed for flexibility\r\nHome-page: \r\nAuthor: \r\nAuthor-email: Flax team <[email protected]>\r\nLicense: \r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: jax, msgpack, numpy, optax, orbax-checkpoint, PyYAML, rich, tensorstore, typing-extensions\r\nRequired-by: \r\n---\r\nName: jax\r\nVersion: 0.4.8\r\nSummary: Differentiate, compile, and transform Numpy code.\r\nHome-page: https://github.com/google/jax\r\nAuthor: JAX team\r\nAuthor-email: [email protected]\r\nLicense: Apache-2.0\r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: ml-dtypes, numpy, opt-einsum, scipy\r\nRequired-by: chex, diffrax, equinox, flax, optax, orbax, orbax-checkpoint, richmol\r\n---\r\nName: jaxlib\r\nVersion: 0.4.7\r\nSummary: XLA library for JAX\r\nHome-page: https://github.com/google/jax\r\nAuthor: JAX team\r\nAuthor-email: [email protected]\r\nLicense: Apache-2.0\r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: ml-dtypes, numpy, scipy\r\nRequired-by: chex, optax, orbax, orbax-checkpoint\r\n---\r\nName: orbax\r\nVersion: 0.1.7\r\nSummary: Orbax\r\nHome-page: \r\nAuthor: \r\nAuthor-email: Orbax Authors <[email protected]>\r\nLicense: \r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: absl-py, cached_property, etils, importlib_resources, jax, jaxlib, msgpack, nest_asyncio, numpy, pyyaml, tensorstore, typing_extensions\r\n\r\n- Python version: 3.8\r\n\r\n\r\n### Problem you 
have encountered:\r\nWhen importing checkpoints, get the following error:\r\n \"\"\" \r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-1-0eac7b685376> in <module>\r\n 11 config.update(\"jax_enable_x64\", True)\r\n 12 from flax import serialization\r\n---> 13 from flax.training import checkpoints\r\n 14 from jax import numpy as jnp\r\n 15 import jax\r\n\r\n/gpfs/cfel/group/cmi/common/psi4/psi4conda/lib//python3.8/site-packages/flax/training/checkpoints.py in <module>\r\n 37 from jax import process_index\r\n 38 from jax import sharding\r\n---> 39 from jax.experimental.global_device_array import GlobalDeviceArray\r\n 40 from jax.experimental.multihost_utils import sync_global_devices\r\n 41 import orbax.checkpoint as orbax\r\n\r\nModuleNotFoundError: No module named 'jax.experimental.global_device_array'\r\n\r\n\"\"\"\r\n\r\nI guess it is a compatibility problem between jax and flax.\r\n\r\n### What you expected to happen:\r\n\r\nUsual importing\r\n\r\n\n", "before_files": [{"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Current Flax version at head on Github.\"\"\"\n__version__ = \"0.6.9\"\n\n", "path": "flax/version.py"}]} | 1,833 | 82 |
gh_patches_debug_42084 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-765 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Distribution builds can have path collisions
Distribution builds are not namespaced by the type of artifact produced, so an OpenSearch build and an OpenSearch Dashboards build could collide.
### Actual
The current path generated is `https://ci.opensearch.org/ci/dbc/builds/1.1.0/328/x64/manifest.yml`.
### Expected
It should have the distribution type in the path like `https://ci.opensearch.org/ci/dbc/opensearch/builds/1.1.0/328/x64/manifest.yml` and `https://ci.opensearch.org/ci/dbc/opensearch-dashboards/builds/1.1.0/328/x64/manifest.yml`
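
For illustration, a small helper like the one below (names and signature are assumptions, not the project's actual code) shows the kind of namespaced path composition being requested:

```python
# Sketch of the requested layout; dist_type would be "opensearch" or
# "opensearch-dashboards" depending on what the manifest builds.
def namespaced_manifest_url(base_url, dist_type, version, build_id, arch):
    return "/".join([base_url.rstrip("/"), dist_type, "builds",
                     version, build_id, arch, "manifest.yml"])

# namespaced_manifest_url("https://ci.opensearch.org/ci/dbc",
#                         "opensearch", "1.1.0", "328", "x64")
# -> "https://ci.opensearch.org/ci/dbc/opensearch/builds/1.1.0/328/x64/manifest.yml"
```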
### Required changes
- [x] Update the build jobs to include the build type in the URL for the generated manifests, see [bundle_recorder.py](https://github.com/opensearch-project/opensearch-build/blob/main/src/assemble_workflow/bundle_recorder.py).
- [x] Update the Jenkinsfile to upload to a destination based on the build type, see [Jenkinsfile](https://github.com/opensearch-project/opensearch-build/blob/main/Jenkinsfile#L127).
### Other changes
- [ ] Migrate/ update existing artifacts to the new destination
- [x] Update the [roles](https://github.com/opensearch-project/opensearch-build/blob/main/deployment/lib/identities.ts#L11) so they are separate between jobs to prevent any possible conflicts, then update the Jenkins jobs to build to the correct destination.
- [x] https://github.com/opensearch-project/opensearch-build/issues/661
- [x] https://github.com/opensearch-project/opensearch-build/issues/714
</issue>
<code>
[start of src/run_assemble.py]
1 #!/usr/bin/env python
2
3 # SPDX-License-Identifier: Apache-2.0
4 #
5 # The OpenSearch Contributors require contributions made to
6 # this file be licensed under the Apache-2.0 license or a
7 # compatible open source license.
8
9 import argparse
10 import logging
11 import os
12 import sys
13
14 from assemble_workflow.bundle_recorder import BundleRecorder
15 from assemble_workflow.bundles import Bundles
16 from manifests.build_manifest import BuildManifest
17 from system import console
18 from system.temporary_directory import TemporaryDirectory
19
20
21 def main():
22 parser = argparse.ArgumentParser(description="Assemble an OpenSearch Bundle")
23 parser.add_argument("manifest", type=argparse.FileType("r"), help="Manifest file.")
24 parser.add_argument(
25 "-v",
26 "--verbose",
27 help="Show more verbose output.",
28 action="store_const",
29 default=logging.INFO,
30 const=logging.DEBUG,
31 dest="logging_level",
32 )
33 args = parser.parse_args()
34
35 console.configure(level=args.logging_level)
36
37 build_manifest = BuildManifest.from_file(args.manifest)
38 build = build_manifest.build
39 artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))
40 output_dir = os.path.join(os.getcwd(), "bundle")
41 os.makedirs(output_dir, exist_ok=True)
42
43 with TemporaryDirectory() as work_dir:
44 logging.info(f"Bundling {build.name} ({build.architecture}) on {build.platform} into {output_dir} ...")
45
46 os.chdir(work_dir.name)
47
48 bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)
49
50 bundle = Bundles.create(build_manifest, artifacts_dir, bundle_recorder)
51
52 bundle.install_min()
53 bundle.install_plugins()
54 logging.info(f"Installed plugins: {bundle.installed_plugins}")
55
56 # Save a copy of the manifest inside of the tar
57 bundle_recorder.write_manifest(bundle.archive_path)
58 bundle.build_tar(output_dir)
59
60 bundle_recorder.write_manifest(output_dir)
61
62 logging.info("Done.")
63
64
65 if __name__ == "__main__":
66 sys.exit(main())
67
[end of src/run_assemble.py]
[start of src/assemble_workflow/bundle_recorder.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8 from urllib.parse import urljoin
9
10 from manifests.bundle_manifest import BundleManifest
11
12
13 class BundleRecorder:
14 def __init__(self, build, output_dir, artifacts_dir):
15 self.output_dir = output_dir
16 self.build_id = build.id
17 self.public_url = os.getenv("PUBLIC_ARTIFACT_URL", None)
18 self.version = build.version
19 self.tar_name = self.__get_tar_name(build)
20 self.artifacts_dir = artifacts_dir
21 self.architecture = build.architecture
22 self.bundle_manifest = self.BundleManifestBuilder(
23 build.id,
24 build.name,
25 build.version,
26 build.platform,
27 build.architecture,
28 self.__get_tar_location(),
29 )
30
31 def __get_tar_name(self, build):
32 parts = [
33 build.name.lower().replace(" ", "-"),
34 build.version,
35 build.platform,
36 build.architecture,
37 ]
38 return "-".join(parts) + ".tar.gz"
39
40 def __get_public_url_path(self, folder, rel_path):
41 path = "/".join((folder, self.version, self.build_id, self.architecture, rel_path))
42 return urljoin(self.public_url + "/", path)
43
44 def __get_location(self, folder_name, rel_path, abs_path):
45 if self.public_url:
46 return self.__get_public_url_path(folder_name, rel_path)
47 return abs_path
48
49 # Assembled bundles are expected to be served from a separate "bundles" folder
50 # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id
51 def __get_tar_location(self):
52 return self.__get_location("bundles", self.tar_name, os.path.join(self.output_dir, self.tar_name))
53
54 # Build artifacts are expected to be served from a "builds" folder
55 # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>
56 def __get_component_location(self, component_rel_path):
57 abs_path = os.path.join(self.artifacts_dir, component_rel_path)
58 return self.__get_location("builds", component_rel_path, abs_path)
59
60 def record_component(self, component, rel_path):
61 self.bundle_manifest.append_component(
62 component.name,
63 component.repository,
64 component.ref,
65 component.commit_id,
66 self.__get_component_location(rel_path),
67 )
68
69 def get_manifest(self):
70 return self.bundle_manifest.to_manifest()
71
72 def write_manifest(self, folder):
73 manifest_path = os.path.join(folder, "manifest.yml")
74 self.get_manifest().to_file(manifest_path)
75
76 class BundleManifestBuilder:
77 def __init__(self, build_id, name, version, platform, architecture, location):
78 self.data = {}
79 self.data["build"] = {}
80 self.data["build"]["id"] = build_id
81 self.data["build"]["name"] = name
82 self.data["build"]["version"] = str(version)
83 self.data["build"]["platform"] = platform
84 self.data["build"]["architecture"] = architecture
85 self.data["build"]["location"] = location
86 self.data["schema-version"] = "1.1"
87 # We need to store components as a hash so that we can append artifacts by component name
88 # When we convert to a BundleManifest this will get converted back into a list
89 self.data["components"] = []
90
91 def append_component(self, name, repository_url, ref, commit_id, location):
92 component = {
93 "name": name,
94 "repository": repository_url,
95 "ref": ref,
96 "commit_id": commit_id,
97 "location": location,
98 }
99 self.data["components"].append(component)
100
101 def to_manifest(self):
102 return BundleManifest(self.data)
103
[end of src/assemble_workflow/bundle_recorder.py]
[start of src/run_build.py]
1 #!/usr/bin/env python
2
3 # SPDX-License-Identifier: Apache-2.0
4 #
5 # The OpenSearch Contributors require contributions made to
6 # this file be licensed under the Apache-2.0 license or a
7 # compatible open source license.
8
9 import logging
10 import os
11 import sys
12
13 from build_workflow.build_args import BuildArgs
14 from build_workflow.build_recorder import BuildRecorder
15 from build_workflow.build_target import BuildTarget
16 from build_workflow.builder import Builder
17 from git.git_repository import GitRepository
18 from manifests.input_manifest import InputManifest
19 from system import console
20 from system.temporary_directory import TemporaryDirectory
21
22
23 def main():
24 args = BuildArgs()
25 console.configure(level=args.logging_level)
26 manifest = InputManifest.from_file(args.manifest)
27
28 with TemporaryDirectory(keep=args.keep) as work_dir:
29 output_dir = os.path.join(os.getcwd(), "artifacts")
30
31 logging.info(f"Building in {work_dir.name}")
32
33 os.chdir(work_dir.name)
34
35 target = BuildTarget(
36 name=manifest.build.name,
37 version=manifest.build.version,
38 snapshot=args.snapshot,
39 output_dir=output_dir,
40 platform=args.platform,
41 architecture=args.architecture,
42 )
43
44 os.makedirs(target.output_dir, exist_ok=True)
45
46 build_recorder = BuildRecorder(target)
47
48 logging.info(
49 f"Building {manifest.build.name} ({target.architecture}) into {target.output_dir}"
50 )
51
52 for component in manifest.components:
53
54 if args.component and args.component != component.name:
55 logging.info(f"Skipping {component.name}")
56 continue
57
58 logging.info(f"Building {component.name}")
59
60 with GitRepository(
61 component.repository,
62 component.ref,
63 os.path.join(work_dir.name, component.name),
64 component.working_directory,
65 ) as repo:
66 try:
67 builder = Builder(component.name, repo, build_recorder)
68 builder.build(target)
69 builder.export_artifacts()
70 except:
71 logging.error(f"Error building {component.name}, retry with: {args.component_command(component.name)}")
72 raise
73
74 build_recorder.write_manifest()
75
76 logging.info("Done.")
77
78
79 if __name__ == "__main__":
80 sys.exit(main())
81
[end of src/run_build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/assemble_workflow/bundle_recorder.py b/src/assemble_workflow/bundle_recorder.py
--- a/src/assemble_workflow/bundle_recorder.py
+++ b/src/assemble_workflow/bundle_recorder.py
@@ -11,10 +11,10 @@
class BundleRecorder:
- def __init__(self, build, output_dir, artifacts_dir):
+ def __init__(self, build, output_dir, artifacts_dir, base_url):
self.output_dir = output_dir
self.build_id = build.id
- self.public_url = os.getenv("PUBLIC_ARTIFACT_URL", None)
+ self.base_url = base_url
self.version = build.version
self.tar_name = self.__get_tar_name(build)
self.artifacts_dir = artifacts_dir
@@ -38,18 +38,18 @@
return "-".join(parts) + ".tar.gz"
def __get_public_url_path(self, folder, rel_path):
- path = "/".join((folder, self.version, self.build_id, self.architecture, rel_path))
- return urljoin(self.public_url + "/", path)
+ path = "/".join((folder, rel_path))
+ return urljoin(self.base_url + "/", path)
def __get_location(self, folder_name, rel_path, abs_path):
- if self.public_url:
+ if self.base_url:
return self.__get_public_url_path(folder_name, rel_path)
return abs_path
# Assembled bundles are expected to be served from a separate "bundles" folder
# Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id
def __get_tar_location(self):
- return self.__get_location("bundles", self.tar_name, os.path.join(self.output_dir, self.tar_name))
+ return self.__get_location("dist", self.tar_name, os.path.join(self.output_dir, self.tar_name))
# Build artifacts are expected to be served from a "builds" folder
# Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>
diff --git a/src/run_assemble.py b/src/run_assemble.py
--- a/src/run_assemble.py
+++ b/src/run_assemble.py
@@ -30,6 +30,7 @@
const=logging.DEBUG,
dest="logging_level",
)
+ parser.add_argument("-b", "--base-url", dest='base_url', help="The base url to download the artifacts.")
args = parser.parse_args()
console.configure(level=args.logging_level)
@@ -37,7 +38,7 @@
build_manifest = BuildManifest.from_file(args.manifest)
build = build_manifest.build
artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))
- output_dir = os.path.join(os.getcwd(), "bundle")
+ output_dir = os.path.join(os.getcwd(), "dist")
os.makedirs(output_dir, exist_ok=True)
with TemporaryDirectory() as work_dir:
@@ -45,7 +46,7 @@
os.chdir(work_dir.name)
- bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)
+ bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir, args.base_url)
bundle = Bundles.create(build_manifest, artifacts_dir, bundle_recorder)
diff --git a/src/run_build.py b/src/run_build.py
--- a/src/run_build.py
+++ b/src/run_build.py
@@ -26,7 +26,7 @@
manifest = InputManifest.from_file(args.manifest)
with TemporaryDirectory(keep=args.keep) as work_dir:
- output_dir = os.path.join(os.getcwd(), "artifacts")
+ output_dir = os.path.join(os.getcwd(), "builds")
logging.info(f"Building in {work_dir.name}")
| {"golden_diff": "diff --git a/src/assemble_workflow/bundle_recorder.py b/src/assemble_workflow/bundle_recorder.py\n--- a/src/assemble_workflow/bundle_recorder.py\n+++ b/src/assemble_workflow/bundle_recorder.py\n@@ -11,10 +11,10 @@\n \n \n class BundleRecorder:\n- def __init__(self, build, output_dir, artifacts_dir):\n+ def __init__(self, build, output_dir, artifacts_dir, base_url):\n self.output_dir = output_dir\n self.build_id = build.id\n- self.public_url = os.getenv(\"PUBLIC_ARTIFACT_URL\", None)\n+ self.base_url = base_url\n self.version = build.version\n self.tar_name = self.__get_tar_name(build)\n self.artifacts_dir = artifacts_dir\n@@ -38,18 +38,18 @@\n return \"-\".join(parts) + \".tar.gz\"\n \n def __get_public_url_path(self, folder, rel_path):\n- path = \"/\".join((folder, self.version, self.build_id, self.architecture, rel_path))\n- return urljoin(self.public_url + \"/\", path)\n+ path = \"/\".join((folder, rel_path))\n+ return urljoin(self.base_url + \"/\", path)\n \n def __get_location(self, folder_name, rel_path, abs_path):\n- if self.public_url:\n+ if self.base_url:\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n \n # Assembled bundles are expected to be served from a separate \"bundles\" folder\n # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n def __get_tar_location(self):\n- return self.__get_location(\"bundles\", self.tar_name, os.path.join(self.output_dir, self.tar_name))\n+ return self.__get_location(\"dist\", self.tar_name, os.path.join(self.output_dir, self.tar_name))\n \n # Build artifacts are expected to be served from a \"builds\" folder\n # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\ndiff --git a/src/run_assemble.py b/src/run_assemble.py\n--- a/src/run_assemble.py\n+++ b/src/run_assemble.py\n@@ -30,6 +30,7 @@\n const=logging.DEBUG,\n dest=\"logging_level\",\n )\n+ parser.add_argument(\"-b\", \"--base-url\", dest='base_url', help=\"The base url to download the artifacts.\")\n args = parser.parse_args()\n \n console.configure(level=args.logging_level)\n@@ -37,7 +38,7 @@\n build_manifest = BuildManifest.from_file(args.manifest)\n build = build_manifest.build\n artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))\n- output_dir = os.path.join(os.getcwd(), \"bundle\")\n+ output_dir = os.path.join(os.getcwd(), \"dist\")\n os.makedirs(output_dir, exist_ok=True)\n \n with TemporaryDirectory() as work_dir:\n@@ -45,7 +46,7 @@\n \n os.chdir(work_dir.name)\n \n- bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)\n+ bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir, args.base_url)\n \n bundle = Bundles.create(build_manifest, artifacts_dir, bundle_recorder)\n \ndiff --git a/src/run_build.py b/src/run_build.py\n--- a/src/run_build.py\n+++ b/src/run_build.py\n@@ -26,7 +26,7 @@\n manifest = InputManifest.from_file(args.manifest)\n \n with TemporaryDirectory(keep=args.keep) as work_dir:\n- output_dir = os.path.join(os.getcwd(), \"artifacts\")\n+ output_dir = os.path.join(os.getcwd(), \"builds\")\n \n logging.info(f\"Building in {work_dir.name}\")\n", "issue": "[Bug]: Distribution builds can have path collisions\nDistribution builds are not namespace'd based on the type of artifact produced so an OpenSearch and OpenSearch Dashboard build could collide.\r\n\r\n### Actual\r\nThe current path generated is `https://ci.opensearch.org/ci/dbc/builds/1.1.0/328/x64/manifest.yml`.\r\n\r\n### Expected\r\nIt should have the distribution type in the path like 
`https://ci.opensearch.org/ci/dbc/opensearch/builds/1.1.0/328/x64/manifest.yml` and `https://ci.opensearch.org/ci/dbc/opensearch-dashboards/builds/1.1.0/328/x64/manifest.yml`\r\n\r\n### Required changes\r\n- [x] Update the build jobs to include the build type in the url for the generated manifests, see [bundle_recorder.py](https://github.com/opensearch-project/opensearch-build/blob/main/src/assemble_workflow/bundle_recorder.py).\r\n- [x] Update the jenkinsfile to upload to a destination based on the build type, see [Jenkinsfile](https://github.com/opensearch-project/opensearch-build/blob/main/Jenkinsfile#L127).\r\n\r\n### Other changes\r\n- [ ] Migrate/ update existing artifacts to the new destination\r\n- [x] Update the [roles](https://github.com/opensearch-project/opensearch-build/blob/main/deployment/lib/identities.ts#L11) to be separate between jobs to prevent any possible conflicts, then update the jenkins jobs to build to the correct destination.\r\n- [x] https://github.com/opensearch-project/opensearch-build/issues/661\r\n- [x] https://github.com/opensearch-project/opensearch-build/issues/714\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom assemble_workflow.bundle_recorder import BundleRecorder\nfrom assemble_workflow.bundles import Bundles\nfrom manifests.build_manifest import BuildManifest\nfrom system import console\nfrom system.temporary_directory import TemporaryDirectory\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Assemble an OpenSearch Bundle\")\n parser.add_argument(\"manifest\", type=argparse.FileType(\"r\"), help=\"Manifest file.\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Show more verbose output.\",\n action=\"store_const\",\n default=logging.INFO,\n const=logging.DEBUG,\n dest=\"logging_level\",\n )\n args = parser.parse_args()\n\n console.configure(level=args.logging_level)\n\n build_manifest = BuildManifest.from_file(args.manifest)\n build = build_manifest.build\n artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))\n output_dir = os.path.join(os.getcwd(), \"bundle\")\n os.makedirs(output_dir, exist_ok=True)\n\n with TemporaryDirectory() as work_dir:\n logging.info(f\"Bundling {build.name} ({build.architecture}) on {build.platform} into {output_dir} ...\")\n\n os.chdir(work_dir.name)\n\n bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)\n\n bundle = Bundles.create(build_manifest, artifacts_dir, bundle_recorder)\n\n bundle.install_min()\n bundle.install_plugins()\n logging.info(f\"Installed plugins: {bundle.installed_plugins}\")\n\n # Save a copy of the manifest inside of the tar\n bundle_recorder.write_manifest(bundle.archive_path)\n bundle.build_tar(output_dir)\n\n bundle_recorder.write_manifest(output_dir)\n\n logging.info(\"Done.\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "src/run_assemble.py"}, {"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nfrom urllib.parse import urljoin\n\nfrom manifests.bundle_manifest import BundleManifest\n\n\nclass BundleRecorder:\n def __init__(self, build, output_dir, artifacts_dir):\n 
self.output_dir = output_dir\n self.build_id = build.id\n self.public_url = os.getenv(\"PUBLIC_ARTIFACT_URL\", None)\n self.version = build.version\n self.tar_name = self.__get_tar_name(build)\n self.artifacts_dir = artifacts_dir\n self.architecture = build.architecture\n self.bundle_manifest = self.BundleManifestBuilder(\n build.id,\n build.name,\n build.version,\n build.platform,\n build.architecture,\n self.__get_tar_location(),\n )\n\n def __get_tar_name(self, build):\n parts = [\n build.name.lower().replace(\" \", \"-\"),\n build.version,\n build.platform,\n build.architecture,\n ]\n return \"-\".join(parts) + \".tar.gz\"\n\n def __get_public_url_path(self, folder, rel_path):\n path = \"/\".join((folder, self.version, self.build_id, self.architecture, rel_path))\n return urljoin(self.public_url + \"/\", path)\n\n def __get_location(self, folder_name, rel_path, abs_path):\n if self.public_url:\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n\n # Assembled bundles are expected to be served from a separate \"bundles\" folder\n # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n def __get_tar_location(self):\n return self.__get_location(\"bundles\", self.tar_name, os.path.join(self.output_dir, self.tar_name))\n\n # Build artifacts are expected to be served from a \"builds\" folder\n # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", component_rel_path, abs_path)\n\n def record_component(self, component, rel_path):\n self.bundle_manifest.append_component(\n component.name,\n component.repository,\n component.ref,\n component.commit_id,\n self.__get_component_location(rel_path),\n )\n\n def get_manifest(self):\n return self.bundle_manifest.to_manifest()\n\n def write_manifest(self, folder):\n manifest_path = os.path.join(folder, \"manifest.yml\")\n self.get_manifest().to_file(manifest_path)\n\n class BundleManifestBuilder:\n def __init__(self, build_id, name, version, platform, architecture, location):\n self.data = {}\n self.data[\"build\"] = {}\n self.data[\"build\"][\"id\"] = build_id\n self.data[\"build\"][\"name\"] = name\n self.data[\"build\"][\"version\"] = str(version)\n self.data[\"build\"][\"platform\"] = platform\n self.data[\"build\"][\"architecture\"] = architecture\n self.data[\"build\"][\"location\"] = location\n self.data[\"schema-version\"] = \"1.1\"\n # We need to store components as a hash so that we can append artifacts by component name\n # When we convert to a BundleManifest this will get converted back into a list\n self.data[\"components\"] = []\n\n def append_component(self, name, repository_url, ref, commit_id, location):\n component = {\n \"name\": name,\n \"repository\": repository_url,\n \"ref\": ref,\n \"commit_id\": commit_id,\n \"location\": location,\n }\n self.data[\"components\"].append(component)\n\n def to_manifest(self):\n return BundleManifest(self.data)\n", "path": "src/assemble_workflow/bundle_recorder.py"}, {"content": "#!/usr/bin/env python\n\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport logging\nimport os\nimport sys\n\nfrom build_workflow.build_args import BuildArgs\nfrom build_workflow.build_recorder import BuildRecorder\nfrom build_workflow.build_target import 
BuildTarget\nfrom build_workflow.builder import Builder\nfrom git.git_repository import GitRepository\nfrom manifests.input_manifest import InputManifest\nfrom system import console\nfrom system.temporary_directory import TemporaryDirectory\n\n\ndef main():\n args = BuildArgs()\n console.configure(level=args.logging_level)\n manifest = InputManifest.from_file(args.manifest)\n\n with TemporaryDirectory(keep=args.keep) as work_dir:\n output_dir = os.path.join(os.getcwd(), \"artifacts\")\n\n logging.info(f\"Building in {work_dir.name}\")\n\n os.chdir(work_dir.name)\n\n target = BuildTarget(\n name=manifest.build.name,\n version=manifest.build.version,\n snapshot=args.snapshot,\n output_dir=output_dir,\n platform=args.platform,\n architecture=args.architecture,\n )\n\n os.makedirs(target.output_dir, exist_ok=True)\n\n build_recorder = BuildRecorder(target)\n\n logging.info(\n f\"Building {manifest.build.name} ({target.architecture}) into {target.output_dir}\"\n )\n\n for component in manifest.components:\n\n if args.component and args.component != component.name:\n logging.info(f\"Skipping {component.name}\")\n continue\n\n logging.info(f\"Building {component.name}\")\n\n with GitRepository(\n component.repository,\n component.ref,\n os.path.join(work_dir.name, component.name),\n component.working_directory,\n ) as repo:\n try:\n builder = Builder(component.name, repo, build_recorder)\n builder.build(target)\n builder.export_artifacts()\n except:\n logging.error(f\"Error building {component.name}, retry with: {args.component_command(component.name)}\")\n raise\n\n build_recorder.write_manifest()\n\n logging.info(\"Done.\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "src/run_build.py"}]} | 3,185 | 843 |
gh_patches_debug_21264 | rasdani/github-patches | git_diff | inventree__InvenTree-6250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
News Feed task doesn't work behind proxy, impacting performance
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find a similar issue
### Describe the bug*
The `update_news_feed` task attempts to fetch the RSS/Atom feed once daily. This, however, doesn't work behind a proxy server.
The result is that these tasks occupy workers all the time, and never complete.
Each worker is terminated roughly every 90 seconds due to this.
### Steps to Reproduce
1. Put the InvenTree backend on a network unable to reach `INVENTREE_NEWS_URL`
2. Trigger the task
3. The task leads to continuous termination of workers due to timeouts
### Expected behaviour
Task should finish with no new News entries added if URL is unreachable.
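
A minimal sketch of that behaviour (an assumed approach, not necessarily the actual patch): fetch the feed with an explicit timeout and hand the bytes to `feedparser`, so an unreachable URL fails fast instead of pinning a worker:

```python
# Sketch only: requests honours HTTP(S)_PROXY environment variables and
# supports timeouts, unlike letting feedparser open the URL itself.
import feedparser
import requests

def fetch_news_feed(url, timeout=10.0):
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
    except requests.RequestException:
        return None  # unreachable feed: skip this run, add no entries
    return feedparser.parse(response.content)
```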
### Deployment Method
- [ ] Docker
- [X] Bare metal
### Version Information
0.12.10
### Please verify if you can reproduce this bug on the demo site.
- [ ] I can reproduce this bug on the demo site.
### Relevant log output
_No response_
</issue>
<code>
[start of InvenTree/common/tasks.py]
1 """Tasks (processes that get offloaded) for common app."""
2
3 import logging
4 import os
5 from datetime import datetime, timedelta
6
7 from django.conf import settings
8 from django.core.exceptions import AppRegistryNotReady
9 from django.db.utils import IntegrityError, OperationalError
10 from django.utils import timezone
11
12 import feedparser
13
14 from InvenTree.helpers_model import getModelsWithMixin
15 from InvenTree.models import InvenTreeNotesMixin
16 from InvenTree.tasks import ScheduledTask, scheduled_task
17
18 logger = logging.getLogger('inventree')
19
20
21 @scheduled_task(ScheduledTask.DAILY)
22 def delete_old_notifications():
23 """Remove old notifications from the database.
24
25 Anything older than ~3 months is removed
26 """
27 try:
28 from common.models import NotificationEntry
29 except AppRegistryNotReady: # pragma: no cover
30 logger.info(
31 "Could not perform 'delete_old_notifications' - App registry not ready"
32 )
33 return
34
35 before = timezone.now() - timedelta(days=90)
36
37 # Delete notification records before the specified date
38 NotificationEntry.objects.filter(updated__lte=before).delete()
39
40
41 @scheduled_task(ScheduledTask.DAILY)
42 def update_news_feed():
43 """Update the newsfeed."""
44 try:
45 from common.models import NewsFeedEntry
46 except AppRegistryNotReady: # pragma: no cover
47 logger.info("Could not perform 'update_news_feed' - App registry not ready")
48 return
49
50 # Fetch and parse feed
51 try:
52 d = feedparser.parse(settings.INVENTREE_NEWS_URL)
53 except Exception as entry: # pragma: no cover
54 logger.warning('update_news_feed: Error parsing the newsfeed', entry)
55 return
56
57 # Get a reference list
58 id_list = [a.feed_id for a in NewsFeedEntry.objects.all()]
59
60 # Iterate over entries
61 for entry in d.entries:
62 # Check if id already exists
63 if entry.id in id_list:
64 continue
65
66 # Create entry
67 try:
68 NewsFeedEntry.objects.create(
69 feed_id=entry.id,
70 title=entry.title,
71 link=entry.link,
72 published=entry.published,
73 author=entry.author,
74 summary=entry.summary,
75 )
76 except (IntegrityError, OperationalError):
77 # Sometimes errors-out on database start-up
78 pass
79
80 logger.info('update_news_feed: Sync done')
81
82
83 @scheduled_task(ScheduledTask.DAILY)
84 def delete_old_notes_images():
85 """Remove old notes images from the database.
86
87 Anything older than ~3 months is removed, unless it is linked to a note
88 """
89 try:
90 from common.models import NotesImage
91 except AppRegistryNotReady:
92 logger.info(
93 "Could not perform 'delete_old_notes_images' - App registry not ready"
94 )
95 return
96
97 # Remove any notes which point to non-existent image files
98 for note in NotesImage.objects.all():
99 if not os.path.exists(note.image.path):
100 logger.info('Deleting note %s - image file does not exist', note.image.path)
101 note.delete()
102
103 note_classes = getModelsWithMixin(InvenTreeNotesMixin)
104 before = datetime.now() - timedelta(days=90)
105
106 for note in NotesImage.objects.filter(date__lte=before):
107 # Find any images which are no longer referenced by a note
108
109 found = False
110
111 img = note.image.name
112
113 for model in note_classes:
114 if model.objects.filter(notes__icontains=img).exists():
115 found = True
116 break
117
118 if not found:
119 logger.info('Deleting note %s - image file not linked to a note', img)
120 note.delete()
121
122 # Finally, remove any images in the notes dir which are not linked to a note
123 notes_dir = os.path.join(settings.MEDIA_ROOT, 'notes')
124
125 try:
126 images = os.listdir(notes_dir)
127 except FileNotFoundError:
128 # Thrown if the directory does not exist
129 images = []
130
131 all_notes = NotesImage.objects.all()
132
133 for image in images:
134 found = False
135 for note in all_notes:
136 img_path = os.path.basename(note.image.path)
137 if img_path == image:
138 found = True
139 break
140
141 if not found:
142 logger.info('Deleting note %s - image file not linked to a note', image)
143 os.remove(os.path.join(notes_dir, image))
144
[end of InvenTree/common/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/InvenTree/common/tasks.py b/InvenTree/common/tasks.py
--- a/InvenTree/common/tasks.py
+++ b/InvenTree/common/tasks.py
@@ -10,6 +10,7 @@
from django.utils import timezone
import feedparser
+import requests
from InvenTree.helpers_model import getModelsWithMixin
from InvenTree.models import InvenTreeNotesMixin
@@ -47,11 +48,16 @@
logger.info("Could not perform 'update_news_feed' - App registry not ready")
return
+ # News feed isn't defined, no need to continue
+ if not settings.INVENTREE_NEWS_URL or type(settings.INVENTREE_NEWS_URL) != str:
+ return
+
# Fetch and parse feed
try:
- d = feedparser.parse(settings.INVENTREE_NEWS_URL)
- except Exception as entry: # pragma: no cover
- logger.warning('update_news_feed: Error parsing the newsfeed', entry)
+ feed = requests.get(settings.INVENTREE_NEWS_URL)
+ d = feedparser.parse(feed.content)
+ except Exception: # pragma: no cover
+ logger.warning('update_news_feed: Error parsing the newsfeed')
return
# Get a reference list
| {"golden_diff": "diff --git a/InvenTree/common/tasks.py b/InvenTree/common/tasks.py\n--- a/InvenTree/common/tasks.py\n+++ b/InvenTree/common/tasks.py\n@@ -10,6 +10,7 @@\n from django.utils import timezone\n \n import feedparser\n+import requests\n \n from InvenTree.helpers_model import getModelsWithMixin\n from InvenTree.models import InvenTreeNotesMixin\n@@ -47,11 +48,16 @@\n logger.info(\"Could not perform 'update_news_feed' - App registry not ready\")\n return\n \n+ # News feed isn't defined, no need to continue\n+ if not settings.INVENTREE_NEWS_URL or type(settings.INVENTREE_NEWS_URL) != str:\n+ return\n+\n # Fetch and parse feed\n try:\n- d = feedparser.parse(settings.INVENTREE_NEWS_URL)\n- except Exception as entry: # pragma: no cover\n- logger.warning('update_news_feed: Error parsing the newsfeed', entry)\n+ feed = requests.get(settings.INVENTREE_NEWS_URL)\n+ d = feedparser.parse(feed.content)\n+ except Exception: # pragma: no cover\n+ logger.warning('update_news_feed: Error parsing the newsfeed')\n return\n \n # Get a reference list\n", "issue": "News Feed task doesn't work behind proxy, impacting performance\n### Please verify that this bug has NOT been raised before.\n\n- [X] I checked and didn't find a similar issue\n\n### Describe the bug*\n\nThe `update_news_feed` task attempts to fetch the RSS/Atom feed once daily. This, however, doesn't work behind a proxy server.\r\n\r\nThe result is that these tasks occupy workers all the time, and never complete.\r\nEach worker is terminated roughly every 90 seconds due to this.\n\n### Steps to Reproduce\n\n1. Put the InvenTree backend on a network unable to reach `INVENTREE_NEWS_URL`\r\n2. Trigger the task\r\n3. Task will lead to continuous timeout termination of workers\n\n### Expected behaviour\n\nTask should finish with no new News entries added if URL is unreachable.\n\n### Deployment Method\n\n- [ ] Docker\n- [X] Bare metal\n\n### Version Information\n\n0.12.10\n\n### Please verify if you can reproduce this bug on the demo site.\n\n- [ ] I can reproduce this bug on the demo site.\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "\"\"\"Tasks (processes that get offloaded) for common app.\"\"\"\n\nimport logging\nimport os\nfrom datetime import datetime, timedelta\n\nfrom django.conf import settings\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.db.utils import IntegrityError, OperationalError\nfrom django.utils import timezone\n\nimport feedparser\n\nfrom InvenTree.helpers_model import getModelsWithMixin\nfrom InvenTree.models import InvenTreeNotesMixin\nfrom InvenTree.tasks import ScheduledTask, scheduled_task\n\nlogger = logging.getLogger('inventree')\n\n\n@scheduled_task(ScheduledTask.DAILY)\ndef delete_old_notifications():\n \"\"\"Remove old notifications from the database.\n\n Anything older than ~3 months is removed\n \"\"\"\n try:\n from common.models import NotificationEntry\n except AppRegistryNotReady: # pragma: no cover\n logger.info(\n \"Could not perform 'delete_old_notifications' - App registry not ready\"\n )\n return\n\n before = timezone.now() - timedelta(days=90)\n\n # Delete notification records before the specified date\n NotificationEntry.objects.filter(updated__lte=before).delete()\n\n\n@scheduled_task(ScheduledTask.DAILY)\ndef update_news_feed():\n \"\"\"Update the newsfeed.\"\"\"\n try:\n from common.models import NewsFeedEntry\n except AppRegistryNotReady: # pragma: no cover\n logger.info(\"Could not perform 'update_news_feed' - App registry not ready\")\n 
return\n\n # Fetch and parse feed\n try:\n d = feedparser.parse(settings.INVENTREE_NEWS_URL)\n except Exception as entry: # pragma: no cover\n logger.warning('update_news_feed: Error parsing the newsfeed', entry)\n return\n\n # Get a reference list\n id_list = [a.feed_id for a in NewsFeedEntry.objects.all()]\n\n # Iterate over entries\n for entry in d.entries:\n # Check if id already exists\n if entry.id in id_list:\n continue\n\n # Create entry\n try:\n NewsFeedEntry.objects.create(\n feed_id=entry.id,\n title=entry.title,\n link=entry.link,\n published=entry.published,\n author=entry.author,\n summary=entry.summary,\n )\n except (IntegrityError, OperationalError):\n # Sometimes errors-out on database start-up\n pass\n\n logger.info('update_news_feed: Sync done')\n\n\n@scheduled_task(ScheduledTask.DAILY)\ndef delete_old_notes_images():\n \"\"\"Remove old notes images from the database.\n\n Anything older than ~3 months is removed, unless it is linked to a note\n \"\"\"\n try:\n from common.models import NotesImage\n except AppRegistryNotReady:\n logger.info(\n \"Could not perform 'delete_old_notes_images' - App registry not ready\"\n )\n return\n\n # Remove any notes which point to non-existent image files\n for note in NotesImage.objects.all():\n if not os.path.exists(note.image.path):\n logger.info('Deleting note %s - image file does not exist', note.image.path)\n note.delete()\n\n note_classes = getModelsWithMixin(InvenTreeNotesMixin)\n before = datetime.now() - timedelta(days=90)\n\n for note in NotesImage.objects.filter(date__lte=before):\n # Find any images which are no longer referenced by a note\n\n found = False\n\n img = note.image.name\n\n for model in note_classes:\n if model.objects.filter(notes__icontains=img).exists():\n found = True\n break\n\n if not found:\n logger.info('Deleting note %s - image file not linked to a note', img)\n note.delete()\n\n # Finally, remove any images in the notes dir which are not linked to a note\n notes_dir = os.path.join(settings.MEDIA_ROOT, 'notes')\n\n try:\n images = os.listdir(notes_dir)\n except FileNotFoundError:\n # Thrown if the directory does not exist\n images = []\n\n all_notes = NotesImage.objects.all()\n\n for image in images:\n found = False\n for note in all_notes:\n img_path = os.path.basename(note.image.path)\n if img_path == image:\n found = True\n break\n\n if not found:\n logger.info('Deleting note %s - image file not linked to a note', image)\n os.remove(os.path.join(notes_dir, image))\n", "path": "InvenTree/common/tasks.py"}]} | 2,039 | 284 |
gh_patches_debug_55968 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2740 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check Azure Front Door WAF enabled fails even when a WAF is correctly assigned
**Describe the issue**
[`CKV_AZURE_121`](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py) fails despite a Web Application Firewall policy being correctly applied.
WAF policies are applied by specifying a value for `web_application_firewall_policy_link_id` inside a `frontend_endpoint` block within the `azurerm_frontdoor` resource itself.
The [documentation](https://docs.bridgecrew.io/docs/ensure-that-azure-front-door-enables-waf) seems to expect that the `web_application_firewall_policy_link_id` attribute is defined in the resource block itself, rather than in a sub-block (`frontend_endpoint`).
- [`azurerm_frontdoor` resource documentation reference](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/frontdoor#web_application_firewall_policy_link_id)
**Examples**
```terraform
resource "azurerm_frontdoor" "test" {
name = "test-front-door"
resource_group_name = var.resource_group_name
enforce_backend_pools_certificate_name_check = false
tags = var.tags
frontend_endpoint {
name = "DefaultFrontend"
host_name = "test-front-door.azurefd.net"
web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id
}
# ...
```
**Version (please complete the following information):**
- Checkov Version: 2.0.930
**Additional context**
</issue>
<code>
[start of checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py]
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.common.models.enums import CheckCategories
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
4
5
6 class AzureFrontDoorEnablesWAF(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that Azure Front Door enables WAF"
9 id = "CKV_AZURE_121"
10 supported_resources = ['azurerm_frontdoor']
11 categories = [CheckCategories.NETWORKING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "web_application_firewall_policy_link_id"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = AzureFrontDoorEnablesWAF()
22
[end of checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
--- a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
+++ b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
@@ -12,7 +12,7 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
- return "web_application_firewall_policy_link_id"
+ return "frontend_endpoint/[0]/web_application_firewall_policy_link_id"
def get_expected_value(self):
return ANY_VALUE
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n--- a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n+++ b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n@@ -12,7 +12,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self):\n- return \"web_application_firewall_policy_link_id\"\n+ return \"frontend_endpoint/[0]/web_application_firewall_policy_link_id\"\n \n def get_expected_value(self):\n return ANY_VALUE\n", "issue": "Check Azure Front Door WAF enabled fails even when a WAF is correctly assigned\n**Describe the issue**\r\n[`CKV_AZURE_121`](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py) fails despite a Web Application Firewall policy being correctly applied. \r\n\r\nWAF policies are applied by specifying a value for `web_application_firewall_policy_link_id` inside a `frontend_endpoint` block within the `azurerm_frontdoor` resource itself.\r\n\r\nThe [documentation](https://docs.bridgecrew.io/docs/ensure-that-azure-front-door-enables-waf) seems to expect that the `web_application_firewall_policy_link_id` attribute is defined in the resource block itself, rather than in a sub-block (`frontend_endpoint`).\r\n\r\n- [`azurerm_frontdoor` resource documentation reference](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/frontdoor#web_application_firewall_policy_link_id)\r\n\r\n**Examples**\r\n```terraform\r\nresource \"azurerm_frontdoor\" \"test\" {\r\n name = \"test-front-door\"\r\n resource_group_name = var.resource_group_name\r\n enforce_backend_pools_certificate_name_check = false\r\n tags = var.tags\r\n\r\n frontend_endpoint {\r\n name = \"DefaultFrontend\"\r\n host_name = \"test-front-door.azurefd.net\"\r\n web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id\r\n }\r\n\r\n # ... \r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version: 2.0.930\r\n\r\n**Additional context**\r\n\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AzureFrontDoorEnablesWAF(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Front Door enables WAF\"\n id = \"CKV_AZURE_121\"\n supported_resources = ['azurerm_frontdoor']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"web_application_firewall_policy_link_id\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = AzureFrontDoorEnablesWAF()\n", "path": "checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py"}]} | 1,122 | 168 |
gh_patches_debug_1917 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2082 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plot_dataframe.py
## Step 1: Have you searched for this issue before posting it?

Couldn't find a similar issue, so starting a new one.
## Step 2: Describe your environment
* Python Version: Python 3.6.8
* CCXT version: ccxt==1.18.992
* Branch: Master
* Last Commit ID: b8713a515e960f1ffadcf1c7ee62c4bee80b506c
## Step 3: Describe the problem:
Unable to plot my backtest results.
*Explain the problem you have encountered*
Executing the following command results in the error shown below.
### Steps to reproduce:
```
Command: python3 scripts/plot_dataframe.py -s EMACrossHTF1h --export EMACrossHTF1h_results.json -p BTC/USDT --datadir user_data/data/binance/
```
### Observed Results:
An error is thrown.
### Relevant code exceptions or logs:
```
File "scripts/plot_dataframe.py", line 113, in <module>
main(sys.argv[1:])
File "scripts/plot_dataframe.py", line 107, in main
plot_parse_args(sysargv)
File "scripts/plot_dataframe.py", line 58, in analyse_and_plot_pairs
plot_elements = init_plotscript(config)
File "/home/ubuntu/freqtrade/freqtrade/plot/plotting.py", line 57, in init_plotscript
trades = load_trades(config)
File "/home/ubuntu/freqtrade/freqtrade/data/btanalysis.py", line 113, in load_trades
return load_backtest_data(Path(config["exportfilename"]))
File "/home/ubuntu/freqtrade/freqtrade/data/btanalysis.py", line 33, in load_backtest_data
raise ValueError("File {filename} does not exist.")
ValueError: File {filename} does not exist.
```
</issue>
<code>
[start of freqtrade/data/btanalysis.py]
1 """
2 Helpers when analyzing backtest data
3 """
4 import logging
5 from pathlib import Path
6 from typing import Dict
7
8 import numpy as np
9 import pandas as pd
10 import pytz
11
12 from freqtrade import persistence
13 from freqtrade.misc import json_load
14 from freqtrade.persistence import Trade
15
16 logger = logging.getLogger(__name__)
17
18 # must align with columns in backtest.py
19 BT_DATA_COLUMNS = ["pair", "profitperc", "open_time", "close_time", "index", "duration",
20 "open_rate", "close_rate", "open_at_end", "sell_reason"]
21
22
23 def load_backtest_data(filename) -> pd.DataFrame:
24 """
25 Load backtest data file.
26 :param filename: pathlib.Path object, or string pointing to the file.
27 :return: a dataframe with the analysis results
28 """
29 if isinstance(filename, str):
30 filename = Path(filename)
31
32 if not filename.is_file():
33 raise ValueError("File {filename} does not exist.")
34
35 with filename.open() as file:
36 data = json_load(file)
37
38 df = pd.DataFrame(data, columns=BT_DATA_COLUMNS)
39
40 df['open_time'] = pd.to_datetime(df['open_time'],
41 unit='s',
42 utc=True,
43 infer_datetime_format=True
44 )
45 df['close_time'] = pd.to_datetime(df['close_time'],
46 unit='s',
47 utc=True,
48 infer_datetime_format=True
49 )
50 df['profitabs'] = df['close_rate'] - df['open_rate']
51 df = df.sort_values("open_time").reset_index(drop=True)
52 return df
53
54
55 def evaluate_result_multi(results: pd.DataFrame, freq: str, max_open_trades: int) -> pd.DataFrame:
56 """
57 Find overlapping trades by expanding each trade once per period it was open
58 and then counting overlaps
59 :param results: Results Dataframe - can be loaded
60 :param freq: Frequency used for the backtest
61 :param max_open_trades: parameter max_open_trades used during backtest run
62 :return: dataframe with open-counts per time-period in freq
63 """
64 dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time, freq=freq))
65 for row in results[['open_time', 'close_time']].iterrows()]
66 deltas = [len(x) for x in dates]
67 dates = pd.Series(pd.concat(dates).values, name='date')
68 df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)
69
70 df2 = pd.concat([dates, df2], axis=1)
71 df2 = df2.set_index('date')
72 df_final = df2.resample(freq)[['pair']].count()
73 return df_final[df_final['pair'] > max_open_trades]
74
75
76 def load_trades_from_db(db_url: str) -> pd.DataFrame:
77 """
78 Load trades from a DB (using dburl)
79 :param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)
80 :return: Dataframe containing Trades
81 """
82 trades: pd.DataFrame = pd.DataFrame([], columns=BT_DATA_COLUMNS)
83 persistence.init(db_url, clean_open_orders=False)
84 columns = ["pair", "profit", "open_time", "close_time",
85 "open_rate", "close_rate", "duration", "sell_reason",
86 "max_rate", "min_rate"]
87
88 trades = pd.DataFrame([(t.pair, t.calc_profit(),
89 t.open_date.replace(tzinfo=pytz.UTC),
90 t.close_date.replace(tzinfo=pytz.UTC) if t.close_date else None,
91 t.open_rate, t.close_rate,
92 t.close_date.timestamp() - t.open_date.timestamp()
93 if t.close_date else None,
94 t.sell_reason,
95 t.max_rate,
96 t.min_rate,
97 )
98 for t in Trade.query.all()],
99 columns=columns)
100
101 return trades
102
103
104 def load_trades(config) -> pd.DataFrame:
105 """
106 Based on configuration option "trade_source":
107 * loads data from DB (using `db_url`)
108 * loads data from backtestfile (using `exportfilename`)
109 """
110 if config["trade_source"] == "DB":
111 return load_trades_from_db(config["db_url"])
112 elif config["trade_source"] == "file":
113 return load_backtest_data(Path(config["exportfilename"]))
114
115
116 def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame) -> pd.DataFrame:
117 """
118 Compare trades and backtested pair DataFrames to get trades performed on backtested period
119 :return: the DataFrame of a trades of period
120 """
121 trades = trades.loc[(trades['open_time'] >= dataframe.iloc[0]['date']) &
122 (trades['close_time'] <= dataframe.iloc[-1]['date'])]
123 return trades
124
125
126 def combine_tickers_with_mean(tickers: Dict[str, pd.DataFrame], column: str = "close"):
127 """
128 Combine multiple dataframes "column"
129 :param tickers: Dict of Dataframes, dict key should be pair.
130 :param column: Column in the original dataframes to use
131 :return: DataFrame with the column renamed to the dict key, and a column
132 named mean, containing the mean of all pairs.
133 """
134 df_comb = pd.concat([tickers[pair].set_index('date').rename(
135 {column: pair}, axis=1)[pair] for pair in tickers], axis=1)
136
137 df_comb['mean'] = df_comb.mean(axis=1)
138
139 return df_comb
140
141
142 def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str) -> pd.DataFrame:
143 """
144 Adds a column `col_name` with the cumulative profit for the given trades array.
145 :param df: DataFrame with date index
146 :param trades: DataFrame containing trades (requires columns close_time and profitperc)
147 :return: Returns df with one additional column, col_name, containing the cumulative profit.
148 """
149 df[col_name] = trades.set_index('close_time')['profitperc'].cumsum()
150 # Set first value to 0
151 df.loc[df.iloc[0].name, col_name] = 0
152 # FFill to get continuous
153 df[col_name] = df[col_name].ffill()
154 return df
155
[end of freqtrade/data/btanalysis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py
--- a/freqtrade/data/btanalysis.py
+++ b/freqtrade/data/btanalysis.py
@@ -30,7 +30,7 @@
filename = Path(filename)
if not filename.is_file():
- raise ValueError("File {filename} does not exist.")
+ raise ValueError(f"File {filename} does not exist.")
with filename.open() as file:
data = json_load(file)
| {"golden_diff": "diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py\n--- a/freqtrade/data/btanalysis.py\n+++ b/freqtrade/data/btanalysis.py\n@@ -30,7 +30,7 @@\n filename = Path(filename)\n \n if not filename.is_file():\n- raise ValueError(\"File {filename} does not exist.\")\n+ raise ValueError(f\"File {filename} does not exist.\")\n \n with filename.open() as file:\n data = json_load(file)\n", "issue": "plot_dataframe.py\n## Step 1: Have you search for this issue before posting it?\r\n\r\nCouldn't find similar issue, so starting a new issue.\r\n\r\n## Step 2: Describe your environment\r\n\r\n * Python Version: Python 3.6.8\r\n * CCXT version: ccxt==1.18.992\r\n * Branch: Master \r\n * Last Commit ID: b8713a515e960f1ffadcf1c7ee62c4bee80b506c\r\n \r\n## Step 3: Describe the problem:\r\nUnable to plot my backtest results.\r\n\r\n*Explain the problem you have encountered*\r\nExecuting the following command results in error.\r\nError\r\n### Steps to reproduce:\r\n\r\n`\r\n Command: python3 scripts/plot_dataframe.py -s EMACrossHTF1h --export \r\n EMACrossHTF1h_results.json -p BTC/USDT --datadir user_data/data/binance/\r\n`\r\n \r\n### Observed Results:\r\n\r\nError is thrown. \r\n\r\n### Relevant code exceptions or logs:\r\n`\r\n File \"scripts/plot_dataframe.py\", line 113, in <module>\r\n main(sys.argv[1:])\r\n File \"scripts/plot_dataframe.py\", line 107, in main\r\n plot_parse_args(sysargv)\r\n File \"scripts/plot_dataframe.py\", line 58, in analyse_and_plot_pairs\r\n plot_elements = init_plotscript(config)\r\n File \"/home/ubuntu/freqtrade/freqtrade/plot/plotting.py\", line 57, in init_plotscript\r\n trades = load_trades(config)\r\n File \"/home/ubuntu/freqtrade/freqtrade/data/btanalysis.py\", line 113, in load_trades\r\n return load_backtest_data(Path(config[\"exportfilename\"]))\r\n File \"/home/ubuntu/freqtrade/freqtrade/data/btanalysis.py\", line 33, in load_backtest_data\r\n raise ValueError(\"File {filename} does not exist.\")\r\n ValueError: File {filename} does not exist.\r\n`\r\n\n", "before_files": [{"content": "\"\"\"\nHelpers when analyzing backtest data\n\"\"\"\nimport logging\nfrom pathlib import Path\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\nimport pytz\n\nfrom freqtrade import persistence\nfrom freqtrade.misc import json_load\nfrom freqtrade.persistence import Trade\n\nlogger = logging.getLogger(__name__)\n\n# must align with columns in backtest.py\nBT_DATA_COLUMNS = [\"pair\", \"profitperc\", \"open_time\", \"close_time\", \"index\", \"duration\",\n \"open_rate\", \"close_rate\", \"open_at_end\", \"sell_reason\"]\n\n\ndef load_backtest_data(filename) -> pd.DataFrame:\n \"\"\"\n Load backtest data file.\n :param filename: pathlib.Path object, or string pointing to the file.\n :return: a dataframe with the analysis results\n \"\"\"\n if isinstance(filename, str):\n filename = Path(filename)\n\n if not filename.is_file():\n raise ValueError(\"File {filename} does not exist.\")\n\n with filename.open() as file:\n data = json_load(file)\n\n df = pd.DataFrame(data, columns=BT_DATA_COLUMNS)\n\n df['open_time'] = pd.to_datetime(df['open_time'],\n unit='s',\n utc=True,\n infer_datetime_format=True\n )\n df['close_time'] = pd.to_datetime(df['close_time'],\n unit='s',\n utc=True,\n infer_datetime_format=True\n )\n df['profitabs'] = df['close_rate'] - df['open_rate']\n df = df.sort_values(\"open_time\").reset_index(drop=True)\n return df\n\n\ndef evaluate_result_multi(results: pd.DataFrame, freq: str, max_open_trades: int) -> 
pd.DataFrame:\n \"\"\"\n Find overlapping trades by expanding each trade once per period it was open\n and then counting overlaps\n :param results: Results Dataframe - can be loaded\n :param freq: Frequency used for the backtest\n :param max_open_trades: parameter max_open_trades used during backtest run\n :return: dataframe with open-counts per time-period in freq\n \"\"\"\n dates = [pd.Series(pd.date_range(row[1].open_time, row[1].close_time, freq=freq))\n for row in results[['open_time', 'close_time']].iterrows()]\n deltas = [len(x) for x in dates]\n dates = pd.Series(pd.concat(dates).values, name='date')\n df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)\n\n df2 = pd.concat([dates, df2], axis=1)\n df2 = df2.set_index('date')\n df_final = df2.resample(freq)[['pair']].count()\n return df_final[df_final['pair'] > max_open_trades]\n\n\ndef load_trades_from_db(db_url: str) -> pd.DataFrame:\n \"\"\"\n Load trades from a DB (using dburl)\n :param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)\n :return: Dataframe containing Trades\n \"\"\"\n trades: pd.DataFrame = pd.DataFrame([], columns=BT_DATA_COLUMNS)\n persistence.init(db_url, clean_open_orders=False)\n columns = [\"pair\", \"profit\", \"open_time\", \"close_time\",\n \"open_rate\", \"close_rate\", \"duration\", \"sell_reason\",\n \"max_rate\", \"min_rate\"]\n\n trades = pd.DataFrame([(t.pair, t.calc_profit(),\n t.open_date.replace(tzinfo=pytz.UTC),\n t.close_date.replace(tzinfo=pytz.UTC) if t.close_date else None,\n t.open_rate, t.close_rate,\n t.close_date.timestamp() - t.open_date.timestamp()\n if t.close_date else None,\n t.sell_reason,\n t.max_rate,\n t.min_rate,\n )\n for t in Trade.query.all()],\n columns=columns)\n\n return trades\n\n\ndef load_trades(config) -> pd.DataFrame:\n \"\"\"\n Based on configuration option \"trade_source\":\n * loads data from DB (using `db_url`)\n * loads data from backtestfile (using `exportfilename`)\n \"\"\"\n if config[\"trade_source\"] == \"DB\":\n return load_trades_from_db(config[\"db_url\"])\n elif config[\"trade_source\"] == \"file\":\n return load_backtest_data(Path(config[\"exportfilename\"]))\n\n\ndef extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Compare trades and backtested pair DataFrames to get trades performed on backtested period\n :return: the DataFrame of a trades of period\n \"\"\"\n trades = trades.loc[(trades['open_time'] >= dataframe.iloc[0]['date']) &\n (trades['close_time'] <= dataframe.iloc[-1]['date'])]\n return trades\n\n\ndef combine_tickers_with_mean(tickers: Dict[str, pd.DataFrame], column: str = \"close\"):\n \"\"\"\n Combine multiple dataframes \"column\"\n :param tickers: Dict of Dataframes, dict key should be pair.\n :param column: Column in the original dataframes to use\n :return: DataFrame with the column renamed to the dict key, and a column\n named mean, containing the mean of all pairs.\n \"\"\"\n df_comb = pd.concat([tickers[pair].set_index('date').rename(\n {column: pair}, axis=1)[pair] for pair in tickers], axis=1)\n\n df_comb['mean'] = df_comb.mean(axis=1)\n\n return df_comb\n\n\ndef create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str) -> pd.DataFrame:\n \"\"\"\n Adds a column `col_name` with the cumulative profit for the given trades array.\n :param df: DataFrame with date index\n :param trades: DataFrame containing trades (requires columns close_time and profitperc)\n :return: Returns df with one additional column, 
col_name, containing the cumulative profit.\n \"\"\"\n df[col_name] = trades.set_index('close_time')['profitperc'].cumsum()\n # Set first value to 0\n df.loc[df.iloc[0].name, col_name] = 0\n # FFill to get continuous\n df[col_name] = df[col_name].ffill()\n return df\n", "path": "freqtrade/data/btanalysis.py"}]} | 2,695 | 113 |
gh_patches_debug_23532 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-29 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configure flake8 & GitHub Action correctly
Our flake8 setup has a couple of issues:
- Failures on the GitHub Action don't actually block merge.
- We need to set up our style guide for flake8.
</issue>
<code>
[start of mathesar/forms/widgets.py]
1 from django.forms.widgets import TextInput
2
3 class DataListInput(TextInput):
4 """
5 Widget that adds a <data_list> element to the standard text input widget.
6 See TextInput for further details.
7
8 Attributes:
9 data_list: List of strings, where each string is a data_list value, or
10 a callable that returns a list of the same form
11 data_list_id: ID of the data_list, generated when render() is called.
12 Of the form [widget_id | widget_name]_data_list
13 """
14 template_name = "mathesar/widgets/data_list.html"
15
16 def __init__(self, data_list, attrs=None):
17 super().__init__(attrs=attrs)
18 self.data_list = data_list
19 self.data_list_id = "_data_list"
20
21 def get_context(self, name, value, attrs):
22 context = super().get_context(name, value, attrs)
23 if callable(self.data_list):
24 context["widget"]["data_list"] = self.data_list()
25 else:
26 context["widget"]["data_list"] = self.data_list
27 context["widget"]["data_list_id"] = self.data_list_id
28 return context
29
30 def render(self, name, value, attrs=None, renderer=None):
31 # In practice, there should always be an ID attribute, but we fallback
32 # to using widget name if ID is missing
33 if attrs and "id" in attrs:
34 self.data_list_id = attrs["id"] + "_data_list"
35 else:
36 self.data_list_id = name + "_data_list"
37 attrs = {} if attrs is None else attrs
38 attrs["list"] = self.data_list_id
39 return super().render(name, value, attrs, renderer)
40
41
[end of mathesar/forms/widgets.py]
[start of mathesar/forms/forms.py]
1 from django import forms
2 from django.core.exceptions import ValidationError
3
4 from mathesar.database.schemas import get_all_schemas
5 from mathesar.forms.widgets import DataListInput
6
7 def validate_csv(value):
8 if not value.name.lower().endswith(".csv"):
9 raise ValidationError(f"{value.name} is not a CSV file")
10
11 class UploadFileForm(forms.Form):
12 collection_name = forms.CharField(min_length=1, label="Collection Name")
13
14 application_name = forms.CharField(
15 min_length=1, label="Application Name",
16 widget=DataListInput(get_all_schemas)
17 )
18
19 file = forms.FileField(validators=[validate_csv], label="CSV File")
20
[end of mathesar/forms/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/forms/forms.py b/mathesar/forms/forms.py
--- a/mathesar/forms/forms.py
+++ b/mathesar/forms/forms.py
@@ -4,10 +4,12 @@
from mathesar.database.schemas import get_all_schemas
from mathesar.forms.widgets import DataListInput
+
def validate_csv(value):
if not value.name.lower().endswith(".csv"):
raise ValidationError(f"{value.name} is not a CSV file")
+
class UploadFileForm(forms.Form):
collection_name = forms.CharField(min_length=1, label="Collection Name")
diff --git a/mathesar/forms/widgets.py b/mathesar/forms/widgets.py
--- a/mathesar/forms/widgets.py
+++ b/mathesar/forms/widgets.py
@@ -1,5 +1,6 @@
from django.forms.widgets import TextInput
+
class DataListInput(TextInput):
"""
Widget that adds a <data_list> element to the standard text input widget.
@@ -37,4 +38,3 @@
attrs = {} if attrs is None else attrs
attrs["list"] = self.data_list_id
return super().render(name, value, attrs, renderer)
-
| {"golden_diff": "diff --git a/mathesar/forms/forms.py b/mathesar/forms/forms.py\n--- a/mathesar/forms/forms.py\n+++ b/mathesar/forms/forms.py\n@@ -4,10 +4,12 @@\n from mathesar.database.schemas import get_all_schemas\n from mathesar.forms.widgets import DataListInput\n \n+\n def validate_csv(value):\n if not value.name.lower().endswith(\".csv\"):\n raise ValidationError(f\"{value.name} is not a CSV file\")\n \n+\n class UploadFileForm(forms.Form):\n collection_name = forms.CharField(min_length=1, label=\"Collection Name\")\n \ndiff --git a/mathesar/forms/widgets.py b/mathesar/forms/widgets.py\n--- a/mathesar/forms/widgets.py\n+++ b/mathesar/forms/widgets.py\n@@ -1,5 +1,6 @@\n from django.forms.widgets import TextInput\n \n+\n class DataListInput(TextInput):\n \"\"\"\n Widget that adds a <data_list> element to the standard text input widget.\n@@ -37,4 +38,3 @@\n attrs = {} if attrs is None else attrs\n attrs[\"list\"] = self.data_list_id\n return super().render(name, value, attrs, renderer)\n-\n", "issue": "Configure flake8 & GitHub Action correctly\nOur flake8 setup has a couple of issues:\r\n- Failures on the GitHub Action don't actually block merge.\r\n- We need to set up our style guide for flake8.\n", "before_files": [{"content": "from django.forms.widgets import TextInput\n\nclass DataListInput(TextInput):\n \"\"\"\n Widget that adds a <data_list> element to the standard text input widget.\n See TextInput for further details.\n\n Attributes:\n data_list: List of strings, where each string is a data_list value, or\n a callable that returns a list of the same form\n data_list_id: ID of the data_list, generated when render() is called.\n Of the form [widget_id | widget_name]_data_list\n \"\"\"\n template_name = \"mathesar/widgets/data_list.html\"\n\n def __init__(self, data_list, attrs=None):\n super().__init__(attrs=attrs)\n self.data_list = data_list\n self.data_list_id = \"_data_list\"\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n if callable(self.data_list):\n context[\"widget\"][\"data_list\"] = self.data_list()\n else:\n context[\"widget\"][\"data_list\"] = self.data_list\n context[\"widget\"][\"data_list_id\"] = self.data_list_id\n return context\n\n def render(self, name, value, attrs=None, renderer=None):\n # In practice, there should always be an ID attribute, but we fallback\n # to using widget name if ID is missing\n if attrs and \"id\" in attrs:\n self.data_list_id = attrs[\"id\"] + \"_data_list\"\n else:\n self.data_list_id = name + \"_data_list\"\n attrs = {} if attrs is None else attrs\n attrs[\"list\"] = self.data_list_id\n return super().render(name, value, attrs, renderer)\n\n", "path": "mathesar/forms/widgets.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom mathesar.database.schemas import get_all_schemas\nfrom mathesar.forms.widgets import DataListInput\n\ndef validate_csv(value):\n if not value.name.lower().endswith(\".csv\"):\n raise ValidationError(f\"{value.name} is not a CSV file\")\n\nclass UploadFileForm(forms.Form):\n collection_name = forms.CharField(min_length=1, label=\"Collection Name\")\n\n application_name = forms.CharField(\n min_length=1, label=\"Application Name\",\n widget=DataListInput(get_all_schemas)\n )\n\n file = forms.FileField(validators=[validate_csv], label=\"CSV File\")\n", "path": "mathesar/forms/forms.py"}]} | 1,209 | 248 |
gh_patches_debug_6566 | rasdani/github-patches | git_diff | GPflow__GPflow-175 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
error while importing GPflow
I cannot import GPflow. I installed it with `python setup.py develop` in a virtualenv. Tests are also failing to run.
### Import error
```
In [2]: import GPflow
---------------------------------------------------------------------------
NotFoundError Traceback (most recent call last)
<ipython-input-2-d5391a053bbd> in <module>()
----> 1 import GPflow
/home/me/<...>/GPflow/GPflow/__init__.py in <module>()
15
16 # flake8: noqa
---> 17 from . import likelihoods, kernels, param, model, gpmc, sgpmc, priors, gpr, svgp, vgp, sgpr
18 from ._version import __version__
/home/me/<...>/GPflow/GPflow/likelihoods.py in <module>()
17 import tensorflow as tf
18 import numpy as np
---> 19 from .param import Parameterized, Param
20 from . import transforms
21 hermgauss = np.polynomial.hermite.hermgauss
/home/me/<...>/GPflow/GPflow/param.py in <module>()
17 import pandas as pd
18 import tensorflow as tf
---> 19 from . import transforms
20 from contextlib import contextmanager
21 from functools import wraps
/home/me/<...>/GPflow/GPflow/transforms.py in <module>()
16 import numpy as np
17 import tensorflow as tf
---> 18 import GPflow.tf_hacks as tfh
19
20
/home/me/<...>/GPflow/GPflow/tf_hacks.py in <module>()
28
29
---> 30 _custom_op_module = tf.load_op_library(os.path.join(os.path.dirname(__file__), 'tfops', 'matpackops.so'))
31 vec_to_tri = _custom_op_module.vec_to_tri
32 tri_to_vec = _custom_op_module.tri_to_vec
/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/tensorflow/python/framework/load_library.pyc in load_op_library(library_filename)
73 return _OP_LIBRARY_MAP[library_filename]
74 # pylint: disable=protected-access
---> 75 raise errors._make_specific_exception(None, None, error_msg, error_code)
76 # pylint: enable=protected-access
77 finally:
NotFoundError: GPflow/tfops/matpackops.so: undefined symbol: _ZN10tensorflow7strings6StrCatB5cxx11ERKNS0_8AlphaNumE
```
### Test error
```
running test
running egg_info
writing requirements to GPflow.egg-info/requires.txt
writing GPflow.egg-info/PKG-INFO
writing top-level names to GPflow.egg-info/top_level.txt
writing dependency_links to GPflow.egg-info/dependency_links.txt
reading manifest file 'GPflow.egg-info/SOURCES.txt'
writing manifest file 'GPflow.egg-info/SOURCES.txt'
running build_ext
Traceback (most recent call last):
File "setup.py", line 50, in <module>
'Topic :: Scientific/Engineering :: Artificial Intelligence']
File "/usr/lib64/python2.7/distutils/core.py", line 151, in setup
dist.run_commands()
File "/usr/lib64/python2.7/distutils/dist.py", line 953, in run_commands
self.run_command(cmd)
File "/usr/lib64/python2.7/distutils/dist.py", line 972, in run_command
cmd_obj.run()
File "/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/setuptools/command/test.py", line 172, in run
self.run_tests()
File "/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/setuptools/command/test.py", line 193, in run_tests
testRunner=self._resolve_as_ep(self.test_runner),
File "/usr/lib64/python2.7/unittest/main.py", line 94, in __init__
self.parseArgs(argv)
File "/usr/lib64/python2.7/unittest/main.py", line 149, in parseArgs
self.createTests()
File "/usr/lib64/python2.7/unittest/main.py", line 158, in createTests
self.module)
File "/usr/lib64/python2.7/unittest/loader.py", line 130, in loadTestsFromNames
suites = [self.loadTestsFromName(name, module) for name in names]
File "/usr/lib64/python2.7/unittest/loader.py", line 103, in loadTestsFromName
return self.loadTestsFromModule(obj)
File "/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/setuptools/command/test.py", line 40, in loadTestsFromModule
tests.append(self.loadTestsFromName(submodule))
File "/usr/lib64/python2.7/unittest/loader.py", line 100, in loadTestsFromName
parent, obj = obj, getattr(obj, part)
AttributeError: 'module' object has no attribute 'test_variational'
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import print_function
4 from setuptools import setup
5 import re
6 import os
7 import sys
8
9 # load version form _version.py
10 VERSIONFILE = "GPflow/_version.py"
11 verstrline = open(VERSIONFILE, "rt").read()
12 VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
13 mo = re.search(VSRE, verstrline, re.M)
14 if mo:
15 verstr = mo.group(1)
16 else:
17 raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
18
19 # Compile the bespoke TensorFlow ops in-place. Not sure how this would work if this script wasn't executed as `develop`.
20 compile_command = "g++ -std=c++11 -shared ./GPflow/tfops/vec_to_tri.cc " \
21 "GPflow/tfops/tri_to_vec.cc -o GPflow/tfops/matpackops.so " \
22 "-fPIC -I $(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')"
23 if sys.platform == "darwin":
24 # Additional command for Macs, as instructed by the TensorFlow docs
25 compile_command += " -undefined dynamic_lookup"
26 os.system(compile_command)
27
28 setup(name='GPflow',
29 version=verstr,
30 author="James Hensman, Alex Matthews",
31 author_email="[email protected]",
32 description=("Gaussian process methods in tensorflow"),
33 license="BSD 3-clause",
34 keywords="machine-learning gaussian-processes kernels tensorflow",
35 url="http://github.com/gpflow/gpflow",
36 package_data={'GPflow': ['GPflow/tfops/*.so']},
37 include_package_data=True,
38 ext_modules=[],
39 packages=["GPflow"],
40 package_dir={'GPflow': 'GPflow'},
41 py_modules=['GPflow.__init__'],
42 test_suite='testing',
43 install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.10.0rc0'],
44 classifiers=['License :: OSI Approved :: BSD License',
45 'Natural Language :: English',
46 'Operating System :: MacOS :: MacOS X',
47 'Operating System :: Microsoft :: Windows',
48 'Operating System :: POSIX :: Linux',
49 'Programming Language :: Python :: 2.7',
50 'Topic :: Scientific/Engineering :: Artificial Intelligence']
51 )
52
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,6 +23,10 @@
if sys.platform == "darwin":
# Additional command for Macs, as instructed by the TensorFlow docs
compile_command += " -undefined dynamic_lookup"
+elif sys.platform.startswith("linux"):
+ gcc_version = int(re.search('\d+.', os.popen("gcc --version").read()).group()[0])
+ if gcc_version == 5:
+ compile_command += " -D_GLIBCXX_USE_CXX11_ABI=0"
os.system(compile_command)
setup(name='GPflow',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,6 +23,10 @@\n if sys.platform == \"darwin\":\n # Additional command for Macs, as instructed by the TensorFlow docs\n compile_command += \" -undefined dynamic_lookup\"\n+elif sys.platform.startswith(\"linux\"):\n+ gcc_version = int(re.search('\\d+.', os.popen(\"gcc --version\").read()).group()[0])\n+ if gcc_version == 5:\n+ compile_command += \" -D_GLIBCXX_USE_CXX11_ABI=0\"\n os.system(compile_command)\n \n setup(name='GPflow',\n", "issue": "error while importing GPflow \nI can not import GPflow. I instelled it by `python setup.py develop` on virtualenv. Tests are also failing to run.\n### Import error\n\n```\nIn [2]: import GPflow\n---------------------------------------------------------------------------\nNotFoundError Traceback (most recent call last)\n<ipython-input-2-d5391a053bbd> in <module>()\n----> 1 import GPflow\n\n/home/me/<...>/GPflow/GPflow/__init__.py in <module>()\n 15 \n 16 # flake8: noqa\n---> 17 from . import likelihoods, kernels, param, model, gpmc, sgpmc, priors, gpr, svgp, vgp, sgpr\n 18 from ._version import __version__\n\n/home/me/<...>/GPflow/GPflow/likelihoods.py in <module>()\n 17 import tensorflow as tf\n 18 import numpy as np\n---> 19 from .param import Parameterized, Param\n 20 from . import transforms\n 21 hermgauss = np.polynomial.hermite.hermgauss\n\n/home/me/<...>/GPflow/GPflow/param.py in <module>()\n 17 import pandas as pd\n 18 import tensorflow as tf\n---> 19 from . import transforms\n 20 from contextlib import contextmanager\n 21 from functools import wraps\n\n/home/me/<...>/GPflow/GPflow/transforms.py in <module>()\n 16 import numpy as np\n 17 import tensorflow as tf\n---> 18 import GPflow.tf_hacks as tfh\n 19 \n 20 \n\n/home/me/<...>/GPflow/GPflow/tf_hacks.py in <module>()\n 28 \n 29 \n---> 30 _custom_op_module = tf.load_op_library(os.path.join(os.path.dirname(__file__), 'tfops', 'matpackops.so'))\n 31 vec_to_tri = _custom_op_module.vec_to_tri\n 32 tri_to_vec = _custom_op_module.tri_to_vec\n\n/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/tensorflow/python/framework/load_library.pyc in load_op_library(library_filename)\n 73 return _OP_LIBRARY_MAP[library_filename]\n 74 # pylint: disable=protected-access\n---> 75 raise errors._make_specific_exception(None, None, error_msg, error_code)\n 76 # pylint: enable=protected-access\n 77 finally:\n\nNotFoundError: GPflow/tfops/matpackops.so: undefined symbol: _ZN10tensorflow7strings6StrCatB5cxx11ERKNS0_8AlphaNumE\n\n```\n### Test error\n\n```\nrunning test\nrunning egg_info\nwriting requirements to GPflow.egg-info/requires.txt\nwriting GPflow.egg-info/PKG-INFO\nwriting top-level names to GPflow.egg-info/top_level.txt\nwriting dependency_links to GPflow.egg-info/dependency_links.txt\nreading manifest file 'GPflow.egg-info/SOURCES.txt'\nwriting manifest file 'GPflow.egg-info/SOURCES.txt'\nrunning build_ext\nTraceback (most recent call last):\n File \"setup.py\", line 50, in <module>\n 'Topic :: Scientific/Engineering :: Artificial Intelligence']\n File \"/usr/lib64/python2.7/distutils/core.py\", line 151, in setup\n dist.run_commands()\n File \"/usr/lib64/python2.7/distutils/dist.py\", line 953, in run_commands\n self.run_command(cmd)\n File \"/usr/lib64/python2.7/distutils/dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/setuptools/command/test.py\", line 172, in run\n self.run_tests()\n File 
\"/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/setuptools/command/test.py\", line 193, in run_tests\n testRunner=self._resolve_as_ep(self.test_runner),\n File \"/usr/lib64/python2.7/unittest/main.py\", line 94, in __init__\n self.parseArgs(argv)\n File \"/usr/lib64/python2.7/unittest/main.py\", line 149, in parseArgs\n self.createTests()\n File \"/usr/lib64/python2.7/unittest/main.py\", line 158, in createTests\n self.module)\n File \"/usr/lib64/python2.7/unittest/loader.py\", line 130, in loadTestsFromNames\n suites = [self.loadTestsFromName(name, module) for name in names]\n File \"/usr/lib64/python2.7/unittest/loader.py\", line 103, in loadTestsFromName\n return self.loadTestsFromModule(obj)\n File \"/home/me/.virtualenvs/tf_0_10/lib/python2.7/site-packages/setuptools/command/test.py\", line 40, in loadTestsFromModule\n tests.append(self.loadTestsFromName(submodule))\n File \"/usr/lib64/python2.7/unittest/loader.py\", line 100, in loadTestsFromName\n parent, obj = obj, getattr(obj, part)\nAttributeError: 'module' object has no attribute 'test_variational'\n\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n# load version form _version.py\nVERSIONFILE = \"GPflow/_version.py\"\nverstrline = open(VERSIONFILE, \"rt\").read()\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(VSRE, verstrline, re.M)\nif mo:\n verstr = mo.group(1)\nelse:\n raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))\n\n# Compile the bespoke TensorFlow ops in-place. Not sure how this would work if this script wasn't executed as `develop`.\ncompile_command = \"g++ -std=c++11 -shared ./GPflow/tfops/vec_to_tri.cc \" \\\n \"GPflow/tfops/tri_to_vec.cc -o GPflow/tfops/matpackops.so \" \\\n \"-fPIC -I $(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')\"\nif sys.platform == \"darwin\":\n # Additional command for Macs, as instructed by the TensorFlow docs\n compile_command += \" -undefined dynamic_lookup\"\nos.system(compile_command)\n\nsetup(name='GPflow',\n version=verstr,\n author=\"James Hensman, Alex Matthews\",\n author_email=\"[email protected]\",\n description=(\"Gaussian process methods in tensorflow\"),\n license=\"BSD 3-clause\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"http://github.com/gpflow/gpflow\",\n package_data={'GPflow': ['GPflow/tfops/*.so']},\n include_package_data=True,\n ext_modules=[],\n packages=[\"GPflow\"],\n package_dir={'GPflow': 'GPflow'},\n py_modules=['GPflow.__init__'],\n test_suite='testing',\n install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.10.0rc0'],\n classifiers=['License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence']\n )\n", "path": "setup.py"}]} | 2,363 | 142 |
gh_patches_debug_22492 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-1153 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[INF] Set multi-test env to compat dependency and system
<!-- Thank you for your PR!
BEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title.
The abbreviations can be:
- [DOC]: Documentation fixes.
- [ENH]: Code contributions and new features.
- [TST]: Test-related contributions.
- [INF]: Infrastructure-related contributions.
Also, do not forget to tag the relevant issue here as well.
Finally, as commits come in, don't forget to regularly rebase!
-->
# PR Description
Please describe the changes proposed in the pull request:
Aim:
- Set multi-test environment
- Ensure compatibility with different dependencies and systems
ToDo/Doing:
- [x] Set latest env: test that pyjanitor works with the latest dependencies to get the minimal Python version
- [ ] Set minimal env: get the minimal version of dependencies
Part of #1133
# PR Checklist
<!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with
the project, please feel free to delete this section. -->
Please ensure that you have done the following:
1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.
<!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. -->
2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`.
<!-- We'd like to acknowledge your contributions! -->
3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is "on deck") describing the contribution.
- Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.
# Automatic checks
There will be automatic checks run on the PR. These include:
- Building a preview of the docs on Netlify
- Automatically linting the code
- Making sure the code is documented
- Making sure that all tests are passed
- Making sure that code coverage doesn't go down.
# Relevant Reviewers
<!-- Finally, please tag relevant maintainers to review. -->
Please tag maintainers to review.
- @ericmjl
</issue>
<code>
[start of janitor/functions/encode_categorical.py]
1 import warnings
2 from enum import Enum
3 from typing import Hashable, Iterable, Union
4
5 import pandas_flavor as pf
6 import pandas as pd
7 from pandas.api.types import is_list_like
8
9 from janitor.utils import check, check_column, deprecated_alias
10
11
12 @pf.register_dataframe_method
13 @deprecated_alias(columns="column_names")
14 def encode_categorical(
15 df: pd.DataFrame,
16 column_names: Union[str, Iterable[str], Hashable] = None,
17 **kwargs,
18 ) -> pd.DataFrame:
19 """Encode the specified columns with Pandas' [category dtype][cat].
20
21 [cat]: http://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
22
23 It is syntactic sugar around `pd.Categorical`.
24
25 This method does not mutate the original DataFrame.
26
27 Simply pass a string, or a sequence of column names to `column_names`;
28 alternatively, you can pass kwargs, where the keys are the column names
29 and the values can either be None, `sort`, `appearance`
30 or a 1-D array-like object.
31
32 - None: column is cast to an unordered categorical.
33 - `sort`: column is cast to an ordered categorical,
34 with the order defined by the sort-order of the categories.
35 - `appearance`: column is cast to an ordered categorical,
36 with the order defined by the order of appearance
37 in the original column.
38 - 1d-array-like object: column is cast to an ordered categorical,
39 with the categories and order as specified
40 in the input array.
41
42 `column_names` and `kwargs` parameters cannot be used at the same time.
43
44 Example: Using `column_names`
45
46 >>> import pandas as pd
47 >>> import janitor
48 >>> df = pd.DataFrame({
49 ... "foo": ["b", "b", "a", "c", "b"],
50 ... "bar": range(4, 9),
51 ... })
52 >>> df
53 foo bar
54 0 b 4
55 1 b 5
56 2 a 6
57 3 c 7
58 4 b 8
59 >>> df.dtypes
60 foo object
61 bar int64
62 dtype: object
63 >>> enc_df = df.encode_categorical(column_names="foo")
64 >>> enc_df.dtypes
65 foo category
66 bar int64
67 dtype: object
68 >>> enc_df["foo"].cat.categories
69 Index(['a', 'b', 'c'], dtype='object')
70 >>> enc_df["foo"].cat.ordered
71 False
72
73 Example: Using `kwargs` to specify an ordered categorical.
74
75 >>> import pandas as pd
76 >>> import janitor
77 >>> df = pd.DataFrame({
78 ... "foo": ["b", "b", "a", "c", "b"],
79 ... "bar": range(4, 9),
80 ... })
81 >>> df.dtypes
82 foo object
83 bar int64
84 dtype: object
85 >>> enc_df = df.encode_categorical(foo="appearance")
86 >>> enc_df.dtypes
87 foo category
88 bar int64
89 dtype: object
90 >>> enc_df["foo"].cat.categories
91 Index(['b', 'a', 'c'], dtype='object')
92 >>> enc_df["foo"].cat.ordered
93 True
94
95 :param df: A pandas DataFrame object.
96 :param column_names: A column name or an iterable (list or tuple)
97 of column names.
98 :param **kwargs: A mapping from column name to either `None`,
99 `'sort'` or `'appearance'`, or a 1-D array. This is useful
100 in creating categorical columns that are ordered, or
101 if the user needs to explicitly specify the categories.
102 :returns: A pandas DataFrame.
103 :raises ValueError: If both `column_names` and `kwargs` are provided.
104 """ # noqa: E501
105
106 if all((column_names, kwargs)):
107 raise ValueError(
108 "Only one of `column_names` or `kwargs` can be provided."
109 )
110 # column_names deal with only category dtype (unordered)
111 # kwargs takes care of scenarios where user wants an ordered category
112 # or user supplies specific categories to create the categorical
113 if column_names is not None:
114 check("column_names", column_names, [list, tuple, Hashable])
115 if isinstance(column_names, Hashable):
116 column_names = [column_names]
117 check_column(df, column_names)
118 dtypes = {col: "category" for col in column_names}
119 return df.astype(dtypes)
120
121 return _computations_as_categorical(df, **kwargs)
122
123
124 def _computations_as_categorical(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
125 """
126 This function handles cases where
127 categorical columns are created with an order,
128 or specific values supplied for the categories.
129 It uses a kwarg, where the key is the column name,
130 and the value is either a string, or a 1D array.
131 The default for value is None and will return a categorical dtype
132 with no order and categories inferred from the column.
133 A DataFrame, with categorical columns, is returned.
134 """
135
136 categories_dict = _as_categorical_checks(df, **kwargs)
137
138 categories_dtypes = {}
139
140 for column_name, value in categories_dict.items():
141 if value is None:
142 cat_dtype = pd.CategoricalDtype()
143 elif isinstance(value, str):
144 if value == _CategoryOrder.SORT.value:
145 _, cat_dtype = df[column_name].factorize(sort=True)
146 else:
147 _, cat_dtype = df[column_name].factorize(sort=False)
148 if cat_dtype.empty:
149 raise ValueError(
150 "Kindly ensure there is at least "
151 f"one non-null value in {column_name}."
152 )
153 cat_dtype = pd.CategoricalDtype(categories=cat_dtype, ordered=True)
154
155 else: # 1-D array
156 cat_dtype = pd.CategoricalDtype(categories=value, ordered=True)
157
158 categories_dtypes[column_name] = cat_dtype
159
160 return df.astype(categories_dtypes)
161
162
163 def _as_categorical_checks(df: pd.DataFrame, **kwargs) -> dict:
164 """
165 This function raises errors if columns in `kwargs` are
166 absent from the dataframe's columns.
167 It also raises errors if the value in `kwargs`
168 is not a string (`'appearance'` or `'sort'`), or a 1D array.
169
170 This function is executed before proceeding to the computation phase.
171
172 If all checks pass, a dictionary of column names and value is returned.
173
174 :param df: The pandas DataFrame object.
175 :param **kwargs: A pairing of column name and value.
176 :returns: A dictionary.
177 :raises TypeError: If `value` is not a 1-D array, or a string.
178 :raises ValueError: If `value` is a 1-D array, and contains nulls,
179 or is non-unique.
180 """
181
182 check_column(df, kwargs)
183
184 categories_dict = {}
185
186 for column_name, value in kwargs.items():
187 # type check
188 if (value is not None) and not (
189 is_list_like(value) or isinstance(value, str)
190 ):
191 raise TypeError(f"{value} should be list-like or a string.")
192 if is_list_like(value):
193 if not hasattr(value, "shape"):
194 value = pd.Index([*value])
195
196 arr_ndim = value.ndim
197 if (arr_ndim != 1) or isinstance(value, pd.MultiIndex):
198 raise ValueError(
199 f"{value} is not a 1-D array. "
200 "Kindly provide a 1-D array-like object."
201 )
202
203 if not isinstance(value, (pd.Series, pd.Index)):
204 value = pd.Index(value)
205
206 if value.hasnans:
207 raise ValueError(
208 "Kindly ensure there are no nulls in the array provided."
209 )
210
211 if not value.is_unique:
212 raise ValueError(
213 "Kindly provide unique, "
214 "non-null values for the array provided."
215 )
216
217 if value.empty:
218 raise ValueError(
219 "Kindly ensure there is at least "
220 "one non-null value in the array provided."
221 )
222
223 # uniques, without nulls
224 uniques = df[column_name].factorize(sort=False)[-1]
225 if uniques.empty:
226 raise ValueError(
227 "Kindly ensure there is at least "
228 f"one non-null value in {column_name}."
229 )
230
231 missing = uniques.difference(value, sort=False)
232 if not missing.empty and (uniques.size > missing.size):
233 warnings.warn(
234 f"Values {tuple(missing)} are missing from "
235 f"the provided categories {value} "
236 f"for {column_name}; this may create nulls "
237 "in the new categorical column.",
238 UserWarning,
239 stacklevel=2,
240 )
241
242 elif uniques.equals(missing):
243 warnings.warn(
244 f"None of the values in {column_name} are in "
245 f"{value}; this might create nulls for all values "
246 f"in the new categorical column.",
247 UserWarning,
248 stacklevel=2,
249 )
250
251 elif isinstance(value, str):
252 category_order_types = {ent.value for ent in _CategoryOrder}
253 if value.lower() not in category_order_types:
254 raise ValueError(
255 "Argument should be one of 'appearance' or 'sort'."
256 )
257
258 categories_dict[column_name] = value
259
260 return categories_dict
261
262
263 class _CategoryOrder(Enum):
264 """
265 order types for encode_categorical.
266 """
267
268 SORT = "sort"
269 APPEARANCE = "appearance"
270
[end of janitor/functions/encode_categorical.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/functions/encode_categorical.py b/janitor/functions/encode_categorical.py
--- a/janitor/functions/encode_categorical.py
+++ b/janitor/functions/encode_categorical.py
@@ -2,8 +2,9 @@
from enum import Enum
from typing import Hashable, Iterable, Union
-import pandas_flavor as pf
+import numpy as np
import pandas as pd
+import pandas_flavor as pf
from pandas.api.types import is_list_like
from janitor.utils import check, check_column, deprecated_alias
@@ -191,10 +192,9 @@
raise TypeError(f"{value} should be list-like or a string.")
if is_list_like(value):
if not hasattr(value, "shape"):
- value = pd.Index([*value])
+ value = np.asarray(value)
- arr_ndim = value.ndim
- if (arr_ndim != 1) or isinstance(value, pd.MultiIndex):
+ if (value.ndim != 1) or isinstance(value, pd.MultiIndex):
raise ValueError(
f"{value} is not a 1-D array. "
"Kindly provide a 1-D array-like object."
| {"golden_diff": "diff --git a/janitor/functions/encode_categorical.py b/janitor/functions/encode_categorical.py\n--- a/janitor/functions/encode_categorical.py\n+++ b/janitor/functions/encode_categorical.py\n@@ -2,8 +2,9 @@\n from enum import Enum\n from typing import Hashable, Iterable, Union\n \n-import pandas_flavor as pf\n+import numpy as np\n import pandas as pd\n+import pandas_flavor as pf\n from pandas.api.types import is_list_like\n \n from janitor.utils import check, check_column, deprecated_alias\n@@ -191,10 +192,9 @@\n raise TypeError(f\"{value} should be list-like or a string.\")\n if is_list_like(value):\n if not hasattr(value, \"shape\"):\n- value = pd.Index([*value])\n+ value = np.asarray(value)\n \n- arr_ndim = value.ndim\n- if (arr_ndim != 1) or isinstance(value, pd.MultiIndex):\n+ if (value.ndim != 1) or isinstance(value, pd.MultiIndex):\n raise ValueError(\n f\"{value} is not a 1-D array. \"\n \"Kindly provide a 1-D array-like object.\"\n", "issue": "[INF] Set multi-test env to compat dependency and system\n<!-- Thank you for your PR!\r\n\r\nBEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title.\r\n\r\nThe abbreviations can be:\r\n- [DOC]: Documentation fixes.\r\n- [ENH]: Code contributions and new features.\r\n- [TST]: Test-related contributions.\r\n- [INF]: Infrastructure-related contributions.\r\n\r\nAlso, do not forget to tag the relevant issue here as well.\r\n\r\nFinally, as commits come in, don't forget to regularly rebase!\r\n-->\r\n\r\n# PR Description\r\n\r\nPlease describe the changes proposed in the pull request:\r\n\r\nAim:\r\n- Set multi-test environment\r\n- Compat different dependencies and systems\r\n\r\nToDo/Doing:\r\n- [x] Set latest env: test pyjanitor work with the latest dependencies to get the minimal python version\r\n- [ ] Set minimal env: get the minimal version of dependencies\r\n\r\nPart of #1133\r\n\r\n# PR Checklist\r\n\r\n<!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with\r\nthe project, please feel free to delete this section. -->\r\n\r\nPlease ensure that you have done the following:\r\n\r\n1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.\r\n<!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. -->\r\n2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`.\r\n<!-- We'd like to acknowledge your contributions! -->\r\n3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is \"on deck\") describing the contribution.\r\n - Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.\r\n\r\n# Automatic checks\r\n\r\nThere will be automatic checks run on the PR. These include:\r\n\r\n- Building a preview of the docs on Netlify\r\n- Automatically linting the code\r\n- Making sure the code is documented\r\n- Making sure that all tests are passed\r\n- Making sure that code coverage doesn't go down.\r\n\r\n# Relevant Reviewers\r\n\r\n<!-- Finally, please tag relevant maintainers to review. 
-->\r\n\r\nPlease tag maintainers to review.\r\n\r\n- @ericmjl\r\n\n", "before_files": [{"content": "import warnings\nfrom enum import Enum\nfrom typing import Hashable, Iterable, Union\n\nimport pandas_flavor as pf\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom janitor.utils import check, check_column, deprecated_alias\n\n\[email protected]_dataframe_method\n@deprecated_alias(columns=\"column_names\")\ndef encode_categorical(\n df: pd.DataFrame,\n column_names: Union[str, Iterable[str], Hashable] = None,\n **kwargs,\n) -> pd.DataFrame:\n \"\"\"Encode the specified columns with Pandas' [category dtype][cat].\n\n [cat]: http://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html\n\n It is syntactic sugar around `pd.Categorical`.\n\n This method does not mutate the original DataFrame.\n\n Simply pass a string, or a sequence of column names to `column_names`;\n alternatively, you can pass kwargs, where the keys are the column names\n and the values can either be None, `sort`, `appearance`\n or a 1-D array-like object.\n\n - None: column is cast to an unordered categorical.\n - `sort`: column is cast to an ordered categorical,\n with the order defined by the sort-order of the categories.\n - `appearance`: column is cast to an ordered categorical,\n with the order defined by the order of appearance\n in the original column.\n - 1d-array-like object: column is cast to an ordered categorical,\n with the categories and order as specified\n in the input array.\n\n `column_names` and `kwargs` parameters cannot be used at the same time.\n\n Example: Using `column_names`\n\n >>> import pandas as pd\n >>> import janitor\n >>> df = pd.DataFrame({\n ... \"foo\": [\"b\", \"b\", \"a\", \"c\", \"b\"],\n ... \"bar\": range(4, 9),\n ... })\n >>> df\n foo bar\n 0 b 4\n 1 b 5\n 2 a 6\n 3 c 7\n 4 b 8\n >>> df.dtypes\n foo object\n bar int64\n dtype: object\n >>> enc_df = df.encode_categorical(column_names=\"foo\")\n >>> enc_df.dtypes\n foo category\n bar int64\n dtype: object\n >>> enc_df[\"foo\"].cat.categories\n Index(['a', 'b', 'c'], dtype='object')\n >>> enc_df[\"foo\"].cat.ordered\n False\n\n Example: Using `kwargs` to specify an ordered categorical.\n\n >>> import pandas as pd\n >>> import janitor\n >>> df = pd.DataFrame({\n ... \"foo\": [\"b\", \"b\", \"a\", \"c\", \"b\"],\n ... \"bar\": range(4, 9),\n ... })\n >>> df.dtypes\n foo object\n bar int64\n dtype: object\n >>> enc_df = df.encode_categorical(foo=\"appearance\")\n >>> enc_df.dtypes\n foo category\n bar int64\n dtype: object\n >>> enc_df[\"foo\"].cat.categories\n Index(['b', 'a', 'c'], dtype='object')\n >>> enc_df[\"foo\"].cat.ordered\n True\n\n :param df: A pandas DataFrame object.\n :param column_names: A column name or an iterable (list or tuple)\n of column names.\n :param **kwargs: A mapping from column name to either `None`,\n `'sort'` or `'appearance'`, or a 1-D array. 
This is useful\n in creating categorical columns that are ordered, or\n if the user needs to explicitly specify the categories.\n :returns: A pandas DataFrame.\n :raises ValueError: If both `column_names` and `kwargs` are provided.\n \"\"\" # noqa: E501\n\n if all((column_names, kwargs)):\n raise ValueError(\n \"Only one of `column_names` or `kwargs` can be provided.\"\n )\n # column_names deal with only category dtype (unordered)\n # kwargs takes care of scenarios where user wants an ordered category\n # or user supplies specific categories to create the categorical\n if column_names is not None:\n check(\"column_names\", column_names, [list, tuple, Hashable])\n if isinstance(column_names, Hashable):\n column_names = [column_names]\n check_column(df, column_names)\n dtypes = {col: \"category\" for col in column_names}\n return df.astype(dtypes)\n\n return _computations_as_categorical(df, **kwargs)\n\n\ndef _computations_as_categorical(df: pd.DataFrame, **kwargs) -> pd.DataFrame:\n \"\"\"\n This function handles cases where\n categorical columns are created with an order,\n or specific values supplied for the categories.\n It uses a kwarg, where the key is the column name,\n and the value is either a string, or a 1D array.\n The default for value is None and will return a categorical dtype\n with no order and categories inferred from the column.\n A DataFrame, with categorical columns, is returned.\n \"\"\"\n\n categories_dict = _as_categorical_checks(df, **kwargs)\n\n categories_dtypes = {}\n\n for column_name, value in categories_dict.items():\n if value is None:\n cat_dtype = pd.CategoricalDtype()\n elif isinstance(value, str):\n if value == _CategoryOrder.SORT.value:\n _, cat_dtype = df[column_name].factorize(sort=True)\n else:\n _, cat_dtype = df[column_name].factorize(sort=False)\n if cat_dtype.empty:\n raise ValueError(\n \"Kindly ensure there is at least \"\n f\"one non-null value in {column_name}.\"\n )\n cat_dtype = pd.CategoricalDtype(categories=cat_dtype, ordered=True)\n\n else: # 1-D array\n cat_dtype = pd.CategoricalDtype(categories=value, ordered=True)\n\n categories_dtypes[column_name] = cat_dtype\n\n return df.astype(categories_dtypes)\n\n\ndef _as_categorical_checks(df: pd.DataFrame, **kwargs) -> dict:\n \"\"\"\n This function raises errors if columns in `kwargs` are\n absent from the dataframe's columns.\n It also raises errors if the value in `kwargs`\n is not a string (`'appearance'` or `'sort'`), or a 1D array.\n\n This function is executed before proceeding to the computation phase.\n\n If all checks pass, a dictionary of column names and value is returned.\n\n :param df: The pandas DataFrame object.\n :param **kwargs: A pairing of column name and value.\n :returns: A dictionary.\n :raises TypeError: If `value` is not a 1-D array, or a string.\n :raises ValueError: If `value` is a 1-D array, and contains nulls,\n or is non-unique.\n \"\"\"\n\n check_column(df, kwargs)\n\n categories_dict = {}\n\n for column_name, value in kwargs.items():\n # type check\n if (value is not None) and not (\n is_list_like(value) or isinstance(value, str)\n ):\n raise TypeError(f\"{value} should be list-like or a string.\")\n if is_list_like(value):\n if not hasattr(value, \"shape\"):\n value = pd.Index([*value])\n\n arr_ndim = value.ndim\n if (arr_ndim != 1) or isinstance(value, pd.MultiIndex):\n raise ValueError(\n f\"{value} is not a 1-D array. 
\"\n \"Kindly provide a 1-D array-like object.\"\n )\n\n if not isinstance(value, (pd.Series, pd.Index)):\n value = pd.Index(value)\n\n if value.hasnans:\n raise ValueError(\n \"Kindly ensure there are no nulls in the array provided.\"\n )\n\n if not value.is_unique:\n raise ValueError(\n \"Kindly provide unique, \"\n \"non-null values for the array provided.\"\n )\n\n if value.empty:\n raise ValueError(\n \"Kindly ensure there is at least \"\n \"one non-null value in the array provided.\"\n )\n\n # uniques, without nulls\n uniques = df[column_name].factorize(sort=False)[-1]\n if uniques.empty:\n raise ValueError(\n \"Kindly ensure there is at least \"\n f\"one non-null value in {column_name}.\"\n )\n\n missing = uniques.difference(value, sort=False)\n if not missing.empty and (uniques.size > missing.size):\n warnings.warn(\n f\"Values {tuple(missing)} are missing from \"\n f\"the provided categories {value} \"\n f\"for {column_name}; this may create nulls \"\n \"in the new categorical column.\",\n UserWarning,\n stacklevel=2,\n )\n\n elif uniques.equals(missing):\n warnings.warn(\n f\"None of the values in {column_name} are in \"\n f\"{value}; this might create nulls for all values \"\n f\"in the new categorical column.\",\n UserWarning,\n stacklevel=2,\n )\n\n elif isinstance(value, str):\n category_order_types = {ent.value for ent in _CategoryOrder}\n if value.lower() not in category_order_types:\n raise ValueError(\n \"Argument should be one of 'appearance' or 'sort'.\"\n )\n\n categories_dict[column_name] = value\n\n return categories_dict\n\n\nclass _CategoryOrder(Enum):\n \"\"\"\n order types for encode_categorical.\n \"\"\"\n\n SORT = \"sort\"\n APPEARANCE = \"appearance\"\n", "path": "janitor/functions/encode_categorical.py"}]} | 3,834 | 259 |
gh_patches_debug_14801 | rasdani/github-patches | git_diff | scikit-hep__awkward-2274 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tests are failing in "Build Docs", possibly due to new Sphinx theme
Is pydata_sphinx_theme broken, perhaps? It had a 0.13 release 12 hours ago.
_Originally posted by @henryiii in https://github.com/scikit-hep/awkward/issues/2268#issuecomment-1448934363_
For example, https://github.com/scikit-hep/awkward/actions/runs/4297198800/jobs/7489883737
This open issue, https://github.com/pydata/pydata-sphinx-theme/issues/1149, has the same error message and they say
> I think it ought to be possible to run our basic dev commands (test, docs, docs-live) in the dev environment we recommend contributors to use
which sounds like something we do.
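For what it's worth, pydata-sphinx-theme 0.13 renamed several `html_theme_options` keys — the `footer_items` → `footer_start` rename matches the patch shown further down in this record. A version-tolerant guard in `docs/conf.py` is one way to picture the incompatibility; this is only a sketch, and the `packaging`/`__version__` usage here is an assumption, not project code:

```python
# Sketch only: tolerate both pre- and post-0.13 pydata-sphinx-theme option names.
# The key rename (footer_items -> footer_start) is taken from the diff below; the
# version check itself is illustrative and not part of the repository.
import pydata_sphinx_theme
from packaging.version import Version

footer = ["copyright", "sphinx-version", "funding"]
if Version(pydata_sphinx_theme.__version__) >= Version("0.13"):
    html_theme_options = {"footer_start": footer}
else:
    html_theme_options = {"footer_items": footer}
```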
</issue>
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import awkward
14 import datetime
15 import os
16 import runpy
17 import pathlib
18
19 # -- Project information -----------------------------------------------------
20
21 project = "Awkward Array"
22 copyright = f"{datetime.datetime.now().year}, Awkward Array development team"
23 author = "Jim Pivarski"
24
25 parts = awkward.__version__.split(".")
26 version = ".".join(parts[:2])
27 release = ".".join(parts)
28
29 # -- General configuration ---------------------------------------------------
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
33 # ones.
34 extensions = [
35 "sphinx_copybutton",
36 "sphinx_design",
37 "sphinx_external_toc",
38 "sphinx.ext.intersphinx",
39 "myst_nb",
40 # Preserve old links
41 "jupyterlite_sphinx",
42 "IPython.sphinxext.ipython_console_highlighting",
43 "IPython.sphinxext.ipython_directive",
44 ]
45
46 # Allow the CI to set version_match="main"
47 if "DOCS_VERSION" in os.environ:
48 version_match = os.environ["DOCS_VERSION"]
49 else:
50 version_match = version
51
52
53 # Specify a canonical version
54 if "DOCS_CANONICAL_VERSION" in os.environ:
55 canonical_version = os.environ["DOCS_CANONICAL_VERSION"]
56 html_baseurl = f"https://awkward-array.org/doc/{canonical_version}/"
57
58 # Build sitemap on main
59 if version_match == canonical_version:
60 extensions.append("sphinx_sitemap")
61 # Sitemap URLs are relative to `html_baseurl`
62 sitemap_url_scheme = "{link}"
63
64 # Add any paths that contain templates here, relative to this directory.
65 templates_path = ["_templates"]
66
67 # List of patterns, relative to source directory, that match files and
68 # directories to ignore when looking for source files.
69 # This pattern also affects html_static_path and html_extra_path.
70 exclude_patterns = ["_build", "_templates", "Thumbs.db", "jupyter_execute", ".*"]
71
72 # -- Options for HTML output -------------------------------------------------
73
74 # The theme to use for HTML and HTML Help pages. See the documentation for
75 # a list of builtin themes.
76
77 html_context = {
78 "github_user": "scikit-hep",
79 "github_repo": "awkward",
80 "github_version": "main",
81 "doc_path": "docs",
82 }
83 html_theme = "pydata_sphinx_theme"
84 html_show_sourcelink = True
85 html_theme_options = {
86 "logo": {
87 "image_light": "image/logo-300px.png",
88 "image_dark": "image/logo-300px-white.png",
89 },
90 "github_url": "https://github.com/scikit-hep/awkward",
91 # Add light/dark mode and documentation version switcher:
92 "navbar_end": ["theme-switcher", "navbar-icon-links"],
93 "footer_items": ["copyright", "sphinx-version", "funding"],
94 "icon_links": [
95 {
96 "name": "PyPI",
97 "url": "https://pypi.org/project/awkward",
98 "icon": "fab fa-python",
99 }
100 ],
101 "use_edit_page_button": True,
102 "external_links": [
103 {
104 "name": "Contributor guide",
105 "url": "https://github.com/scikit-hep/awkward/blob/main/CONTRIBUTING.md",
106 },
107 {
108 "name": "Release history",
109 "url": "https://github.com/scikit-hep/awkward/releases",
110 },
111 ],
112 }
113
114 # Disable analytics for previews
115 if "DOCS_REPORT_ANALYTICS" in os.environ:
116 html_theme_options["analytics"] = {
117 "plausible_analytics_domain": "awkward-array.org",
118 "plausible_analytics_url": "https://views.scientific-python.org/js/plausible.js",
119 }
120
121 # Don't show version for offline builds by default
122 if "DOCS_SHOW_VERSION" in os.environ:
123 html_theme_options["switcher"] = {
124 "json_url": "https://awkward-array.org/doc/switcher.json",
125 "version_match": version_match,
126 }
127 html_theme_options["navbar_start"] = ["navbar-logo", "version-switcher"]
128
129 # Add any paths that contain custom static files (such as style sheets) here,
130 # relative to this directory. They are copied after the builtin static files,
131 # so a file named "default.css" will overwrite the builtin "default.css".
132 html_static_path = ["_static"]
133 html_css_files = ["css/awkward.css"]
134
135 # MyST settings
136 myst_enable_extensions = ["colon_fence"]
137
138 nb_execution_mode = "cache"
139 nb_execution_raise_on_error = True
140 # unpkg is currently _very_ slow
141 nb_ipywidgets_js = {
142 # Load RequireJS, used by the IPywidgets for dependency management
143 "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js": {
144 "integrity": "sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=",
145 "crossorigin": "anonymous",
146 },
147 # Load IPywidgets bundle for embedding.
148 "https://cdn.jsdelivr.net/npm/@jupyter-widgets/[email protected]/dist/embed-amd.js": {
149 "data-jupyter-widgets-cdn": "https://cdn.jsdelivr.net/npm/",
150 "crossorigin": "anonymous",
151 },
152 }
153 nb_execution_show_tb = True
154
155 # Additional stuff
156 master_doc = "index"
157
158 # Cross-reference existing Python objects
159 intersphinx_mapping = {
160 "python": ("https://docs.python.org/3/", None),
161 "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
162 "numpy": ("https://numpy.org/doc/stable", None),
163 "scipy": ("https://docs.scipy.org/doc/scipy", None),
164 "numba": ("https://numba.pydata.org/numba-doc/latest", None),
165 "arrow": ("https://arrow.apache.org/docs/", None),
166 "jax": ("https://jax.readthedocs.io/en/latest", None),
167 }
168
169
170 # JupyterLite configuration
171 jupyterlite_dir = "./lite"
172 # Don't override ipynb format
173 jupyterlite_bind_ipynb_suffix = False
174 # We've disabled localstorage, so we must provide the contents explicitly
175 jupyterlite_contents = ["getting-started/demo/*"]
176
177 linkcheck_ignore = [
178 r"^https?:\/\/github\.com\/.*$",
179 r"^getting-started\/try-awkward-array\.html$", # Relative link won't resolve
180 r"^https?:\/\/$", # Bare https:// allowed
181 ]
182 # Eventually we need to revisit these
183 if (datetime.date.today() - datetime.date(2022, 12, 13)) < datetime.timedelta(days=30):
184 linkcheck_ignore.extend(
185 [
186 r"^https:\/\/doi.org\/10\.1051\/epjconf\/202024505023$",
187 r"^https:\/\/doi.org\/10\.1051\/epjconf\/202125103002$",
188 ]
189 )
190
191 # Generate Python docstrings
192 HERE = pathlib.Path(__file__).parent
193 runpy.run_path(HERE / "prepare_docstrings.py", run_name="__main__")
194
195
196 # Sphinx doesn't usually want content to fit the screen, so we hack the styles for this page
197 def install_jupyterlite_styles(app, pagename, templatename, context, event_arg) -> None:
198 if pagename != "getting-started/try-awkward-array":
199 return
200
201 app.add_css_file("css/try-awkward-array.css")
202
203
204 def setup(app):
205 app.connect("html-page-context", install_jupyterlite_styles)
206
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -84,13 +84,13 @@
html_show_sourcelink = True
html_theme_options = {
"logo": {
- "image_light": "image/logo-300px.png",
- "image_dark": "image/logo-300px-white.png",
+ "image_light": "_static/image/logo-300px.png",
+ "image_dark": "_static/image/logo-300px-white.png",
},
"github_url": "https://github.com/scikit-hep/awkward",
# Add light/dark mode and documentation version switcher:
"navbar_end": ["theme-switcher", "navbar-icon-links"],
- "footer_items": ["copyright", "sphinx-version", "funding"],
+ "footer_start": ["copyright", "sphinx-version", "funding"],
"icon_links": [
{
"name": "PyPI",
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -84,13 +84,13 @@\n html_show_sourcelink = True\n html_theme_options = {\n \"logo\": {\n- \"image_light\": \"image/logo-300px.png\",\n- \"image_dark\": \"image/logo-300px-white.png\",\n+ \"image_light\": \"_static/image/logo-300px.png\",\n+ \"image_dark\": \"_static/image/logo-300px-white.png\",\n },\n \"github_url\": \"https://github.com/scikit-hep/awkward\",\n # Add light/dark mode and documentation version switcher:\n \"navbar_end\": [\"theme-switcher\", \"navbar-icon-links\"],\n- \"footer_items\": [\"copyright\", \"sphinx-version\", \"funding\"],\n+ \"footer_start\": [\"copyright\", \"sphinx-version\", \"funding\"],\n \"icon_links\": [\n {\n \"name\": \"PyPI\",\n", "issue": "Tests are failing in \"Build Docs\", possibly due to new Sphinx theme\n Is pydata_sphinx_theme broken, perhaps? It had a 0.13 release 12 hours ago.\r\n\r\n_Originally posted by @henryiii in https://github.com/scikit-hep/awkward/issues/2268#issuecomment-1448934363_\r\n\r\nFor example, https://github.com/scikit-hep/awkward/actions/runs/4297198800/jobs/7489883737\r\n\r\nThis open issue, https://github.com/pydata/pydata-sphinx-theme/issues/1149, has the same error message and they say\r\n\r\n> I think it ought to be possible to run our basic dev commands (test, docs, docs-live) in the dev environment we recommend contributors to use\r\n\r\nwhich sounds like something we do.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport awkward\nimport datetime\nimport os\nimport runpy\nimport pathlib\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Awkward Array\"\ncopyright = f\"{datetime.datetime.now().year}, Awkward Array development team\"\nauthor = \"Jim Pivarski\"\n\nparts = awkward.__version__.split(\".\")\nversion = \".\".join(parts[:2])\nrelease = \".\".join(parts)\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx_copybutton\",\n \"sphinx_design\",\n \"sphinx_external_toc\",\n \"sphinx.ext.intersphinx\",\n \"myst_nb\",\n # Preserve old links\n \"jupyterlite_sphinx\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n \"IPython.sphinxext.ipython_directive\",\n]\n\n# Allow the CI to set version_match=\"main\"\nif \"DOCS_VERSION\" in os.environ:\n version_match = os.environ[\"DOCS_VERSION\"]\nelse:\n version_match = version\n\n\n# Specify a canonical version\nif \"DOCS_CANONICAL_VERSION\" in os.environ:\n canonical_version = os.environ[\"DOCS_CANONICAL_VERSION\"]\n html_baseurl = f\"https://awkward-array.org/doc/{canonical_version}/\"\n\n # Build sitemap on main\n if version_match == canonical_version:\n extensions.append(\"sphinx_sitemap\")\n # Sitemap URLs are relative to `html_baseurl`\n sitemap_url_scheme = \"{link}\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"_templates\", \"Thumbs.db\", \"jupyter_execute\", \".*\"]\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\nhtml_context = {\n \"github_user\": \"scikit-hep\",\n \"github_repo\": \"awkward\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs\",\n}\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_show_sourcelink = True\nhtml_theme_options = {\n \"logo\": {\n \"image_light\": \"image/logo-300px.png\",\n \"image_dark\": \"image/logo-300px-white.png\",\n },\n \"github_url\": \"https://github.com/scikit-hep/awkward\",\n # Add light/dark mode and documentation version switcher:\n \"navbar_end\": [\"theme-switcher\", \"navbar-icon-links\"],\n \"footer_items\": [\"copyright\", \"sphinx-version\", \"funding\"],\n \"icon_links\": [\n {\n \"name\": \"PyPI\",\n \"url\": \"https://pypi.org/project/awkward\",\n \"icon\": \"fab fa-python\",\n }\n ],\n \"use_edit_page_button\": True,\n \"external_links\": [\n {\n \"name\": \"Contributor guide\",\n \"url\": \"https://github.com/scikit-hep/awkward/blob/main/CONTRIBUTING.md\",\n },\n {\n \"name\": \"Release history\",\n \"url\": \"https://github.com/scikit-hep/awkward/releases\",\n },\n ],\n}\n\n# Disable analytics for previews\nif \"DOCS_REPORT_ANALYTICS\" in os.environ:\n html_theme_options[\"analytics\"] = {\n \"plausible_analytics_domain\": \"awkward-array.org\",\n \"plausible_analytics_url\": \"https://views.scientific-python.org/js/plausible.js\",\n }\n\n# Don't show version for offline builds by default\nif \"DOCS_SHOW_VERSION\" in os.environ:\n html_theme_options[\"switcher\"] = {\n \"json_url\": \"https://awkward-array.org/doc/switcher.json\",\n \"version_match\": version_match,\n }\n html_theme_options[\"navbar_start\"] = [\"navbar-logo\", \"version-switcher\"]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"css/awkward.css\"]\n\n# MyST settings\nmyst_enable_extensions = [\"colon_fence\"]\n\nnb_execution_mode = \"cache\"\nnb_execution_raise_on_error = True\n# unpkg is currently _very_ slow\nnb_ipywidgets_js = {\n # Load RequireJS, used by the IPywidgets for dependency management\n \"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js\": {\n \"integrity\": \"sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=\",\n \"crossorigin\": \"anonymous\",\n },\n # Load IPywidgets bundle for embedding.\n \"https://cdn.jsdelivr.net/npm/@jupyter-widgets/[email protected]/dist/embed-amd.js\": {\n \"data-jupyter-widgets-cdn\": \"https://cdn.jsdelivr.net/npm/\",\n \"crossorigin\": \"anonymous\",\n },\n}\nnb_execution_show_tb = True\n\n# Additional stuff\nmaster_doc = \"index\"\n\n# Cross-reference existing Python objects\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable\", None),\n \"numpy\": (\"https://numpy.org/doc/stable\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy\", None),\n \"numba\": (\"https://numba.pydata.org/numba-doc/latest\", None),\n \"arrow\": (\"https://arrow.apache.org/docs/\", None),\n \"jax\": (\"https://jax.readthedocs.io/en/latest\", None),\n}\n\n\n# JupyterLite configuration\njupyterlite_dir = \"./lite\"\n# Don't override ipynb format\njupyterlite_bind_ipynb_suffix = False\n# We've disabled localstorage, so we must provide the contents explicitly\njupyterlite_contents = [\"getting-started/demo/*\"]\n\nlinkcheck_ignore = [\n r\"^https?:\\/\\/github\\.com\\/.*$\",\n r\"^getting-started\\/try-awkward-array\\.html$\", # Relative link won't resolve\n r\"^https?:\\/\\/$\", # Bare https:// allowed\n]\n# Eventually we need to revisit these\nif (datetime.date.today() - datetime.date(2022, 12, 13)) < datetime.timedelta(days=30):\n linkcheck_ignore.extend(\n [\n r\"^https:\\/\\/doi.org\\/10\\.1051\\/epjconf\\/202024505023$\",\n r\"^https:\\/\\/doi.org\\/10\\.1051\\/epjconf\\/202125103002$\",\n ]\n )\n\n# Generate Python docstrings\nHERE = pathlib.Path(__file__).parent\nrunpy.run_path(HERE / \"prepare_docstrings.py\", run_name=\"__main__\")\n\n\n# Sphinx doesn't usually want content to fit the screen, so we hack the styles for this page\ndef install_jupyterlite_styles(app, pagename, templatename, context, event_arg) -> None:\n if pagename != \"getting-started/try-awkward-array\":\n return\n\n app.add_css_file(\"css/try-awkward-array.css\")\n\n\ndef setup(app):\n app.connect(\"html-page-context\", install_jupyterlite_styles)\n", "path": "docs/conf.py"}]} | 3,066 | 225 |
gh_patches_debug_50321 | rasdani/github-patches | git_diff | pwndbg__pwndbg-979 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add-symbol-file should not specify a base address
This happens when connecting to a remote target:
```
hacker@babyrop_level10:~$ gdb -ex 'target remote :1234'
...
add-symbol-file /tmp/tmp_yea7f3g/babyrop_level10.0 0x555555554000
...
pwndbg> b main
Breakpoint 1 at 0x555555555581 (2 locations)
pwndbg> info b
Num Type Disp Enb Address What
1 breakpoint keep y <MULTIPLE>
1.1 y 0x0000555555555581 <main>
1.2 y 0x00005555555567c1
```
This double breakpoint results in `\xcc` bytes incorrectly polluting memory, and I've seen this corrupt the GOT and crash my program as a result.
https://github.com/pwndbg/pwndbg/blob/05036defa01d4d47bfad56867f53470a29fcdc89/pwndbg/symbol.py#L261
Why is the base address being specified here? According to the help info for `add-symbol-file`, if anything is specified for `[ADDR]`, it should be the location of the `.text` section.
```
(gdb) help add-symbol-file
Load symbols from FILE, assuming FILE has been dynamically loaded.
Usage: add-symbol-file FILE [-readnow | -readnever] [-o OFF] [ADDR] [-s SECT-NAME SECT-ADDR]...
ADDR is the starting address of the file's text.
Each '-s' argument provides a section name and address, and
should be specified if the data and bss segments are not contiguous
with the text. SECT-NAME is a section name to be loaded at SECT-ADDR.
OFF is an optional offset which is added to the default load addresses
of all sections for which no other address was specified.
The '-readnow' option will cause GDB to read the entire symbol file
immediately. This makes the command slower, but may make future operations
faster.
The '-readnever' option will prevent GDB from reading the symbol file's
symbolic debug information.
```
If we just omit the address, `gdb` will automatically find the `.text` section and use that address. Things would probably fail if there isn't a `.text` section defined, but I'm not really sure what the correct solution would be in this case anyways.
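Concretely, the suggestion is just to drop the second argument when pwndbg issues the command. A before/after sketch of `add_main_exe_to_symbols` in `pwndbg/symbol.py` (illustrative only; `path` and `addr` are the variables used in that function):

```python
# Before: the lowest mapping address is passed as ADDR, so GDB treats it as the
# .text address and places symbols at the wrong offsets -- hence the duplicate
# breakpoint locations shown earlier.
gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)

# After: omit ADDR and let GDB read the .text address out of the ELF itself.
gdb.execute('add-symbol-file %s' % (path,), from_tty=False, to_string=True)
```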
</issue>
<code>
[start of pwndbg/symbol.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Looking up addresses for function names / symbols, and
5 vice-versa.
6
7 Uses IDA when available if there isn't sufficient symbol
8 information available.
9 """
10 import os
11 import re
12 import shutil
13 import tempfile
14
15 import elftools.common.exceptions
16 import elftools.elf.constants
17 import elftools.elf.elffile
18 import elftools.elf.segments
19 import gdb
20
21 import pwndbg.arch
22 import pwndbg.elf
23 import pwndbg.events
24 import pwndbg.file
25 import pwndbg.ida
26 import pwndbg.memoize
27 import pwndbg.memory
28 import pwndbg.qemu
29 import pwndbg.remote
30 import pwndbg.stack
31 import pwndbg.vmmap
32
33
34 def get_directory():
35 """
36 Retrieve the debug file directory path.
37
38 The debug file directory path ('show debug-file-directory') is a comma-
39 separated list of directories which GDB will look in to find the binaries
40 currently loaded.
41 """
42 result = gdb.execute('show debug-file-directory', to_string=True, from_tty=False)
43 expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
44
45 match = re.search(expr, result)
46
47 if match:
48 return match.group(1)
49 return ''
50
51 def set_directory(d):
52 gdb.execute('set debug-file-directory %s' % d, to_string=True, from_tty=False)
53
54 def add_directory(d):
55 current = get_directory()
56 if current:
57 set_directory('%s:%s' % (current, d))
58 else:
59 set_directory(d)
60
61 remote_files = {}
62 remote_files_dir = None
63
64 @pwndbg.events.exit
65 def reset_remote_files():
66 global remote_files
67 global remote_files_dir
68 remote_files = {}
69 if remote_files_dir is not None:
70 shutil.rmtree(remote_files_dir)
71 remote_files_dir = None
72
73 @pwndbg.events.new_objfile
74 def autofetch():
75 """
76 """
77 global remote_files_dir
78 if not pwndbg.remote.is_remote():
79 return
80
81 if pwndbg.qemu.is_qemu_usermode():
82 return
83
84 if pwndbg.android.is_android():
85 return
86
87 if not remote_files_dir:
88 remote_files_dir = tempfile.mkdtemp()
89 add_directory(remote_files_dir)
90
91 searchpath = get_directory()
92
93 for mapping in pwndbg.vmmap.get():
94 objfile = mapping.objfile
95
96 # Don't attempt to download things like '[stack]' and '[heap]'
97 if not objfile.startswith('/'):
98 continue
99
100 # Don't re-download things that we have already downloaded
101 if not objfile or objfile in remote_files:
102 continue
103
104 msg = "Downloading %r from the remote server" % objfile
105 print(msg, end='')
106
107 try:
108 data = pwndbg.file.get(objfile)
109 print('\r' + msg + ': OK')
110 except OSError:
111 # The file could not be downloaded :(
112 print('\r' + msg + ': Failed')
113 return
114
115 filename = os.path.basename(objfile)
116 local_path = os.path.join(remote_files_dir, filename)
117
118 with open(local_path, 'wb+') as f:
119 f.write(data)
120
121 remote_files[objfile] = local_path
122
123 base = None
124 for mapping in pwndbg.vmmap.get():
125 if mapping.objfile != objfile:
126 continue
127
128 if base is None or mapping.vaddr < base.vaddr:
129 base = mapping
130
131 if not base:
132 continue
133
134 base = base.vaddr
135
136 try:
137 elf = elftools.elf.elffile.ELFFile(open(local_path, 'rb'))
138 except elftools.common.exceptions.ELFError:
139 continue
140
141 gdb_command = ['add-symbol-file', local_path, hex(int(base))]
142 for section in elf.iter_sections():
143 name = section.name #.decode('latin-1')
144 section = section.header
145 if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:
146 continue
147 gdb_command += ['-s', name, hex(int(base + section.sh_addr))]
148
149 print(' '.join(gdb_command))
150 # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)
151
152 @pwndbg.memoize.reset_on_objfile
153 def get(address, gdb_only=False):
154 """
155 Retrieve the textual name for a symbol
156 """
157 # Fast path
158 if address < pwndbg.memory.MMAP_MIN_ADDR or address >= ((1 << 64)-1):
159 return ''
160
161 # Don't look up stack addresses
162 if pwndbg.stack.find(address):
163 return ''
164
165 # This sucks, but there's not a GDB API for this.
166 result = gdb.execute('info symbol %#x' % int(address), to_string=True, from_tty=False)
167
168 if not gdb_only and result.startswith('No symbol'):
169 address = int(address)
170 exe = pwndbg.elf.exe()
171 if exe:
172 exe_map = pwndbg.vmmap.find(exe.address)
173 if exe_map and address in exe_map:
174 res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
175 return res or ''
176
177 # Expected format looks like this:
178 # main in section .text of /bin/bash
179 # main + 3 in section .text of /bin/bash
180 # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6
181 # No symbol matches system-1.
182 a, b, c, _ = result.split(None, 3)
183
184
185 if b == '+':
186 return "%s+%s" % (a, c)
187 if b == 'in':
188 return a
189
190 return ''
191
192 @pwndbg.memoize.reset_on_objfile
193 def address(symbol, allow_unmapped=False):
194 if isinstance(symbol, int):
195 return symbol
196
197 try:
198 return int(symbol, 0)
199 except:
200 pass
201
202 try:
203 symbol_obj = gdb.lookup_symbol(symbol)[0]
204 if symbol_obj:
205 return int(symbol_obj.value().address)
206 except Exception:
207 pass
208
209 try:
210 result = gdb.execute('info address %s' % symbol, to_string=True, from_tty=False)
211 address = int(re.search('0x[0-9a-fA-F]+', result).group(), 0)
212
213 # The address found should lie in one of the memory maps
214 # There are cases when GDB shows offsets e.g.:
215 # pwndbg> info address tcache
216 # Symbol "tcache" is a thread-local variable at offset 0x40
217 # in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.
218 if not allow_unmapped and not pwndbg.vmmap.find(address):
219 return None
220
221 return address
222
223 except gdb.error:
224 return None
225
226 try:
227 address = pwndbg.ida.LocByName(symbol)
228 if address:
229 return address
230 except Exception:
231 pass
232
233 @pwndbg.events.stop
234 @pwndbg.memoize.reset_on_start
235 def add_main_exe_to_symbols():
236 if not pwndbg.remote.is_remote():
237 return
238
239 if pwndbg.android.is_android():
240 return
241
242 exe = pwndbg.elf.exe()
243
244 if not exe:
245 return
246
247 addr = exe.address
248
249 if not addr:
250 return
251
252 addr = int(addr)
253
254 mmap = pwndbg.vmmap.find(addr)
255 if not mmap:
256 return
257
258 path = mmap.objfile
259 if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):
260 try:
261 gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)
262 except gdb.error:
263 pass
264
265
266 @pwndbg.memoize.reset_on_stop
267 @pwndbg.memoize.reset_on_start
268 def selected_frame_source_absolute_filename():
269 """
270     Retrieve the symbol table’s source absolute file name from the selected frame.
271
272 In case of missing symbol table or frame information, None is returned.
273 """
274 try:
275 frame = gdb.selected_frame()
276 except gdb.error:
277 return None
278
279 if not frame:
280 return None
281
282 sal = frame.find_sal()
283 if not sal:
284 return None
285
286 symtab = sal.symtab
287 if not symtab:
288 return None
289
290 return symtab.fullname()
291
292
293 if '/usr/lib/debug' not in get_directory():
294 set_directory(get_directory() + ':/usr/lib/debug')
295
[end of pwndbg/symbol.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/symbol.py b/pwndbg/symbol.py
--- a/pwndbg/symbol.py
+++ b/pwndbg/symbol.py
@@ -258,7 +258,7 @@
path = mmap.objfile
if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):
try:
- gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)
+ gdb.execute('add-symbol-file %s' % (path,), from_tty=False, to_string=True)
except gdb.error:
pass
| {"golden_diff": "diff --git a/pwndbg/symbol.py b/pwndbg/symbol.py\n--- a/pwndbg/symbol.py\n+++ b/pwndbg/symbol.py\n@@ -258,7 +258,7 @@\n path = mmap.objfile\n if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):\n try:\n- gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)\n+ gdb.execute('add-symbol-file %s' % (path,), from_tty=False, to_string=True)\n except gdb.error:\n pass\n", "issue": "add-symbol-file should not specify a base address\nThis happens when connecting to a remote target:\r\n```\r\nhacker@babyrop_level10:~$ gdb -ex 'target remote :1234'\r\n...\r\nadd-symbol-file /tmp/tmp_yea7f3g/babyrop_level10.0 0x555555554000\r\n...\r\npwndbg> b main\r\nBreakpoint 1 at 0x555555555581 (2 locations)\r\npwndbg> info b\r\nNum Type Disp Enb Address What\r\n1 breakpoint keep y <MULTIPLE> \r\n1.1 y 0x0000555555555581 <main>\r\n1.2 y 0x00005555555567c1 \r\n```\r\nThis double breakpoint results in `\\xcc` bytes incorrectly polluting memory, and I've seen this corrupt the GOT and crash my program as a result.\r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/05036defa01d4d47bfad56867f53470a29fcdc89/pwndbg/symbol.py#L261\r\n\r\nWhy is the base address being specified here? According to the help info for `add-symbol-file`, if anything is specified for `[ADDR]`, it should be the location of the `.text` section.\r\n\r\n```\r\n(gdb) help add-symbol-file\r\nLoad symbols from FILE, assuming FILE has been dynamically loaded.\r\nUsage: add-symbol-file FILE [-readnow | -readnever] [-o OFF] [ADDR] [-s SECT-NAME SECT-ADDR]...\r\nADDR is the starting address of the file's text.\r\nEach '-s' argument provides a section name and address, and\r\nshould be specified if the data and bss segments are not contiguous\r\nwith the text. SECT-NAME is a section name to be loaded at SECT-ADDR.\r\nOFF is an optional offset which is added to the default load addresses\r\nof all sections for which no other address was specified.\r\nThe '-readnow' option will cause GDB to read the entire symbol file\r\nimmediately. This makes the command slower, but may make future operations\r\nfaster.\r\nThe '-readnever' option will prevent GDB from reading the symbol file's\r\nsymbolic debug information.\r\n```\r\n\r\nIf we just omit the address, `gdb` will automatically find the `.text` section and use that address. 
Things would probably fail if there isn't a `.text` section defined, but I'm not really sure what the correct solution would be in this case anyways.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nimport os\nimport re\nimport shutil\nimport tempfile\n\nimport elftools.common.exceptions\nimport elftools.elf.constants\nimport elftools.elf.elffile\nimport elftools.elf.segments\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.file\nimport pwndbg.ida\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.qemu\nimport pwndbg.remote\nimport pwndbg.stack\nimport pwndbg.vmmap\n\n\ndef get_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute('show debug-file-directory', to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return ''\n\ndef set_directory(d):\n gdb.execute('set debug-file-directory %s' % d, to_string=True, from_tty=False)\n\ndef add_directory(d):\n current = get_directory()\n if current:\n set_directory('%s:%s' % (current, d))\n else:\n set_directory(d)\n\nremote_files = {}\nremote_files_dir = None\n\[email protected]\ndef reset_remote_files():\n global remote_files\n global remote_files_dir\n remote_files = {}\n if remote_files_dir is not None:\n shutil.rmtree(remote_files_dir)\n remote_files_dir = None\n\[email protected]_objfile\ndef autofetch():\n \"\"\"\n \"\"\"\n global remote_files_dir\n if not pwndbg.remote.is_remote():\n return\n\n if pwndbg.qemu.is_qemu_usermode():\n return\n\n if pwndbg.android.is_android():\n return\n\n if not remote_files_dir:\n remote_files_dir = tempfile.mkdtemp()\n add_directory(remote_files_dir)\n\n searchpath = get_directory()\n\n for mapping in pwndbg.vmmap.get():\n objfile = mapping.objfile\n\n # Don't attempt to download things like '[stack]' and '[heap]'\n if not objfile.startswith('/'):\n continue\n\n # Don't re-download things that we have already downloaded\n if not objfile or objfile in remote_files:\n continue\n\n msg = \"Downloading %r from the remote server\" % objfile\n print(msg, end='')\n\n try:\n data = pwndbg.file.get(objfile)\n print('\\r' + msg + ': OK')\n except OSError:\n # The file could not be downloaded :(\n print('\\r' + msg + ': Failed')\n return\n\n filename = os.path.basename(objfile)\n local_path = os.path.join(remote_files_dir, filename)\n\n with open(local_path, 'wb+') as f:\n f.write(data)\n\n remote_files[objfile] = local_path\n\n base = None\n for mapping in pwndbg.vmmap.get():\n if mapping.objfile != objfile:\n continue\n\n if base is None or mapping.vaddr < base.vaddr:\n base = mapping\n\n if not base:\n continue\n\n base = base.vaddr\n\n try:\n elf = elftools.elf.elffile.ELFFile(open(local_path, 'rb'))\n except elftools.common.exceptions.ELFError:\n continue\n\n gdb_command = ['add-symbol-file', local_path, hex(int(base))]\n for section in elf.iter_sections():\n name = section.name #.decode('latin-1')\n section = section.header\n if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:\n continue\n 
gdb_command += ['-s', name, hex(int(base + section.sh_addr))]\n\n print(' '.join(gdb_command))\n # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)\n\[email protected]_on_objfile\ndef get(address, gdb_only=False):\n \"\"\"\n Retrieve the textual name for a symbol\n \"\"\"\n # Fast path\n if address < pwndbg.memory.MMAP_MIN_ADDR or address >= ((1 << 64)-1):\n return ''\n\n # Don't look up stack addresses\n if pwndbg.stack.find(address):\n return ''\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute('info symbol %#x' % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith('No symbol'):\n address = int(address)\n exe = pwndbg.elf.exe()\n if exe:\n exe_map = pwndbg.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or ''\n\n # Expected format looks like this:\n # main in section .text of /bin/bash\n # main + 3 in section .text of /bin/bash\n # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\n # No symbol matches system-1.\n a, b, c, _ = result.split(None, 3)\n\n\n if b == '+':\n return \"%s+%s\" % (a, c)\n if b == 'in':\n return a\n\n return ''\n\[email protected]_on_objfile\ndef address(symbol, allow_unmapped=False):\n if isinstance(symbol, int):\n return symbol\n\n try:\n return int(symbol, 0)\n except:\n pass\n\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except Exception:\n pass\n\n try:\n result = gdb.execute('info address %s' % symbol, to_string=True, from_tty=False)\n address = int(re.search('0x[0-9a-fA-F]+', result).group(), 0)\n\n # The address found should lie in one of the memory maps\n # There are cases when GDB shows offsets e.g.:\n # pwndbg> info address tcache\n # Symbol \"tcache\" is a thread-local variable at offset 0x40\n # in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.\n if not allow_unmapped and not pwndbg.vmmap.find(address):\n return None\n\n return address\n\n except gdb.error:\n return None\n\n try:\n address = pwndbg.ida.LocByName(symbol)\n if address:\n return address\n except Exception:\n pass\n\[email protected]\[email protected]_on_start\ndef add_main_exe_to_symbols():\n if not pwndbg.remote.is_remote():\n return\n\n if pwndbg.android.is_android():\n return\n\n exe = pwndbg.elf.exe()\n\n if not exe:\n return\n\n addr = exe.address\n\n if not addr:\n return\n\n addr = int(addr)\n\n mmap = pwndbg.vmmap.find(addr)\n if not mmap:\n return\n\n path = mmap.objfile\n if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):\n try:\n gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)\n except gdb.error:\n pass\n\n\[email protected]_on_stop\[email protected]_on_start\ndef selected_frame_source_absolute_filename():\n \"\"\"\n Retrieve the symbol table\u2019s source absolute file name from the selected frame.\n\n In case of missing symbol table or frame information, None is returned.\n \"\"\"\n try:\n frame = gdb.selected_frame()\n except gdb.error:\n return None\n\n if not frame:\n return None\n\n sal = frame.find_sal()\n if not sal:\n return None\n\n symtab = sal.symtab\n if not symtab:\n return None\n\n return symtab.fullname()\n\n\nif '/usr/lib/debug' not in get_directory():\n set_directory(get_directory() + ':/usr/lib/debug')\n", "path": "pwndbg/symbol.py"}]} | 3,810 | 139 |
gh_patches_debug_155 | rasdani/github-patches | git_diff | hylang__hy-1369 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Official support for evaluating strings of Hy code from Python
Is it possible to embed some hy code inside a python file? As opposed to having the whole file be full on hy?
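For reference, the patch further below re-exports `read_str` and `hy_eval` (as `eval`) from the top-level `hy` package. With names like those available, embedding Hy in a Python file can look roughly like this — a sketch only, since the exact import surface varies across Hy versions:

```python
import hy  # importing hy installs the .hy import hook and, with the patch, exposes read_str/eval

# Parse a string of Hy source into Hy model objects, then evaluate it from Python.
expr = hy.read_str("(+ 1 2 3)")
print(hy.eval(expr))  # expected to print 6
```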
</issue>
<code>
[start of hy/__init__.py]
1 __appname__ = 'hy'
2 try:
3 from hy.version import __version__
4 except ImportError:
5 __version__ = 'unknown'
6
7
8 from hy.models import HyExpression, HyInteger, HyKeyword, HyComplex, HyString, HyBytes, HySymbol, HyFloat, HyDict, HyList, HySet, HyCons # NOQA
9
10
11 import hy.importer # NOQA
12 # we import for side-effects.
13
[end of hy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/__init__.py b/hy/__init__.py
--- a/hy/__init__.py
+++ b/hy/__init__.py
@@ -10,3 +10,7 @@
import hy.importer # NOQA
# we import for side-effects.
+
+
+from hy.core.language import read, read_str # NOQA
+from hy.importer import hy_eval as eval # NOQA
| {"golden_diff": "diff --git a/hy/__init__.py b/hy/__init__.py\n--- a/hy/__init__.py\n+++ b/hy/__init__.py\n@@ -10,3 +10,7 @@\n \n import hy.importer # NOQA\n # we import for side-effects.\n+\n+\n+from hy.core.language import read, read_str # NOQA\n+from hy.importer import hy_eval as eval # NOQA\n", "issue": "Official support for evaluating strings of Hy code from Python\nIs it possible to embed some hy code inside a python file? As opposed to having the whole file be full on hy?\n", "before_files": [{"content": "__appname__ = 'hy'\ntry:\n from hy.version import __version__\nexcept ImportError:\n __version__ = 'unknown'\n\n\nfrom hy.models import HyExpression, HyInteger, HyKeyword, HyComplex, HyString, HyBytes, HySymbol, HyFloat, HyDict, HyList, HySet, HyCons # NOQA\n\n\nimport hy.importer # NOQA\n# we import for side-effects.\n", "path": "hy/__init__.py"}]} | 681 | 97 |
gh_patches_debug_22 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3114 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Futures python module is included in Python3
Insights-core currently installs the [futures module](https://pypi.org/project/futures/) in all cases for the [development] target in [setup.py](https://github.com/RedHatInsights/insights-core/blob/7dc392df90a2535014cc1ec7f5df9c03a9d3d95d/setup.py#L64). This module is only necessary for Python2 since it is included in Python3. This is only used in one place in [collect.py](https://github.com/RedHatInsights/insights-core/blob/7dc392df90a2535014cc1ec7f5df9c03a9d3d95d/insights/collect.py#L286).
The `futures` module states:
> It **does not** work on Python 3 due to Python 2 syntax being used in the codebase. Python 3 users should not attempt to install it, since the package is already included in the standard library.
When installed it causes the latest version of `pip` to fail when installing into a virtual environment:
```python
Installing build dependencies ... error
ERROR: Command errored out with exit status 1:
command: /home/bfahr/work/insights/insights-core/venv36/bin/python3.6 /home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip install --ignore-installed --no-user --prefix /tmp/pip-build-env-vujizkqz/overlay --no-warn-script-location --no-binary :none: --only-binary :none: -i https://pypi.org/simple -- 'setuptools>=40.8.0' wheel
cwd: None
Complete output (29 lines):
Traceback (most recent call last):
File "/usr/lib64/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib64/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/__main__.py", line 29, in <module>
from pip._internal.cli.main import main as _main
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/main.py", line 9, in <module>
from pip._internal.cli.autocompletion import autocomplete
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/autocompletion.py", line 10, in <module>
from pip._internal.cli.main_parser import create_main_parser
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/main_parser.py", line 8, in <module>
from pip._internal.cli import cmdoptions
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/cmdoptions.py", line 23, in <module>
from pip._internal.cli.parser import ConfigOptionParser
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/parser.py", line 12, in <module>
from pip._internal.configuration import Configuration, ConfigurationError
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/configuration.py", line 27, in <module>
from pip._internal.utils.misc import ensure_dir, enum
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/utils/misc.py", line 38, in <module>
from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_vendor/tenacity/__init__.py", line 35, in <module>
from concurrent import futures
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/concurrent/futures/__init__.py", line 8, in <module>
from concurrent.futures._base import (FIRST_COMPLETED,
File "/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/concurrent/futures/_base.py", line 357
raise type(self._exception), self._exception, self._traceback
^
SyntaxError: invalid syntax
----------------------------------------
```
It was only used to create a thread pool for parallel collection in the client. We don't currently use this feature and since `futures` is not installed by the client RPM it would never be used. It is included in the default python on RHEL8 so it could be used if so desired, but again we don't currently use it.
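For reference, the thread-pool use case needs no extra package on Python 3, since the same API ships in the standard library. A minimal sketch (the `collect_one`/`specs` names are illustrative placeholders, not the client's actual API):

```python
from concurrent.futures import ThreadPoolExecutor

def collect_all(specs, collect_one):
    # Run the per-spec collection callable across a small worker pool and
    # gather the results in order; mirrors what the futures backport provided.
    with ThreadPoolExecutor(max_workers=4) as pool:
        return list(pool.map(collect_one, specs))
```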
</issue>
<code>
[start of setup.py]
1 import os
2 import sys
3 from setuptools import setup, find_packages
4
5 __here__ = os.path.dirname(os.path.abspath(__file__))
6
7 package_info = dict.fromkeys(["RELEASE", "COMMIT", "VERSION", "NAME"])
8
9 for name in package_info:
10 with open(os.path.join(__here__, "insights", name)) as f:
11 package_info[name] = f.read().strip()
12
13 entry_points = {
14 'console_scripts': [
15 'insights-collect = insights.collect:main',
16 'insights-run = insights:main',
17 'insights = insights.command_parser:main',
18 'insights-cat = insights.tools.cat:main',
19 'insights-dupkeycheck = insights.tools.dupkeycheck:main',
20 'insights-inspect = insights.tools.insights_inspect:main',
21 'insights-info = insights.tools.query:main',
22 'insights-ocpshell= insights.ocpshell:main',
23 'client = insights.client:run',
24 'mangle = insights.util.mangle:main'
25 ]
26 }
27
28 runtime = set([
29 'six',
30 'requests',
31 'redis',
32 'cachecontrol',
33 'cachecontrol[redis]',
34 'cachecontrol[filecache]',
35 'defusedxml',
36 'lockfile',
37 'jinja2<=2.11.3',
38 ])
39
40 if (sys.version_info < (2, 7)):
41 runtime.add('pyyaml>=3.10,<=3.13')
42 else:
43 runtime.add('pyyaml')
44
45
46 def maybe_require(pkg):
47 try:
48 __import__(pkg)
49 except ImportError:
50 runtime.add(pkg)
51
52
53 maybe_require("importlib")
54 maybe_require("argparse")
55
56
57 client = set([
58 'requests',
59 'python-gnupg==0.4.6',
60 'oyaml'
61 ])
62
63 develop = set([
64 'futures==3.0.5',
65 'wheel',
66 ])
67
68 docs = set([
69 'docutils',
70 'Sphinx',
71 'nbsphinx',
72 'sphinx_rtd_theme',
73 'ipython',
74 'colorama',
75 'jinja2<=2.11.3',
76 'Pygments',
77 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19
78 # https://github.com/davidhalter/jedi/issues/1714
79 ])
80
81 testing = set([
82 'coverage==4.3.4',
83 'pytest==3.0.6',
84 'pytest-cov==2.4.0',
85 'mock==2.0.0',
86 ])
87
88 cluster = set([
89 'ansible',
90 'pandas',
91 'colorama',
92 ])
93
94 openshift = set([
95 'openshift'
96 ])
97
98 linting = set([
99 'flake8==2.6.2',
100 ])
101
102 optional = set([
103 'python-cjson',
104 'python-logstash',
105 'python-statsd',
106 'watchdog',
107 ])
108
109 if __name__ == "__main__":
110 # allows for runtime modification of rpm name
111 name = os.environ.get("INSIGHTS_CORE_NAME", package_info["NAME"])
112
113 setup(
114 name=name,
115 version=package_info["VERSION"],
116 description="Insights Core is a data collection and analysis framework",
117 long_description=open("README.rst").read(),
118 url="https://github.com/redhatinsights/insights-core",
119 author="Red Hat, Inc.",
120 author_email="[email protected]",
121 packages=find_packages(),
122 install_requires=list(runtime),
123 package_data={'': ['LICENSE']},
124 license='Apache 2.0',
125 extras_require={
126 'develop': list(runtime | develop | client | docs | linting | testing | cluster),
127 'develop26': list(runtime | develop | client | linting | testing | cluster),
128 'client': list(runtime | client),
129 'client-develop': list(runtime | develop | client | linting | testing),
130 'cluster': list(runtime | cluster),
131 'openshift': list(runtime | openshift),
132 'optional': list(optional),
133 'docs': list(docs),
134 'linting': list(linting | client),
135 'testing': list(testing | client)
136 },
137 classifiers=[
138 'Development Status :: 5 - Production/Stable',
139 'Intended Audience :: Developers',
140 'Natural Language :: English',
141 'License :: OSI Approved :: Apache Software License',
142 'Programming Language :: Python',
143 'Programming Language :: Python :: 2.6',
144 'Programming Language :: Python :: 2.7',
145 'Programming Language :: Python :: 3.3',
146 'Programming Language :: Python :: 3.4',
147 'Programming Language :: Python :: 3.5',
148 'Programming Language :: Python :: 3.6'
149 ],
150 entry_points=entry_points,
151 include_package_data=True
152 )
153
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,6 @@
])
develop = set([
- 'futures==3.0.5',
'wheel',
])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,6 @@\n ])\n \n develop = set([\n- 'futures==3.0.5',\n 'wheel',\n ])\n", "issue": "Futures python module is included in Python3\nInsights-core currently installs the [futures module](https://pypi.org/project/futures/) in all cases for the [development] target in [setup.py](https://github.com/RedHatInsights/insights-core/blob/7dc392df90a2535014cc1ec7f5df9c03a9d3d95d/setup.py#L64). This module is only necessary for Python2 since it is included in Python3. This is only used in one place in [collect.py](https://github.com/RedHatInsights/insights-core/blob/7dc392df90a2535014cc1ec7f5df9c03a9d3d95d/insights/collect.py#L286).\r\n\r\nThe `futures` module states:\r\n\r\n> It **does not** work on Python 3 due to Python 2 syntax being used in the codebase. Python 3 users should not attempt to install it, since the package is already included in the standard library.\r\n\r\nWhen installed it causes the latest version of `pip` to fail when installing into a virtual environment:\r\n\r\n```python\r\n Installing build dependencies ... error\r\n ERROR: Command errored out with exit status 1:\r\n command: /home/bfahr/work/insights/insights-core/venv36/bin/python3.6 /home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip install --ignore-installed --no-user --prefix /tmp/pip-build-env-vujizkqz/overlay --no-warn-script-location --no-binary :none: --only-binary :none: -i https://pypi.org/simple -- 'setuptools>=40.8.0' wheel\r\n cwd: None\r\n Complete output (29 lines):\r\n Traceback (most recent call last):\r\n File \"/usr/lib64/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib64/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/__main__.py\", line 29, in <module>\r\n from pip._internal.cli.main import main as _main\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/main.py\", line 9, in <module>\r\n from pip._internal.cli.autocompletion import autocomplete\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/autocompletion.py\", line 10, in <module>\r\n from pip._internal.cli.main_parser import create_main_parser\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/main_parser.py\", line 8, in <module>\r\n from pip._internal.cli import cmdoptions\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/cmdoptions.py\", line 23, in <module>\r\n from pip._internal.cli.parser import ConfigOptionParser\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/cli/parser.py\", line 12, in <module>\r\n from pip._internal.configuration import Configuration, ConfigurationError\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/configuration.py\", line 27, in <module>\r\n from pip._internal.utils.misc import ensure_dir, enum\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_internal/utils/misc.py\", line 38, in <module>\r\n from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed\r\n File 
\"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/pip/_vendor/tenacity/__init__.py\", line 35, in <module>\r\n from concurrent import futures\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/concurrent/futures/__init__.py\", line 8, in <module>\r\n from concurrent.futures._base import (FIRST_COMPLETED,\r\n File \"/home/bfahr/work/insights/insights-core/venv36/lib64/python3.6/site-packages/concurrent/futures/_base.py\", line 357\r\n raise type(self._exception), self._exception, self._traceback\r\n ^\r\n SyntaxError: invalid syntax\r\n ----------------------------------------\r\n```\r\n\r\nIt was only used to create a thread pool for parallel collection in the client. We don't currently use this feature and since `futures` is not installed by the client RPM it would never be used. It is included in the default python on RHEL8 so it could be used if so desired, but again we don't currently use it.\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-collect = insights.collect:main',\n 'insights-run = insights:main',\n 'insights = insights.command_parser:main',\n 'insights-cat = insights.tools.cat:main',\n 'insights-dupkeycheck = insights.tools.dupkeycheck:main',\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'six',\n 'requests',\n 'redis',\n 'cachecontrol',\n 'cachecontrol[redis]',\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n 'jinja2<=2.11.3',\n])\n\nif (sys.version_info < (2, 7)):\n runtime.add('pyyaml>=3.10,<=3.13')\nelse:\n runtime.add('pyyaml')\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests',\n 'python-gnupg==0.4.6',\n 'oyaml'\n])\n\ndevelop = set([\n 'futures==3.0.5',\n 'wheel',\n])\n\ndocs = set([\n 'docutils',\n 'Sphinx',\n 'nbsphinx',\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n 'jinja2<=2.11.3',\n 'Pygments',\n 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19\n # https://github.com/davidhalter/jedi/issues/1714\n])\n\ntesting = set([\n 'coverage==4.3.4',\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'mock==2.0.0',\n])\n\ncluster = set([\n 'ansible',\n 'pandas',\n 'colorama',\n])\n\nopenshift = set([\n 'openshift'\n])\n\nlinting = set([\n 'flake8==2.6.2',\n])\n\noptional = set([\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n 
package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing | cluster),\n 'develop26': list(runtime | develop | client | linting | testing | cluster),\n 'client': list(runtime | client),\n 'client-develop': list(runtime | develop | client | linting | testing),\n 'cluster': list(runtime | cluster),\n 'openshift': list(runtime | openshift),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py"}]} | 3,176 | 55 |
gh_patches_debug_1165 | rasdani/github-patches | git_diff | AnalogJ__lexicon-1356 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in create action for glesys provider
When creating an A record with the glesys provider, the full name is added instead of the host name.
```
lexicon_config = {
"provider_name" : "glesys",
"action": "create",
"domain": "somedomain.com",
"type": "A",
"name": "lexicon",
"content": "1.2.3.4",
"glesys": {
}
}
```
Results in the A-record:
`{'id': 2723410, 'type': 'A', 'name': 'lexicon.somedomain.com', 'ttl': 3600, 'content': '1.2.3.4'}`
While the expected result is:
`{'id': 2723410, 'type': 'A', 'name': 'lexicon', 'ttl': 3600, 'content': '1.2.3.4'}`
The request data sent to `domain/addrecord` :
`{'domainname': 'somedomain.com', 'host': 'lexicon.somedomain.com', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`
Expected request data to `domain/addrecord`:
`{'domainname': 'somedomain.com', 'host': 'lexicon', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`
Glesys API documentation:
```
domain/addrecord
Url: https://api.glesys.com/domain/addrecord
Method: Only Https POST
Required arguments: domainname , host , type , data
Optional arguments: ttl
Description: Adds a dns record to a domain
```
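A minimal sketch of the payload the provider should be sending, assuming the field names above and the optional TTL handling in `_addttl` (values are the ones from the example config):

```python
# Illustrative request body for POST /domain/addrecord: "host" carries only
# the record's host part, while the zone name goes in "domainname".
name = "lexicon"
domain = "somedomain.com"

request_data = {
    "domainname": domain,
    "host": name,          # not "lexicon.somedomain.com"
    "type": "A",
    "data": "1.2.3.4",
}
request_data["ttl"] = 3600  # optional argument per the API documentation
```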
</issue>
<code>
[start of lexicon/providers/glesys.py]
1 """Module provider for Glesys"""
2 import json
3
4 import requests
5
6 from lexicon.exceptions import AuthenticationError
7 from lexicon.providers.base import Provider as BaseProvider
8
9 NAMESERVER_DOMAINS = ["glesys.com"]
10
11
12 def provider_parser(subparser):
13 """Generate a subparser for Glesys"""
14 subparser.add_argument("--auth-username", help="specify username (CL12345)")
15 subparser.add_argument("--auth-token", help="specify API key")
16
17
18 class Provider(BaseProvider):
19 """Provider class for Glesys"""
20
21 def __init__(self, config):
22 super(Provider, self).__init__(config)
23 self.domain_id = None
24 self.api_endpoint = "https://api.glesys.com"
25
26 def _authenticate(self):
27 payload = self._get("/domain/list")
28 domains = payload["response"]["domains"]
29 for record in domains:
30 if record["domainname"] == self.domain:
31 # Domain records do not have any id.
32 # Since domain_id cannot be None, use domain name as id instead.
33 self.domain_id = record["domainname"]
34 break
35 else:
36 raise AuthenticationError("No domain found")
37
38 # Create record. If record already exists with the same content, do nothing.
39 def _create_record(self, rtype, name, content):
40 existing = self.list_records(rtype, name, content)
41 if existing:
42 # Already exists, do nothing.
43 return True
44
45 request_data = {
46 "domainname": self.domain,
47 "host": self._full_name(name),
48 "type": rtype,
49 "data": content,
50 }
51 self._addttl(request_data)
52
53 self._post("/domain/addrecord", data=request_data)
54 return True
55
56 # List all records. Return an empty list if no records found
57 # type, name and content are used to filter records.
58 # If possible filter during the query, otherwise filter after response is received.
59 def _list_records(self, rtype=None, name=None, content=None):
60 request_data = {"domainname": self.domain}
61 payload = self._post("/domain/listrecords", data=request_data)
62
63 # Convert from Glesys record structure to Lexicon structure.
64 processed_records = [
65 self._glesysrecord2lexiconrecord(r) for r in payload["response"]["records"]
66 ]
67
68 if rtype:
69 processed_records = [
70 record for record in processed_records if record["type"] == rtype
71 ]
72 if name:
73 processed_records = [
74 record
75 for record in processed_records
76 if record["name"] == self._full_name(name)
77 ]
78 if content:
79 processed_records = [
80 record
81 for record in processed_records
82 if record["content"].lower() == content.lower()
83 ]
84
85 return processed_records
86
87 # Update a record. Identifier must be specified.
88 def _update_record(self, identifier, rtype=None, name=None, content=None):
89 request_data = {"recordid": identifier}
90 if name:
91 request_data["host"] = name
92 if rtype:
93 request_data["type"] = rtype
94 if content:
95 request_data["data"] = content
96
97 self._addttl(request_data)
98 self._post("/domain/updaterecord", data=request_data)
99 return True
100
101 # Delete an existing record.
102 # If record does not exist, do nothing.
103 # If an identifier is specified, use it, otherwise do a lookup using type, name and content.
104 def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
105 delete_record_id = []
106 if not identifier:
107 records = self._list_records(rtype, name, content)
108 delete_record_id = [record["id"] for record in records]
109 else:
110 delete_record_id.append(identifier)
111
112 for record_id in delete_record_id:
113 request_data = {"recordid": record_id}
114 self._post("/domain/deleterecord", data=request_data)
115
116 return True
117
118 # Helpers.
119 def _request(self, action="GET", url="/", data=None, query_params=None):
120 if data is None:
121 data = {}
122 if query_params is None:
123 query_params = {}
124
125 query_params["format"] = "json"
126 default_headers = {
127 "Accept": "application/json",
128 "Content-Type": "application/json",
129 }
130
131 credentials = (
132 self._get_provider_option("auth_username"),
133 self._get_provider_option("auth_token"),
134 )
135 response = requests.request(
136 action,
137 self.api_endpoint + url,
138 params=query_params,
139 data=json.dumps(data),
140 headers=default_headers,
141 auth=credentials,
142 )
143
144 # if the request fails for any reason, throw an error.
145 response.raise_for_status()
146 return response.json()
147
148 # Adds TTL parameter if passed as argument to lexicon.
149 def _addttl(self, request_data):
150 if self._get_lexicon_option("ttl"):
151 request_data["ttl"] = self._get_lexicon_option("ttl")
152
153 # From Glesys record structure: [u'domainname', u'recordid', u'type', u'host', u'ttl', u'data']
154 def _glesysrecord2lexiconrecord(self, glesys_record):
155 return {
156 "id": glesys_record["recordid"],
157 "type": glesys_record["type"],
158 "name": glesys_record["host"],
159 "ttl": glesys_record["ttl"],
160 "content": glesys_record["data"],
161 }
162
[end of lexicon/providers/glesys.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lexicon/providers/glesys.py b/lexicon/providers/glesys.py
--- a/lexicon/providers/glesys.py
+++ b/lexicon/providers/glesys.py
@@ -44,7 +44,7 @@
request_data = {
"domainname": self.domain,
- "host": self._full_name(name),
+ "host": name,
"type": rtype,
"data": content,
}
| {"golden_diff": "diff --git a/lexicon/providers/glesys.py b/lexicon/providers/glesys.py\n--- a/lexicon/providers/glesys.py\n+++ b/lexicon/providers/glesys.py\n@@ -44,7 +44,7 @@\n \n request_data = {\n \"domainname\": self.domain,\n- \"host\": self._full_name(name),\n+ \"host\": name,\n \"type\": rtype,\n \"data\": content,\n }\n", "issue": "Bug in create action for glesys provider\nWhen creating an A record with the glesys provider, the full name is added instead of the host name. \r\n```\r\nlexicon_config = {\r\n \"provider_name\" : \"glesys\",\r\n \"action\": \"create\", \r\n \"domain\": \"somedomain.com\",\r\n \"type\": \"A\",\r\n \"name\": \"lexicon\",\r\n \"content\": \"1.2.3.4\",\r\n \"glesys\": {\r\n }\r\n}\r\n```\r\nResults in the A-record:\r\n`{'id': 2723410, 'type': 'A', 'name': 'lexicon.somedomain.com', 'ttl': 3600, 'content': '1.2.3.4'}`\r\n\r\nWhile the expected result is:\r\n`{'id': 2723410, 'type': 'A', 'name': 'lexicon', 'ttl': 3600, 'content': '1.2.3.4'}`\r\n\r\nThe request data sent to `domain/addrecord` :\r\n`{'domainname': 'somedomain.com', 'host': 'lexicon.somedomain.com', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`\r\n\r\nExpected request data to `domain/addrecord`: \r\n`{'domainname': 'somedomain.com', 'host': 'lexicon', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`\r\n\r\nGlesys API documentation:\r\n```\r\ndomain/addrecord\r\n\r\nUrl: https://api.glesys.com/domain/addrecord\r\n\r\nMethod: Only Https POST\r\n\r\nRequired arguments: domainname , host , type , data\r\n\r\nOptional arguments: ttl\r\n\r\nDescription: Adds a dns record to a domain\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Module provider for Glesys\"\"\"\nimport json\n\nimport requests\n\nfrom lexicon.exceptions import AuthenticationError\nfrom lexicon.providers.base import Provider as BaseProvider\n\nNAMESERVER_DOMAINS = [\"glesys.com\"]\n\n\ndef provider_parser(subparser):\n \"\"\"Generate a subparser for Glesys\"\"\"\n subparser.add_argument(\"--auth-username\", help=\"specify username (CL12345)\")\n subparser.add_argument(\"--auth-token\", help=\"specify API key\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for Glesys\"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = \"https://api.glesys.com\"\n\n def _authenticate(self):\n payload = self._get(\"/domain/list\")\n domains = payload[\"response\"][\"domains\"]\n for record in domains:\n if record[\"domainname\"] == self.domain:\n # Domain records do not have any id.\n # Since domain_id cannot be None, use domain name as id instead.\n self.domain_id = record[\"domainname\"]\n break\n else:\n raise AuthenticationError(\"No domain found\")\n\n # Create record. If record already exists with the same content, do nothing.\n def _create_record(self, rtype, name, content):\n existing = self.list_records(rtype, name, content)\n if existing:\n # Already exists, do nothing.\n return True\n\n request_data = {\n \"domainname\": self.domain,\n \"host\": self._full_name(name),\n \"type\": rtype,\n \"data\": content,\n }\n self._addttl(request_data)\n\n self._post(\"/domain/addrecord\", data=request_data)\n return True\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n request_data = {\"domainname\": self.domain}\n payload = self._post(\"/domain/listrecords\", data=request_data)\n\n # Convert from Glesys record structure to Lexicon structure.\n processed_records = [\n self._glesysrecord2lexiconrecord(r) for r in payload[\"response\"][\"records\"]\n ]\n\n if rtype:\n processed_records = [\n record for record in processed_records if record[\"type\"] == rtype\n ]\n if name:\n processed_records = [\n record\n for record in processed_records\n if record[\"name\"] == self._full_name(name)\n ]\n if content:\n processed_records = [\n record\n for record in processed_records\n if record[\"content\"].lower() == content.lower()\n ]\n\n return processed_records\n\n # Update a record. Identifier must be specified.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n request_data = {\"recordid\": identifier}\n if name:\n request_data[\"host\"] = name\n if rtype:\n request_data[\"type\"] = rtype\n if content:\n request_data[\"data\"] = content\n\n self._addttl(request_data)\n self._post(\"/domain/updaterecord\", data=request_data)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n # If an identifier is specified, use it, otherwise do a lookup using type, name and content.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n delete_record_id = []\n if not identifier:\n records = self._list_records(rtype, name, content)\n delete_record_id = [record[\"id\"] for record in records]\n else:\n delete_record_id.append(identifier)\n\n for record_id in delete_record_id:\n request_data = {\"recordid\": record_id}\n self._post(\"/domain/deleterecord\", data=request_data)\n\n return True\n\n # Helpers.\n def _request(self, action=\"GET\", url=\"/\", data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n\n query_params[\"format\"] = \"json\"\n default_headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n\n credentials = (\n self._get_provider_option(\"auth_username\"),\n self._get_provider_option(\"auth_token\"),\n )\n response = requests.request(\n action,\n self.api_endpoint + url,\n params=query_params,\n data=json.dumps(data),\n headers=default_headers,\n auth=credentials,\n )\n\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n\n # Adds TTL parameter if passed as argument to lexicon.\n def _addttl(self, request_data):\n if self._get_lexicon_option(\"ttl\"):\n request_data[\"ttl\"] = self._get_lexicon_option(\"ttl\")\n\n # From Glesys record structure: [u'domainname', u'recordid', u'type', u'host', u'ttl', u'data']\n def _glesysrecord2lexiconrecord(self, glesys_record):\n return {\n \"id\": glesys_record[\"recordid\"],\n \"type\": glesys_record[\"type\"],\n \"name\": glesys_record[\"host\"],\n \"ttl\": glesys_record[\"ttl\"],\n \"content\": glesys_record[\"data\"],\n }\n", "path": "lexicon/providers/glesys.py"}]} | 2,536 | 103 |
gh_patches_debug_19577 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-1182 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PubSub subscription with ack_deadline set causes HTTP 400
Page Name: pubsub-usage
Release: 0.7.1
This appears to return an API error:
```
subscription = topic.subscription('subscription_name', ack_deadline=600)
```
Here is what I am seeing:
```
...
File "/home/greg.taylor/workspace/aclima/sig-cassandra-extractor/aclima/cass_extractor/queue.py", line 42, in run
self.subscription.create()
File "/home/greg.taylor/.virtualenvs/cas-e/lib/python3.4/site-packages/gcloud/pubsub/subscription.py", line 121, in create
client.connection.api_request(method='PUT', path=self.path, data=data)
File "/home/greg.taylor/.virtualenvs/cas-e/lib/python3.4/site-packages/gcloud/connection.py", line 419, in api_request
error_info=method + ' ' + url)
gcloud.exceptions.BadRequest: 400 Invalid JSON payload received. Unknown name "ack_deadline": Cannot find field. (PUT https://pubsub.googleapis.com/v1/projects/aclima-gsa/subscriptions/cassandra_extractor)
```
If I remove the `ack_deadline` kwarg, all is well. We definitely want the ack_deadline, though.
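For reference, the subscription resource uses `ackDeadlineSeconds` (the same key `Subscription.from_api_repr` already reads back), so a create body along these lines should be accepted — a sketch with illustrative values, not the library's exact payload:

```python
# Body for PUT .../v1/projects/<project>/subscriptions/<name>
data = {
    "topic": "projects/aclima-gsa/topics/some_topic",  # illustrative topic path
    "ackDeadlineSeconds": 600,                         # not "ack_deadline" / "ackDeadline"
}
```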
</issue>
<code>
[start of gcloud/pubsub/subscription.py]
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Define API Subscriptions."""
16
17 from gcloud.exceptions import NotFound
18 from gcloud.pubsub._helpers import topic_name_from_path
19 from gcloud.pubsub.message import Message
20
21
22 class Subscription(object):
23 """Subscriptions receive messages published to their topics.
24
25 See:
26 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions
27
28 :type name: string
29 :param name: the name of the subscription
30
31 :type topic: :class:`gcloud.pubsub.topic.Topic`
32 :param topic: the topic to which the subscription belongs..
33
34 :type ack_deadline: int
35 :param ack_deadline: the deadline (in seconds) by which messages pulled
36 from the back-end must be acknowledged.
37
38 :type push_endpoint: string
39 :param push_endpoint: URL to which messages will be pushed by the back-end.
40 If not set, the application must pull messages.
41 """
42 def __init__(self, name, topic, ack_deadline=None, push_endpoint=None):
43 self.name = name
44 self.topic = topic
45 self.ack_deadline = ack_deadline
46 self.push_endpoint = push_endpoint
47
48 @classmethod
49 def from_api_repr(cls, resource, client, topics=None):
50 """Factory: construct a topic given its API representation
51
52 :type resource: dict
53 :param resource: topic resource representation returned from the API
54
55 :type client: :class:`gcloud.pubsub.client.Client`
56 :param client: Client which holds credentials and project
57 configuration for a topic.
58
59 :type topics: dict or None
60 :param topics: A mapping of topic names -> topics. If not passed,
61 the subscription will have a newly-created topic.
62
63 :rtype: :class:`gcloud.pubsub.subscription.Subscription`
64 :returns: Subscription parsed from ``resource``.
65 """
66 if topics is None:
67 topics = {}
68 topic_path = resource['topic']
69 topic = topics.get(topic_path)
70 if topic is None:
71 # NOTE: This duplicates behavior from Topic.from_api_repr to avoid
72 # an import cycle.
73 topic_name = topic_name_from_path(topic_path, client.project)
74 topic = topics[topic_path] = client.topic(topic_name)
75 _, _, _, name = resource['name'].split('/')
76 ack_deadline = resource.get('ackDeadlineSeconds')
77 push_config = resource.get('pushConfig', {})
78 push_endpoint = push_config.get('pushEndpoint')
79 return cls(name, topic, ack_deadline, push_endpoint)
80
81 @property
82 def path(self):
83 """URL path for the subscription's APIs"""
84 project = self.topic.project
85 return '/projects/%s/subscriptions/%s' % (project, self.name)
86
87 def _require_client(self, client):
88 """Check client or verify over-ride.
89
90 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
91 :param client: the client to use. If not passed, falls back to the
92 ``client`` stored on the topic of the
93 current subscription.
94
95 :rtype: :class:`gcloud.pubsub.client.Client`
96 :returns: The client passed in or the currently bound client.
97 """
98 if client is None:
99 client = self.topic._client
100 return client
101
102 def create(self, client=None):
103 """API call: create the subscription via a PUT request
104
105 See:
106 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/create
107
108 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
109 :param client: the client to use. If not passed, falls back to the
110 ``client`` stored on the current subscription's topic.
111 """
112 data = {'topic': self.topic.full_name}
113
114 if self.ack_deadline is not None:
115 data['ackDeadline'] = self.ack_deadline
116
117 if self.push_endpoint is not None:
118 data['pushConfig'] = {'pushEndpoint': self.push_endpoint}
119
120 client = self._require_client(client)
121 client.connection.api_request(method='PUT', path=self.path, data=data)
122
123 def exists(self, client=None):
124 """API call: test existence of the subscription via a GET request
125
126 See
127 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get
128
129 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
130 :param client: the client to use. If not passed, falls back to the
131 ``client`` stored on the current subscription's topic.
132 """
133 client = self._require_client(client)
134 try:
135 client.connection.api_request(method='GET', path=self.path)
136 except NotFound:
137 return False
138 else:
139 return True
140
141 def reload(self, client=None):
142 """API call: sync local subscription configuration via a GET request
143
144 See
145 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get
146
147 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
148 :param client: the client to use. If not passed, falls back to the
149 ``client`` stored on the current subscription's topic.
150 """
151 client = self._require_client(client)
152 data = client.connection.api_request(method='GET', path=self.path)
153 self.ack_deadline = data.get('ackDeadline')
154 push_config = data.get('pushConfig', {})
155 self.push_endpoint = push_config.get('pushEndpoint')
156
157 def modify_push_configuration(self, push_endpoint, client=None):
158 """API call: update the push endpoint for the subscription.
159
160 See:
161 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/modifyPushConfig
162
163 :type push_endpoint: string
164 :param push_endpoint: URL to which messages will be pushed by the
165 back-end. If None, the application must pull
166 messages.
167
168 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
169 :param client: the client to use. If not passed, falls back to the
170 ``client`` stored on the current subscription's topic.
171 """
172 client = self._require_client(client)
173 data = {}
174 config = data['pushConfig'] = {}
175 if push_endpoint is not None:
176 config['pushEndpoint'] = push_endpoint
177 client.connection.api_request(
178 method='POST', path='%s:modifyPushConfig' % (self.path,),
179 data=data)
180 self.push_endpoint = push_endpoint
181
182 def pull(self, return_immediately=False, max_messages=1, client=None):
183 """API call: retrieve messages for the subscription.
184
185 See:
186 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/pull
187
188 :type return_immediately: boolean
189 :param return_immediately: if True, the back-end returns even if no
190 messages are available; if False, the API
191 call blocks until one or more messages are
192 available.
193
194 :type max_messages: int
195 :param max_messages: the maximum number of messages to return.
196
197 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
198 :param client: the client to use. If not passed, falls back to the
199 ``client`` stored on the current subscription's topic.
200
201 :rtype: list of (ack_id, message) tuples
202 :returns: sequence of tuples: ``ack_id`` is the ID to be used in a
203 subsequent call to :meth:`acknowledge`, and ``message``
204 is an instance of :class:`gcloud.pubsub.message.Message`.
205 """
206 client = self._require_client(client)
207 data = {'returnImmediately': return_immediately,
208 'maxMessages': max_messages}
209 response = client.connection.api_request(
210 method='POST', path='%s:pull' % (self.path,), data=data)
211 return [(info['ackId'], Message.from_api_repr(info['message']))
212 for info in response.get('receivedMessages', ())]
213
214 def acknowledge(self, ack_ids, client=None):
215 """API call: acknowledge retrieved messages for the subscription.
216
217 See:
218 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge
219
220 :type ack_ids: list of string
221 :param ack_ids: ack IDs of messages being acknowledged
222
223 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
224 :param client: the client to use. If not passed, falls back to the
225 ``client`` stored on the current subscription's topic.
226 """
227 client = self._require_client(client)
228 data = {'ackIds': ack_ids}
229 client.connection.api_request(
230 method='POST', path='%s:acknowledge' % (self.path,), data=data)
231
232 def modify_ack_deadline(self, ack_id, ack_deadline, client=None):
233 """API call: update acknowledgement deadline for a retrieved message.
234
235 See:
236 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge
237
238 :type ack_id: string
239 :param ack_id: ack ID of message being updated
240
241 :type ack_deadline: int
242 :param ack_deadline: new deadline for the message, in seconds
243
244 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
245 :param client: the client to use. If not passed, falls back to the
246 ``client`` stored on the current subscription's topic.
247 """
248 client = self._require_client(client)
249 data = {'ackIds': [ack_id], 'ackDeadlineSeconds': ack_deadline}
250 client.connection.api_request(
251 method='POST', path='%s:modifyAckDeadline' % (self.path,),
252 data=data)
253
254 def delete(self, client=None):
255 """API call: delete the subscription via a DELETE request.
256
257 See:
258 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/delete
259
260 :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
261 :param client: the client to use. If not passed, falls back to the
262 ``client`` stored on the current subscription's topic.
263 """
264 client = self._require_client(client)
265 client.connection.api_request(method='DELETE', path=self.path)
266
[end of gcloud/pubsub/subscription.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/pubsub/subscription.py b/gcloud/pubsub/subscription.py
--- a/gcloud/pubsub/subscription.py
+++ b/gcloud/pubsub/subscription.py
@@ -112,7 +112,7 @@
data = {'topic': self.topic.full_name}
if self.ack_deadline is not None:
- data['ackDeadline'] = self.ack_deadline
+ data['ackDeadlineSeconds'] = self.ack_deadline
if self.push_endpoint is not None:
data['pushConfig'] = {'pushEndpoint': self.push_endpoint}
@@ -150,7 +150,7 @@
"""
client = self._require_client(client)
data = client.connection.api_request(method='GET', path=self.path)
- self.ack_deadline = data.get('ackDeadline')
+ self.ack_deadline = data.get('ackDeadlineSeconds')
push_config = data.get('pushConfig', {})
self.push_endpoint = push_config.get('pushEndpoint')
| {"golden_diff": "diff --git a/gcloud/pubsub/subscription.py b/gcloud/pubsub/subscription.py\n--- a/gcloud/pubsub/subscription.py\n+++ b/gcloud/pubsub/subscription.py\n@@ -112,7 +112,7 @@\n data = {'topic': self.topic.full_name}\n \n if self.ack_deadline is not None:\n- data['ackDeadline'] = self.ack_deadline\n+ data['ackDeadlineSeconds'] = self.ack_deadline\n \n if self.push_endpoint is not None:\n data['pushConfig'] = {'pushEndpoint': self.push_endpoint}\n@@ -150,7 +150,7 @@\n \"\"\"\n client = self._require_client(client)\n data = client.connection.api_request(method='GET', path=self.path)\n- self.ack_deadline = data.get('ackDeadline')\n+ self.ack_deadline = data.get('ackDeadlineSeconds')\n push_config = data.get('pushConfig', {})\n self.push_endpoint = push_config.get('pushEndpoint')\n", "issue": "PubSub subscription with ack_deadline set causes HTTP 400\nPage Name: pubsub-usage\nRelease: 0.7.1\n\nThis appears to return an API error:\n\n```\nsubscription = topic.subscription('subscription_name', ack_deadline=600)\n```\n\nHere is what I am seeing:\n\n```\n...\n File \"/home/greg.taylor/workspace/aclima/sig-cassandra-extractor/aclima/cass_extractor/queue.py\", line 42, in run\n self.subscription.create()\n File \"/home/greg.taylor/.virtualenvs/cas-e/lib/python3.4/site-packages/gcloud/pubsub/subscription.py\", line 121, in create\n client.connection.api_request(method='PUT', path=self.path, data=data)\n File \"/home/greg.taylor/.virtualenvs/cas-e/lib/python3.4/site-packages/gcloud/connection.py\", line 419, in api_request\n error_info=method + ' ' + url)\ngcloud.exceptions.BadRequest: 400 Invalid JSON payload received. Unknown name \"ack_deadline\": Cannot find field. (PUT https://pubsub.googleapis.com/v1/projects/aclima-gsa/subscriptions/cassandra_extractor)\n```\n\nIf I remove the `ack_deadline` kwarg, all is well. We definitely want the ack_deadline, thoguh.\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Subscriptions.\"\"\"\n\nfrom gcloud.exceptions import NotFound\nfrom gcloud.pubsub._helpers import topic_name_from_path\nfrom gcloud.pubsub.message import Message\n\n\nclass Subscription(object):\n \"\"\"Subscriptions receive messages published to their topics.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions\n\n :type name: string\n :param name: the name of the subscription\n\n :type topic: :class:`gcloud.pubsub.topic.Topic`\n :param topic: the topic to which the subscription belongs..\n\n :type ack_deadline: int\n :param ack_deadline: the deadline (in seconds) by which messages pulled\n from the back-end must be acknowledged.\n\n :type push_endpoint: string\n :param push_endpoint: URL to which messages will be pushed by the back-end.\n If not set, the application must pull messages.\n \"\"\"\n def __init__(self, name, topic, ack_deadline=None, push_endpoint=None):\n self.name = name\n self.topic = topic\n self.ack_deadline = ack_deadline\n self.push_endpoint = push_endpoint\n\n @classmethod\n def from_api_repr(cls, resource, client, topics=None):\n \"\"\"Factory: construct a topic given its API representation\n\n :type resource: dict\n :param resource: topic resource representation returned from the API\n\n :type client: :class:`gcloud.pubsub.client.Client`\n :param client: Client which holds credentials and project\n configuration for a topic.\n\n :type topics: dict or None\n :param topics: A mapping of topic names -> topics. If not passed,\n the subscription will have a newly-created topic.\n\n :rtype: :class:`gcloud.pubsub.subscription.Subscription`\n :returns: Subscription parsed from ``resource``.\n \"\"\"\n if topics is None:\n topics = {}\n topic_path = resource['topic']\n topic = topics.get(topic_path)\n if topic is None:\n # NOTE: This duplicates behavior from Topic.from_api_repr to avoid\n # an import cycle.\n topic_name = topic_name_from_path(topic_path, client.project)\n topic = topics[topic_path] = client.topic(topic_name)\n _, _, _, name = resource['name'].split('/')\n ack_deadline = resource.get('ackDeadlineSeconds')\n push_config = resource.get('pushConfig', {})\n push_endpoint = push_config.get('pushEndpoint')\n return cls(name, topic, ack_deadline, push_endpoint)\n\n @property\n def path(self):\n \"\"\"URL path for the subscription's APIs\"\"\"\n project = self.topic.project\n return '/projects/%s/subscriptions/%s' % (project, self.name)\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the topic of the\n current subscription.\n\n :rtype: :class:`gcloud.pubsub.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self.topic._client\n return client\n\n def create(self, client=None):\n \"\"\"API call: create the subscription via a PUT request\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/create\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n \"\"\"\n data = {'topic': self.topic.full_name}\n\n if self.ack_deadline is not None:\n data['ackDeadline'] = self.ack_deadline\n\n if self.push_endpoint is not None:\n data['pushConfig'] = {'pushEndpoint': self.push_endpoint}\n\n client = self._require_client(client)\n client.connection.api_request(method='PUT', path=self.path, data=data)\n\n def exists(self, client=None):\n \"\"\"API call: test existence of the subscription via a GET request\n\n See\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n \"\"\"\n client = self._require_client(client)\n try:\n client.connection.api_request(method='GET', path=self.path)\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, client=None):\n \"\"\"API call: sync local subscription configuration via a GET request\n\n See\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n \"\"\"\n client = self._require_client(client)\n data = client.connection.api_request(method='GET', path=self.path)\n self.ack_deadline = data.get('ackDeadline')\n push_config = data.get('pushConfig', {})\n self.push_endpoint = push_config.get('pushEndpoint')\n\n def modify_push_configuration(self, push_endpoint, client=None):\n \"\"\"API call: update the push endpoint for the subscription.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/modifyPushConfig\n\n :type push_endpoint: string\n :param push_endpoint: URL to which messages will be pushed by the\n back-end. If None, the application must pull\n messages.\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n \"\"\"\n client = self._require_client(client)\n data = {}\n config = data['pushConfig'] = {}\n if push_endpoint is not None:\n config['pushEndpoint'] = push_endpoint\n client.connection.api_request(\n method='POST', path='%s:modifyPushConfig' % (self.path,),\n data=data)\n self.push_endpoint = push_endpoint\n\n def pull(self, return_immediately=False, max_messages=1, client=None):\n \"\"\"API call: retrieve messages for the subscription.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/pull\n\n :type return_immediately: boolean\n :param return_immediately: if True, the back-end returns even if no\n messages are available; if False, the API\n call blocks until one or more messages are\n available.\n\n :type max_messages: int\n :param max_messages: the maximum number of messages to return.\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n\n :rtype: list of (ack_id, message) tuples\n :returns: sequence of tuples: ``ack_id`` is the ID to be used in a\n subsequent call to :meth:`acknowledge`, and ``message``\n is an instance of :class:`gcloud.pubsub.message.Message`.\n \"\"\"\n client = self._require_client(client)\n data = {'returnImmediately': return_immediately,\n 'maxMessages': max_messages}\n response = client.connection.api_request(\n method='POST', path='%s:pull' % (self.path,), data=data)\n return [(info['ackId'], Message.from_api_repr(info['message']))\n for info in response.get('receivedMessages', ())]\n\n def acknowledge(self, ack_ids, client=None):\n \"\"\"API call: acknowledge retrieved messages for the subscription.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge\n\n :type ack_ids: list of string\n :param ack_ids: ack IDs of messages being acknowledged\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n \"\"\"\n client = self._require_client(client)\n data = {'ackIds': ack_ids}\n client.connection.api_request(\n method='POST', path='%s:acknowledge' % (self.path,), data=data)\n\n def modify_ack_deadline(self, ack_id, ack_deadline, client=None):\n \"\"\"API call: update acknowledgement deadline for a retrieved message.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge\n\n :type ack_id: string\n :param ack_id: ack ID of message being updated\n\n :type ack_deadline: int\n :param ack_deadline: new deadline for the message, in seconds\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n \"\"\"\n client = self._require_client(client)\n data = {'ackIds': [ack_id], 'ackDeadlineSeconds': ack_deadline}\n client.connection.api_request(\n method='POST', path='%s:modifyAckDeadline' % (self.path,),\n data=data)\n\n def delete(self, client=None):\n \"\"\"API call: delete the subscription via a DELETE request.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/delete\n\n :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current subscription's topic.\n \"\"\"\n client = self._require_client(client)\n client.connection.api_request(method='DELETE', path=self.path)\n", "path": "gcloud/pubsub/subscription.py"}]} | 3,941 | 222 |
gh_patches_debug_11274 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1021 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Python 3.5 support
Python 3.5 hit EOL September 13, 2020. Support will be removed in our next major release.
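A minimal sketch of the kind of guard that could replace the current `(3, 5)` check once support is dropped — illustrative only; the exact exception type and message are a release decision:

```python
import sys

# Fail fast on interpreters the agent no longer supports.
if sys.version_info < (3, 6):
    raise RuntimeError("The Elastic APM agent requires Python 3.6+")
```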
</issue>
<code>
[start of elasticapm/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 import sys
31
32 from elasticapm.base import Client
33 from elasticapm.conf import setup_logging # noqa: F401
34 from elasticapm.instrumentation.control import instrument, uninstrument # noqa: F401
35 from elasticapm.traces import ( # noqa: F401
36 capture_span,
37 get_span_id,
38 get_trace_id,
39 get_transaction_id,
40 get_trace_parent_header,
41 label,
42 set_context,
43 set_custom_context,
44 set_transaction_name,
45 set_transaction_outcome,
46 set_transaction_result,
47 set_user_context,
48 tag,
49 )
50 from elasticapm.utils.disttracing import trace_parent_from_headers, trace_parent_from_string # noqa: F401
51
52 __all__ = ("VERSION", "Client")
53
54 try:
55 try:
56 VERSION = __import__("importlib.metadata").metadata.version("elastic-apm")
57 except ImportError:
58 VERSION = __import__("pkg_resources").get_distribution("elastic-apm").version
59 except Exception:
60 VERSION = "unknown"
61
62
63 if sys.version_info >= (3, 5):
64 from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401
65
[end of elasticapm/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/__init__.py b/elasticapm/__init__.py
--- a/elasticapm/__init__.py
+++ b/elasticapm/__init__.py
@@ -36,8 +36,8 @@
capture_span,
get_span_id,
get_trace_id,
- get_transaction_id,
get_trace_parent_header,
+ get_transaction_id,
label,
set_context,
set_custom_context,
@@ -60,5 +60,7 @@
VERSION = "unknown"
-if sys.version_info >= (3, 5):
- from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401
+if sys.version_info <= (3, 5):
+ raise DeprecationWarning("The Elastic APM agent requires Python 3.6+")
+
+from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401
| {"golden_diff": "diff --git a/elasticapm/__init__.py b/elasticapm/__init__.py\n--- a/elasticapm/__init__.py\n+++ b/elasticapm/__init__.py\n@@ -36,8 +36,8 @@\n capture_span,\n get_span_id,\n get_trace_id,\n- get_transaction_id,\n get_trace_parent_header,\n+ get_transaction_id,\n label,\n set_context,\n set_custom_context,\n@@ -60,5 +60,7 @@\n VERSION = \"unknown\"\n \n \n-if sys.version_info >= (3, 5):\n- from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401\n+if sys.version_info <= (3, 5):\n+ raise DeprecationWarning(\"The Elastic APM agent requires Python 3.6+\")\n+\n+from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401\n", "issue": "Remove Python 3.5 support\nPython 3.5 hit EOL September 13, 2020. Support will be removed in our next major release.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nimport sys\n\nfrom elasticapm.base import Client\nfrom elasticapm.conf import setup_logging # noqa: F401\nfrom elasticapm.instrumentation.control import instrument, uninstrument # noqa: F401\nfrom elasticapm.traces import ( # noqa: F401\n capture_span,\n get_span_id,\n get_trace_id,\n get_transaction_id,\n get_trace_parent_header,\n label,\n set_context,\n set_custom_context,\n set_transaction_name,\n set_transaction_outcome,\n set_transaction_result,\n set_user_context,\n tag,\n)\nfrom elasticapm.utils.disttracing import trace_parent_from_headers, trace_parent_from_string # noqa: F401\n\n__all__ = (\"VERSION\", \"Client\")\n\ntry:\n try:\n VERSION = __import__(\"importlib.metadata\").metadata.version(\"elastic-apm\")\n except ImportError:\n VERSION = __import__(\"pkg_resources\").get_distribution(\"elastic-apm\").version\nexcept Exception:\n VERSION = \"unknown\"\n\n\nif sys.version_info >= (3, 5):\n from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401\n", "path": "elasticapm/__init__.py"}]} | 1,298 | 211 |
gh_patches_debug_20263 | rasdani/github-patches | git_diff | learningequality__kolibri-1754 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
imports get stuck when server is restarted
* begin importing channel
* stop server
* start server
Transfer is stuck partway. It does not continue, and due to #1673 cannot be canceled:

</issue>
<code>
[start of kolibri/tasks/api.py]
1 import logging as logger
2
3 from django.apps.registry import AppRegistryNotReady
4
5 try:
6 from django.apps import apps
7
8 apps.check_apps_ready()
9 except AppRegistryNotReady:
10 import django
11
12 django.setup()
13
14 import requests
15 from django.core.management import call_command
16 from django.conf import settings
17 from django.http import Http404
18 from django.utils.translation import ugettext as _
19 from kolibri.content.models import ChannelMetadataCache
20 from kolibri.content.utils.channels import get_mounted_drives_with_channel_info
21 from kolibri.content.utils.paths import get_content_database_file_url
22 from rest_framework import serializers, viewsets
23 from rest_framework.decorators import list_route
24 from rest_framework.response import Response
25 from barbequeue.common.classes import State
26 from barbequeue.client import SimpleClient
27
28 from .permissions import IsDeviceOwnerOnly
29
30 logging = logger.getLogger(__name__)
31
32 client = SimpleClient(
33 app="kolibri", storage_path=settings.QUEUE_JOB_STORAGE_PATH)
34
35 # all tasks are marked as remote imports for nwo
36 TASKTYPE = "remoteimport"
37
38
39 class TasksViewSet(viewsets.ViewSet):
40 permission_classes = (IsDeviceOwnerOnly, )
41
42 def list(self, request):
43 jobs_response = [_job_to_response(j) for j in client.all_jobs()]
44 return Response(jobs_response)
45
46 def create(self, request):
47 # unimplemented. Call out to the task-specific APIs for now.
48 pass
49
50 def retrieve(self, request, pk=None):
51 task = _job_to_response(client.status(pk))
52 return Response(task)
53
54 def destroy(self, request, pk=None):
55 # unimplemented for now.
56 pass
57
58 @list_route(methods=['post'])
59 def startremoteimport(self, request):
60 '''Download a channel's database from the main curation server, and then
61 download its content.
62
63 '''
64
65 if "channel_id" not in request.data:
66 raise serializers.ValidationError(
67 "The 'channel_id' field is required.")
68
69 channel_id = request.data['channel_id']
70
71 # ensure the requested channel_id can be found on the central server, otherwise error
72 status = requests.head(
73 get_content_database_file_url(channel_id)).status_code
74 if status == 404:
75 raise Http404(
76 _("The requested channel does not exist on the content server")
77 )
78
79 task_id = client.schedule(
80 _networkimport, channel_id, track_progress=True)
81
82 # attempt to get the created Task, otherwise return pending status
83 resp = _job_to_response(client.status(task_id))
84
85 return Response(resp)
86
87 @list_route(methods=['post'])
88 def startlocalimport(self, request):
89 """
90 Import a channel from a local drive, and copy content to the local machine.
91 """
92 # Importing django/running setup because Windows...
93
94 if "drive_id" not in request.data:
95 raise serializers.ValidationError(
96 "The 'drive_id' field is required.")
97
98 job_id = client.schedule(
99 _localimport, request.data['drive_id'], track_progress=True)
100
101 # attempt to get the created Task, otherwise return pending status
102 resp = _job_to_response(client.status(job_id))
103
104 return Response(resp)
105
106 @list_route(methods=['post'])
107 def startlocalexport(self, request):
108 '''
109 Export a channel to a local drive, and copy content to the drive.
110
111 '''
112
113 if "drive_id" not in request.data:
114 raise serializers.ValidationError(
115 "The 'drive_id' field is required.")
116
117 job_id = client.schedule(
118 _localexport, request.data['drive_id'], track_progress=True)
119
120 # attempt to get the created Task, otherwise return pending status
121 resp = _job_to_response(client.status(job_id))
122
123 return Response(resp)
124
125 @list_route(methods=['post'])
126 def cleartask(self, request):
127 '''
128 Clears a task with its task id given in the task_id parameter.
129 '''
130
131 if 'task_id' not in request.data:
132 raise serializers.ValidationError(
133 "The 'task_id' field is required.")
134
135 client.clear(force=True)
136 return Response({})
137
138 @list_route(methods=['get'])
139 def localdrive(self, request):
140 drives = get_mounted_drives_with_channel_info()
141
142 # make sure everything is a dict, before converting to JSON
143 assert isinstance(drives, dict)
144 out = [mountdata._asdict() for mountdata in drives.values()]
145
146 return Response(out)
147
148
149 def _networkimport(channel_id, update_progress=None):
150 call_command("importchannel", "network", channel_id)
151 call_command(
152 "importcontent",
153 "network",
154 channel_id,
155 update_progress=update_progress)
156
157
158 def _localimport(drive_id, update_progress=None):
159 drives = get_mounted_drives_with_channel_info()
160 drive = drives[drive_id]
161 for channel in drive.metadata["channels"]:
162 call_command("importchannel", "local", channel["id"], drive.datafolder)
163 call_command(
164 "importcontent",
165 "local",
166 channel["id"],
167 drive.datafolder,
168 update_progress=update_progress)
169
170
171 def _localexport(drive_id, update_progress=None):
172 drives = get_mounted_drives_with_channel_info()
173 drive = drives[drive_id]
174 for channel in ChannelMetadataCache.objects.all():
175 call_command("exportchannel", channel.id, drive.datafolder)
176 call_command(
177 "exportcontent",
178 channel.id,
179 drive.datafolder,
180 update_progress=update_progress)
181
182
183 def _job_to_response(job):
184 if not job:
185 return {
186 "type": TASKTYPE,
187 "status": State.SCHEDULED,
188 "percentage": 0,
189 "progress": [],
190 "id": job.job_id,
191 }
192 else:
193 return {
194 "type": TASKTYPE,
195 "status": job.state,
196 "exception": str(job.exception),
197 "traceback": str(job.traceback),
198 "percentage": job.percentage_progress,
199 "id": job.job_id,
200 }
201
[end of kolibri/tasks/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/tasks/api.py b/kolibri/tasks/api.py
--- a/kolibri/tasks/api.py
+++ b/kolibri/tasks/api.py
@@ -13,7 +13,6 @@
import requests
from django.core.management import call_command
-from django.conf import settings
from django.http import Http404
from django.utils.translation import ugettext as _
from kolibri.content.models import ChannelMetadataCache
@@ -29,15 +28,14 @@
logging = logger.getLogger(__name__)
-client = SimpleClient(
- app="kolibri", storage_path=settings.QUEUE_JOB_STORAGE_PATH)
+client = SimpleClient(app="kolibri")
# all tasks are marked as remote imports for nwo
TASKTYPE = "remoteimport"
class TasksViewSet(viewsets.ViewSet):
- permission_classes = (IsDeviceOwnerOnly, )
+ permission_classes = (IsDeviceOwnerOnly,)
def list(self, request):
jobs_response = [_job_to_response(j) for j in client.all_jobs()]
| {"golden_diff": "diff --git a/kolibri/tasks/api.py b/kolibri/tasks/api.py\n--- a/kolibri/tasks/api.py\n+++ b/kolibri/tasks/api.py\n@@ -13,7 +13,6 @@\n \n import requests\n from django.core.management import call_command\n-from django.conf import settings\n from django.http import Http404\n from django.utils.translation import ugettext as _\n from kolibri.content.models import ChannelMetadataCache\n@@ -29,15 +28,14 @@\n \n logging = logger.getLogger(__name__)\n \n-client = SimpleClient(\n- app=\"kolibri\", storage_path=settings.QUEUE_JOB_STORAGE_PATH)\n+client = SimpleClient(app=\"kolibri\")\n \n # all tasks are marked as remote imports for nwo\n TASKTYPE = \"remoteimport\"\n \n \n class TasksViewSet(viewsets.ViewSet):\n- permission_classes = (IsDeviceOwnerOnly, )\n+ permission_classes = (IsDeviceOwnerOnly,)\n \n def list(self, request):\n jobs_response = [_job_to_response(j) for j in client.all_jobs()]\n", "issue": "imports get stuck when server is restarted\n\r\n* begin importing channel\r\n* stop server\r\n* start server\r\n\r\nTransfer is stuck partway. It does not continue, and due to #1673 cannot be canceled:\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import logging as logger\n\nfrom django.apps.registry import AppRegistryNotReady\n\ntry:\n from django.apps import apps\n\n apps.check_apps_ready()\nexcept AppRegistryNotReady:\n import django\n\n django.setup()\n\nimport requests\nfrom django.core.management import call_command\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.utils.translation import ugettext as _\nfrom kolibri.content.models import ChannelMetadataCache\nfrom kolibri.content.utils.channels import get_mounted_drives_with_channel_info\nfrom kolibri.content.utils.paths import get_content_database_file_url\nfrom rest_framework import serializers, viewsets\nfrom rest_framework.decorators import list_route\nfrom rest_framework.response import Response\nfrom barbequeue.common.classes import State\nfrom barbequeue.client import SimpleClient\n\nfrom .permissions import IsDeviceOwnerOnly\n\nlogging = logger.getLogger(__name__)\n\nclient = SimpleClient(\n app=\"kolibri\", storage_path=settings.QUEUE_JOB_STORAGE_PATH)\n\n# all tasks are marked as remote imports for nwo\nTASKTYPE = \"remoteimport\"\n\n\nclass TasksViewSet(viewsets.ViewSet):\n permission_classes = (IsDeviceOwnerOnly, )\n\n def list(self, request):\n jobs_response = [_job_to_response(j) for j in client.all_jobs()]\n return Response(jobs_response)\n\n def create(self, request):\n # unimplemented. 
Call out to the task-specific APIs for now.\n pass\n\n def retrieve(self, request, pk=None):\n task = _job_to_response(client.status(pk))\n return Response(task)\n\n def destroy(self, request, pk=None):\n # unimplemented for now.\n pass\n\n @list_route(methods=['post'])\n def startremoteimport(self, request):\n '''Download a channel's database from the main curation server, and then\n download its content.\n\n '''\n\n if \"channel_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'channel_id' field is required.\")\n\n channel_id = request.data['channel_id']\n\n # ensure the requested channel_id can be found on the central server, otherwise error\n status = requests.head(\n get_content_database_file_url(channel_id)).status_code\n if status == 404:\n raise Http404(\n _(\"The requested channel does not exist on the content server\")\n )\n\n task_id = client.schedule(\n _networkimport, channel_id, track_progress=True)\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(client.status(task_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def startlocalimport(self, request):\n \"\"\"\n Import a channel from a local drive, and copy content to the local machine.\n \"\"\"\n # Importing django/running setup because Windows...\n\n if \"drive_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'drive_id' field is required.\")\n\n job_id = client.schedule(\n _localimport, request.data['drive_id'], track_progress=True)\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(client.status(job_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def startlocalexport(self, request):\n '''\n Export a channel to a local drive, and copy content to the drive.\n\n '''\n\n if \"drive_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'drive_id' field is required.\")\n\n job_id = client.schedule(\n _localexport, request.data['drive_id'], track_progress=True)\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(client.status(job_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def cleartask(self, request):\n '''\n Clears a task with its task id given in the task_id parameter.\n '''\n\n if 'task_id' not in request.data:\n raise serializers.ValidationError(\n \"The 'task_id' field is required.\")\n\n client.clear(force=True)\n return Response({})\n\n @list_route(methods=['get'])\n def localdrive(self, request):\n drives = get_mounted_drives_with_channel_info()\n\n # make sure everything is a dict, before converting to JSON\n assert isinstance(drives, dict)\n out = [mountdata._asdict() for mountdata in drives.values()]\n\n return Response(out)\n\n\ndef _networkimport(channel_id, update_progress=None):\n call_command(\"importchannel\", \"network\", channel_id)\n call_command(\n \"importcontent\",\n \"network\",\n channel_id,\n update_progress=update_progress)\n\n\ndef _localimport(drive_id, update_progress=None):\n drives = get_mounted_drives_with_channel_info()\n drive = drives[drive_id]\n for channel in drive.metadata[\"channels\"]:\n call_command(\"importchannel\", \"local\", channel[\"id\"], drive.datafolder)\n call_command(\n \"importcontent\",\n \"local\",\n channel[\"id\"],\n drive.datafolder,\n update_progress=update_progress)\n\n\ndef _localexport(drive_id, update_progress=None):\n drives = get_mounted_drives_with_channel_info()\n drive = drives[drive_id]\n for channel in 
ChannelMetadataCache.objects.all():\n call_command(\"exportchannel\", channel.id, drive.datafolder)\n call_command(\n \"exportcontent\",\n channel.id,\n drive.datafolder,\n update_progress=update_progress)\n\n\ndef _job_to_response(job):\n if not job:\n return {\n \"type\": TASKTYPE,\n \"status\": State.SCHEDULED,\n \"percentage\": 0,\n \"progress\": [],\n \"id\": job.job_id,\n }\n else:\n return {\n \"type\": TASKTYPE,\n \"status\": job.state,\n \"exception\": str(job.exception),\n \"traceback\": str(job.traceback),\n \"percentage\": job.percentage_progress,\n \"id\": job.job_id,\n }\n", "path": "kolibri/tasks/api.py"}]} | 2,447 | 225 |
gh_patches_debug_36865 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-834 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simplify cookiecutter.hooks.find_hooks
We should rename `cookiecutter.hooks.find_hooks` to `find_hook(hook_name)` and explicitly look for the requested hook, instead of processing all the files in the hooks directory.
See https://github.com/audreyr/cookiecutter/pull/768/files/9a94484093ca23e9d55d42a53f096f67535b0b63#r68646614
</issue>
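One way to read the request is a lookup that returns the single requested script instead of a dict of everything under `hooks/`. A minimal sketch under that reading, reusing the `_HOOKS` allow-list and the backup-file (`~`) check from the existing `find_hooks`; the final signature is up to the project:

```python
import os

_HOOKS = ['pre_gen_project', 'post_gen_project']


def find_hook(hook_name, hooks_dir='hooks'):
    """Return the absolute path to the requested hook script, or None."""
    if not os.path.isdir(hooks_dir):
        return None
    for hook_file in os.listdir(hooks_dir):
        basename = os.path.splitext(os.path.basename(hook_file))[0]
        # Accept only known hook names and skip editor backup files ending in '~'.
        if basename == hook_name and basename in _HOOKS and not hook_file.endswith('~'):
            return os.path.abspath(os.path.join(hooks_dir, hook_file))
    return None
```

A caller such as `run_hook` would then use `find_hook(hook_name)` directly instead of `find_hooks().get(hook_name)`.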
<code>
[start of cookiecutter/hooks.py]
1 # -*- coding: utf-8 -*-
2
3 """Functions for discovering and executing various cookiecutter hooks."""
4
5 import io
6 import logging
7 import os
8 import subprocess
9 import sys
10 import tempfile
11
12 from jinja2 import Template
13
14 from cookiecutter import utils
15 from .exceptions import FailedHookException
16
17 logger = logging.getLogger(__name__)
18
19
20 _HOOKS = [
21 'pre_gen_project',
22 'post_gen_project',
23 # TODO: other hooks should be listed here
24 ]
25 EXIT_SUCCESS = 0
26
27
28 def find_hooks():
29 """Return a dict of all hook scripts provided.
30
31 Must be called with the project template as the current working directory.
32 Dict's key will be the hook/script's name, without extension, while values
33 will be the absolute path to the script. Missing scripts will not be
34 included in the returned dict.
35 """
36 hooks_dir = 'hooks'
37 hooks = {}
38 logger.debug('hooks_dir is {}'.format(hooks_dir))
39
40 if not os.path.isdir(hooks_dir):
41 logger.debug('No hooks/ dir in template_dir')
42 return hooks
43
44 for f in os.listdir(hooks_dir):
45 filename = os.path.basename(f)
46 basename = os.path.splitext(filename)[0]
47
48 if basename in _HOOKS and not filename.endswith('~'):
49 hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))
50 return hooks
51
52
53 def run_script(script_path, cwd='.'):
54 """Execute a script from a working directory.
55
56 :param script_path: Absolute path to the script to run.
57 :param cwd: The directory to run the script from.
58 """
59 run_thru_shell = sys.platform.startswith('win')
60 if script_path.endswith('.py'):
61 script_command = [sys.executable, script_path]
62 else:
63 script_command = [script_path]
64
65 utils.make_executable(script_path)
66
67 proc = subprocess.Popen(
68 script_command,
69 shell=run_thru_shell,
70 cwd=cwd
71 )
72 exit_status = proc.wait()
73 if exit_status != EXIT_SUCCESS:
74 raise FailedHookException(
75 "Hook script failed (exit status: %d)" % exit_status)
76
77
78 def run_script_with_context(script_path, cwd, context):
79 """Execute a script after rendering it with Jinja.
80
81 :param script_path: Absolute path to the script to run.
82 :param cwd: The directory to run the script from.
83 :param context: Cookiecutter project template context.
84 """
85 _, extension = os.path.splitext(script_path)
86
87 contents = io.open(script_path, 'r', encoding='utf-8').read()
88
89 with tempfile.NamedTemporaryFile(
90 delete=False,
91 mode='wb',
92 suffix=extension
93 ) as temp:
94 output = Template(contents).render(**context)
95 temp.write(output.encode('utf-8'))
96
97 run_script(temp.name, cwd)
98
99
100 def run_hook(hook_name, project_dir, context):
101 """
102 Try to find and execute a hook from the specified project directory.
103
104 :param hook_name: The hook to execute.
105 :param project_dir: The directory to execute the script from.
106 :param context: Cookiecutter project context.
107 """
108 script = find_hooks().get(hook_name)
109 if script is None:
110 logger.debug('No hooks found')
111 return
112 logger.debug('Running hook {}'.format(hook_name))
113 run_script_with_context(script, project_dir, context)
114
[end of cookiecutter/hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py
--- a/cookiecutter/hooks.py
+++ b/cookiecutter/hooks.py
@@ -16,38 +16,53 @@
logger = logging.getLogger(__name__)
-
_HOOKS = [
'pre_gen_project',
'post_gen_project',
- # TODO: other hooks should be listed here
]
EXIT_SUCCESS = 0
-def find_hooks():
+def valid_hook(hook_file, hook_name):
+ """Determine if a hook file is valid.
+
+ :param hook_file: The hook file to consider for validity
+ :param hook_name: The hook to find
+ :return: The hook file validity
+ """
+ filename = os.path.basename(hook_file)
+ basename = os.path.splitext(filename)[0]
+
+ matching_hook = basename == hook_name
+ supported_hook = basename in _HOOKS
+ backup_file = filename.endswith('~')
+
+ return matching_hook and supported_hook and not backup_file
+
+
+def find_hook(hook_name, hooks_dir='hooks'):
"""Return a dict of all hook scripts provided.
Must be called with the project template as the current working directory.
Dict's key will be the hook/script's name, without extension, while values
will be the absolute path to the script. Missing scripts will not be
included in the returned dict.
+
+ :param hook_name: The hook to find
+ :param hooks_dir: The hook directory in the template
+ :return: The absolute path to the hook script or None
"""
- hooks_dir = 'hooks'
- hooks = {}
- logger.debug('hooks_dir is {}'.format(hooks_dir))
+ logger.debug('hooks_dir is {}'.format(os.path.abspath(hooks_dir)))
if not os.path.isdir(hooks_dir):
logger.debug('No hooks/ dir in template_dir')
- return hooks
+ return None
- for f in os.listdir(hooks_dir):
- filename = os.path.basename(f)
- basename = os.path.splitext(filename)[0]
+ for hook_file in os.listdir(hooks_dir):
+ if valid_hook(hook_file, hook_name):
+ return os.path.abspath(os.path.join(hooks_dir, hook_file))
- if basename in _HOOKS and not filename.endswith('~'):
- hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))
- return hooks
+ return None
def run_script(script_path, cwd='.'):
@@ -105,7 +120,7 @@
:param project_dir: The directory to execute the script from.
:param context: Cookiecutter project context.
"""
- script = find_hooks().get(hook_name)
+ script = find_hook(hook_name)
if script is None:
logger.debug('No hooks found')
return
| {"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -16,38 +16,53 @@\n \n logger = logging.getLogger(__name__)\n \n-\n _HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n- # TODO: other hooks should be listed here\n ]\n EXIT_SUCCESS = 0\n \n \n-def find_hooks():\n+def valid_hook(hook_file, hook_name):\n+ \"\"\"Determine if a hook file is valid.\n+\n+ :param hook_file: The hook file to consider for validity\n+ :param hook_name: The hook to find\n+ :return: The hook file validity\n+ \"\"\"\n+ filename = os.path.basename(hook_file)\n+ basename = os.path.splitext(filename)[0]\n+\n+ matching_hook = basename == hook_name\n+ supported_hook = basename in _HOOKS\n+ backup_file = filename.endswith('~')\n+\n+ return matching_hook and supported_hook and not backup_file\n+\n+\n+def find_hook(hook_name, hooks_dir='hooks'):\n \"\"\"Return a dict of all hook scripts provided.\n \n Must be called with the project template as the current working directory.\n Dict's key will be the hook/script's name, without extension, while values\n will be the absolute path to the script. Missing scripts will not be\n included in the returned dict.\n+\n+ :param hook_name: The hook to find\n+ :param hooks_dir: The hook directory in the template\n+ :return: The absolute path to the hook script or None\n \"\"\"\n- hooks_dir = 'hooks'\n- hooks = {}\n- logger.debug('hooks_dir is {}'.format(hooks_dir))\n+ logger.debug('hooks_dir is {}'.format(os.path.abspath(hooks_dir)))\n \n if not os.path.isdir(hooks_dir):\n logger.debug('No hooks/ dir in template_dir')\n- return hooks\n+ return None\n \n- for f in os.listdir(hooks_dir):\n- filename = os.path.basename(f)\n- basename = os.path.splitext(filename)[0]\n+ for hook_file in os.listdir(hooks_dir):\n+ if valid_hook(hook_file, hook_name):\n+ return os.path.abspath(os.path.join(hooks_dir, hook_file))\n \n- if basename in _HOOKS and not filename.endswith('~'):\n- hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n- return hooks\n+ return None\n \n \n def run_script(script_path, cwd='.'):\n@@ -105,7 +120,7 @@\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n- script = find_hooks().get(hook_name)\n+ script = find_hook(hook_name)\n if script is None:\n logger.debug('No hooks found')\n return\n", "issue": "Simplify cookiecutter.hooks.find_hooks\nWe should rename `cookiecutter.hooks.find_hooks` to `find_hook(hook_name)` and explicitly look for the requested hook, instead of processing all the files in the hooks directory.\n\nSee https://github.com/audreyr/cookiecutter/pull/768/files/9a94484093ca23e9d55d42a53f096f67535b0b63#r68646614\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Functions for discovering and executing various cookiecutter hooks.\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\nlogger = logging.getLogger(__name__)\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"Return a dict of all hook scripts provided.\n\n Must be called with the project template as the current working directory.\n Dict's key will be the hook/script's name, without extension, while values\n will be the absolute path to the script. 
Missing scripts will not be\n included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n hooks = {}\n logger.debug('hooks_dir is {}'.format(hooks_dir))\n\n if not os.path.isdir(hooks_dir):\n logger.debug('No hooks/ dir in template_dir')\n return hooks\n\n for f in os.listdir(hooks_dir):\n filename = os.path.basename(f)\n basename = os.path.splitext(filename)[0]\n\n if basename in _HOOKS and not filename.endswith('~'):\n hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return hooks\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"Execute a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"Execute a script after rendering it with Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logger.debug('No hooks found')\n return\n logger.debug('Running hook {}'.format(hook_name))\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]} | 1,630 | 640 |
gh_patches_debug_8339 | rasdani/github-patches | git_diff | google__turbinia-1012 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'message' referenced before assignment in recipe_helpers.validate_recipe
https://github.com/google/turbinia/blob/a756f4c625cf3796fc82d160f3c794c7e2039437/turbinia/lib/recipe_helpers.py#L169
</issue>
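The `UnboundLocalError` risk comes from the branch that logs a formatted string inline and then does `return (False, message)` without ever binding `message` on that path. A sketch of the fix's shape, pulled out into a standalone helper so it runs on its own (names and wording are illustrative):

```python
import logging

log = logging.getLogger('turbinia')


def check_recipe_task(task_loader, proposed_task, recipe_item):
    """Bind the error message before logging and returning it."""
    if not task_loader.check_task_name(proposed_task):
        message = (
            'Task {0:s} defined for task recipe {1:s} does not exist.'.format(
                proposed_task, recipe_item))
        log.error(message)
        return (False, message)
    return (True, '')
```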
<code>
[start of turbinia/lib/recipe_helpers.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2021 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Library to contain recipe validation logic."""
16
17 import copy
18 import logging
19 import yaml
20 import os
21
22 from yaml import Loader
23 from yaml import load
24 from turbinia import config
25 from turbinia.lib.file_helpers import file_to_str
26 from turbinia.lib.file_helpers import file_to_list
27 from turbinia.task_utils import TaskLoader
28
29 log = logging.getLogger('turbinia')
30
31 #Attributes allowed on the 'globals' task recipe
32 DEFAULT_GLOBALS_RECIPE = {
33 'debug_tasks': False,
34 'jobs_allowlist': [],
35 'jobs_denylist': [],
36 'yara_rules': '',
37 'filter_patterns': [],
38 'sketch_id': None,
39 'group_id': ''
40 }
41
42 #Default recipes dict
43 DEFAULT_RECIPE = {'globals': DEFAULT_GLOBALS_RECIPE}
44
45
46 def load_recipe_from_file(recipe_file, validate=True):
47 """Load recipe from file.
48
49 Args:
50 recipe_file(str): Name of the recipe file to be read.
51
52 Returns:
53 dict: Validated and corrected recipe dictionary.
54 Empty dict if recipe is invalid.
55 """
56 if not recipe_file:
57 return copy.deepcopy(DEFAULT_RECIPE)
58 try:
59 log.info('Loading recipe file from {0:s}'.format(recipe_file))
60 with open(recipe_file, 'r') as r_file:
61 recipe_file_contents = r_file.read()
62 recipe_dict = load(recipe_file_contents, Loader=Loader)
63 if validate:
64 success, _ = validate_recipe(recipe_dict)
65 if success:
66 return recipe_dict
67 else:
68 return recipe_dict
69 except yaml.parser.ParserError as exception:
70 message = (
71 'Invalid YAML on recipe file {0:s}: {1!s}.'.format(
72 recipe_file, exception))
73 log.error(message)
74 except IOError as exception:
75 log.error(
76 'Failed to read recipe file {0:s}: {1!s}'.format(
77 recipe_file, exception))
78 return {}
79
80
81 def validate_globals_recipe(proposed_globals_recipe):
82 """Validate the 'globals' special task recipe.
83
84 Args:
85 proposed_globals_recipe(dict): globals task recipe in need of validation.
86
87 Returns:
88 Tuple(
89 bool: Whether the recipe has a valid format.
90 str: Error message if validation failed.
91 )
92 """
93 reference_globals_recipe = copy.deepcopy(DEFAULT_GLOBALS_RECIPE)
94 reference_globals_recipe.update(proposed_globals_recipe)
95
96 filter_patterns_file = proposed_globals_recipe.get(
97 'filter_patterns_file', None)
98 yara_rules_file = proposed_globals_recipe.get('yara_rules_file', None)
99 if filter_patterns_file:
100 proposed_globals_recipe['filter_patterns'] = file_to_list(
101 filter_patterns_file)
102 if yara_rules_file:
103 proposed_globals_recipe['yara_rules'] = file_to_str(yara_rules_file)
104 diff = set(proposed_globals_recipe) - set(DEFAULT_GLOBALS_RECIPE)
105 if diff:
106 message = (
107 'Invalid recipe: Unknown keys [{0:s}] found in globals recipe'.format(
108 str(diff)))
109 log.error(message)
110 return (False, message)
111
112 if (proposed_globals_recipe.get('jobs_allowlist') and
113 proposed_globals_recipe.get('jobs_denylist')):
114 message = 'Invalid recipe: Jobs cannot be in both the allow and deny lists'
115 log.error(message)
116 return (False, message)
117 return (True, '')
118
119
120 def validate_recipe(recipe_dict):
121 """Validate the 'recipe' dict supplied by the request recipe.
122
123 Args:
124 recipe_dict(dict): Turbinia recipe in need of validation
125 submitted along with the evidence.
126
127 Returns:
128 Tuple(
129 bool: Whether the recipe has a valid format.
130 str: Error message if validation failed.
131 )
132 """
133 tasks_with_recipe = []
134 #If not globals task recipe is specified create one.
135 if 'globals' not in recipe_dict:
136 recipe_dict['globals'] = copy.deepcopy(DEFAULT_RECIPE)
137 log.warning(
138 'No globals recipe specified, all recipes should include '
139 'a globals entry, the default values will be used')
140 else:
141 success, message = validate_globals_recipe(recipe_dict['globals'])
142 if not success:
143 log.error(message)
144 return (False, message)
145
146 for recipe_item, recipe_item_contents in recipe_dict.items():
147 if recipe_item in tasks_with_recipe:
148 message = (
149 'Two recipe items with the same name \"{0:s}\" have been found. '
150 'If you wish to specify several task runs of the same tool, '
151 'please include them in separate recipes.'.format(recipe_item))
152 log.error(message)
153 return (False, message)
154 if recipe_item != 'globals':
155 if 'task' not in recipe_item_contents:
156 message = (
157 'Recipe item \"{0:s}\" has no "task" key. All recipe items '
158 'must have a "task" key indicating the TurbiniaTask '
159 'to which it relates.'.format(recipe_item))
160 log.error(message)
161 return (False, message)
162 proposed_task = recipe_item_contents['task']
163
164 task_loader = TaskLoader()
165 if not task_loader.check_task_name(proposed_task):
166 log.error(
167 'Task {0:s} defined for task recipe {1:s} does not exist.'.format(
168 proposed_task, recipe_item))
169 return (False, message)
170 tasks_with_recipe.append(recipe_item)
171
172 return (True, '')
173
174
175 def get_recipe_path_from_name(recipe_name):
176 """Returns a recipe's path from a recipe name.
177
178 Args:
179 recipe_name (str): A recipe name.
180
181 Returns:
182 str: a recipe's file system path.
183 """
184 recipe_path = ''
185 if not recipe_name.endswith('.yaml'):
186 recipe_name = recipe_name + '.yaml'
187
188 if hasattr(config, 'RECIPE_FILE_DIR') and config.RECIPE_FILE_DIR:
189 recipe_path = os.path.join(config.RECIPE_FILE_DIR, recipe_name)
190 else:
191 recipe_path = os.path.realpath(__file__)
192 recipe_path = os.path.dirname(recipe_path)
193 recipe_path = os.path.join(recipe_path, 'config', 'recipes')
194 recipe_path = os.path.join(recipe_path, recipe_name)
195
196 return recipe_path
197
[end of turbinia/lib/recipe_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/turbinia/lib/recipe_helpers.py b/turbinia/lib/recipe_helpers.py
--- a/turbinia/lib/recipe_helpers.py
+++ b/turbinia/lib/recipe_helpers.py
@@ -163,9 +163,10 @@
task_loader = TaskLoader()
if not task_loader.check_task_name(proposed_task):
- log.error(
- 'Task {0:s} defined for task recipe {1:s} does not exist.'.format(
- proposed_task, recipe_item))
+ message = (
+ 'Task {0:s} defined for task recipe {1:s} does not '
+ 'exist.'.format(proposed_task, recipe_item))
+ log.error(message)
return (False, message)
tasks_with_recipe.append(recipe_item)
| {"golden_diff": "diff --git a/turbinia/lib/recipe_helpers.py b/turbinia/lib/recipe_helpers.py\n--- a/turbinia/lib/recipe_helpers.py\n+++ b/turbinia/lib/recipe_helpers.py\n@@ -163,9 +163,10 @@\n \n task_loader = TaskLoader()\n if not task_loader.check_task_name(proposed_task):\n- log.error(\n- 'Task {0:s} defined for task recipe {1:s} does not exist.'.format(\n- proposed_task, recipe_item))\n+ message = (\n+ 'Task {0:s} defined for task recipe {1:s} does not '\n+ 'exist.'.format(proposed_task, recipe_item))\n+ log.error(message)\n return (False, message)\n tasks_with_recipe.append(recipe_item)\n", "issue": "'message' referenced before assignment in recipe_helpers.validate_recipe\nhttps://github.com/google/turbinia/blob/a756f4c625cf3796fc82d160f3c794c7e2039437/turbinia/lib/recipe_helpers.py#L169\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Library to contain recipe validation logic.\"\"\"\n\nimport copy\nimport logging\nimport yaml\nimport os\n\nfrom yaml import Loader\nfrom yaml import load\nfrom turbinia import config\nfrom turbinia.lib.file_helpers import file_to_str\nfrom turbinia.lib.file_helpers import file_to_list\nfrom turbinia.task_utils import TaskLoader\n\nlog = logging.getLogger('turbinia')\n\n#Attributes allowed on the 'globals' task recipe\nDEFAULT_GLOBALS_RECIPE = {\n 'debug_tasks': False,\n 'jobs_allowlist': [],\n 'jobs_denylist': [],\n 'yara_rules': '',\n 'filter_patterns': [],\n 'sketch_id': None,\n 'group_id': ''\n}\n\n#Default recipes dict\nDEFAULT_RECIPE = {'globals': DEFAULT_GLOBALS_RECIPE}\n\n\ndef load_recipe_from_file(recipe_file, validate=True):\n \"\"\"Load recipe from file.\n\n Args:\n recipe_file(str): Name of the recipe file to be read.\n\n Returns:\n dict: Validated and corrected recipe dictionary.\n Empty dict if recipe is invalid.\n \"\"\"\n if not recipe_file:\n return copy.deepcopy(DEFAULT_RECIPE)\n try:\n log.info('Loading recipe file from {0:s}'.format(recipe_file))\n with open(recipe_file, 'r') as r_file:\n recipe_file_contents = r_file.read()\n recipe_dict = load(recipe_file_contents, Loader=Loader)\n if validate:\n success, _ = validate_recipe(recipe_dict)\n if success:\n return recipe_dict\n else:\n return recipe_dict\n except yaml.parser.ParserError as exception:\n message = (\n 'Invalid YAML on recipe file {0:s}: {1!s}.'.format(\n recipe_file, exception))\n log.error(message)\n except IOError as exception:\n log.error(\n 'Failed to read recipe file {0:s}: {1!s}'.format(\n recipe_file, exception))\n return {}\n\n\ndef validate_globals_recipe(proposed_globals_recipe):\n \"\"\"Validate the 'globals' special task recipe.\n\n Args:\n proposed_globals_recipe(dict): globals task recipe in need of validation.\n\n Returns:\n Tuple(\n bool: Whether the recipe has a valid format.\n str: Error message if validation failed.\n )\n \"\"\"\n reference_globals_recipe = copy.deepcopy(DEFAULT_GLOBALS_RECIPE)\n reference_globals_recipe.update(proposed_globals_recipe)\n\n 
filter_patterns_file = proposed_globals_recipe.get(\n 'filter_patterns_file', None)\n yara_rules_file = proposed_globals_recipe.get('yara_rules_file', None)\n if filter_patterns_file:\n proposed_globals_recipe['filter_patterns'] = file_to_list(\n filter_patterns_file)\n if yara_rules_file:\n proposed_globals_recipe['yara_rules'] = file_to_str(yara_rules_file)\n diff = set(proposed_globals_recipe) - set(DEFAULT_GLOBALS_RECIPE)\n if diff:\n message = (\n 'Invalid recipe: Unknown keys [{0:s}] found in globals recipe'.format(\n str(diff)))\n log.error(message)\n return (False, message)\n\n if (proposed_globals_recipe.get('jobs_allowlist') and\n proposed_globals_recipe.get('jobs_denylist')):\n message = 'Invalid recipe: Jobs cannot be in both the allow and deny lists'\n log.error(message)\n return (False, message)\n return (True, '')\n\n\ndef validate_recipe(recipe_dict):\n \"\"\"Validate the 'recipe' dict supplied by the request recipe.\n\n Args:\n recipe_dict(dict): Turbinia recipe in need of validation\n submitted along with the evidence.\n\n Returns:\n Tuple(\n bool: Whether the recipe has a valid format.\n str: Error message if validation failed.\n )\n \"\"\"\n tasks_with_recipe = []\n #If not globals task recipe is specified create one.\n if 'globals' not in recipe_dict:\n recipe_dict['globals'] = copy.deepcopy(DEFAULT_RECIPE)\n log.warning(\n 'No globals recipe specified, all recipes should include '\n 'a globals entry, the default values will be used')\n else:\n success, message = validate_globals_recipe(recipe_dict['globals'])\n if not success:\n log.error(message)\n return (False, message)\n\n for recipe_item, recipe_item_contents in recipe_dict.items():\n if recipe_item in tasks_with_recipe:\n message = (\n 'Two recipe items with the same name \\\"{0:s}\\\" have been found. '\n 'If you wish to specify several task runs of the same tool, '\n 'please include them in separate recipes.'.format(recipe_item))\n log.error(message)\n return (False, message)\n if recipe_item != 'globals':\n if 'task' not in recipe_item_contents:\n message = (\n 'Recipe item \\\"{0:s}\\\" has no \"task\" key. All recipe items '\n 'must have a \"task\" key indicating the TurbiniaTask '\n 'to which it relates.'.format(recipe_item))\n log.error(message)\n return (False, message)\n proposed_task = recipe_item_contents['task']\n\n task_loader = TaskLoader()\n if not task_loader.check_task_name(proposed_task):\n log.error(\n 'Task {0:s} defined for task recipe {1:s} does not exist.'.format(\n proposed_task, recipe_item))\n return (False, message)\n tasks_with_recipe.append(recipe_item)\n\n return (True, '')\n\n\ndef get_recipe_path_from_name(recipe_name):\n \"\"\"Returns a recipe's path from a recipe name.\n\n Args:\n recipe_name (str): A recipe name.\n\n Returns:\n str: a recipe's file system path.\n \"\"\"\n recipe_path = ''\n if not recipe_name.endswith('.yaml'):\n recipe_name = recipe_name + '.yaml'\n\n if hasattr(config, 'RECIPE_FILE_DIR') and config.RECIPE_FILE_DIR:\n recipe_path = os.path.join(config.RECIPE_FILE_DIR, recipe_name)\n else:\n recipe_path = os.path.realpath(__file__)\n recipe_path = os.path.dirname(recipe_path)\n recipe_path = os.path.join(recipe_path, 'config', 'recipes')\n recipe_path = os.path.join(recipe_path, recipe_name)\n\n return recipe_path\n", "path": "turbinia/lib/recipe_helpers.py"}]} | 2,563 | 176 |
gh_patches_debug_22238 | rasdani/github-patches | git_diff | tensorflow__addons-2274 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Significant LazyAdam optimizer performance degradation since PR#1988
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Reproducible on Colab
- TensorFlow version and how it was installed (source or binary): TF 2.3.0
- TensorFlow-Addons version and how it was installed (source or binary): TF 0.11.2
- Python version: 3.6.9
- Is GPU used? (yes/no): no (but issue observed on GPU as well)
**Describe the bug**
PR [#1988](https://github.com/tensorflow/addons/pull/1988/files) replaces calls to resource scatter update/sub/add from `tf.raw_ops` with calls to similar methods from ancestor class [OptimizerV2](https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/optimizer_v2/optimizer_v2.py#L1149-L1157).
These differ in that the OptimizerV2 method calls `.value()` on the input resource and returns a `Tensor`, whereas the `raw_ops` method returns an `Operation`.
The result is a major performance penalty with both CPU and GPU runtimes (in fact, when using a GPU I have observed GPU utilization drop to near 0%).
**Code to reproduce the issue**
Issue reproduced in this Colab on a CPU runtime with both the Keras API and the Estimator API:
https://colab.research.google.com/drive/1IxPrQiGQn9Wgn9MtMhVTh0rdLlgZkMYo?usp=sharing
</issue>
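The contrast the report draws is between `tf.raw_ops.ResourceScatterSub`/`ResourceScatterUpdate`, which act on the variable's handle and (in graph mode) yield an `Operation`, and `OptimizerV2._resource_scatter_*`, which reads `resource.value()` and yields a `Tensor`. A minimal sketch of a raw-ops-based helper of the kind the optimizer could call instead (the function name and wrapper shape are assumptions for illustration):

```python
import tensorflow as tf


def scatter_sub_on_handle(var, indices, updates):
    """Sparse in-place subtraction via the variable handle.

    Unlike OptimizerV2's helper, this does not call var.value(),
    so no extra dense read of the variable is materialized.
    """
    return tf.raw_ops.ResourceScatterSub(
        resource=var.handle, indices=indices, updates=updates)
```

A matching wrapper around `tf.raw_ops.ResourceScatterUpdate` would cover the moment-accumulator updates in the same way.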
<code>
[start of tensorflow_addons/optimizers/lazy_adam.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Variant of the Adam optimizer that handles sparse updates more efficiently.
16
17 Compared with the original Adam optimizer, the one in this file can
18 provide a large improvement in model training throughput for some
19 applications. However, it provides slightly different semantics than the
20 original Adam algorithm, and may lead to different empirical results.
21 """
22
23 import tensorflow as tf
24 from tensorflow_addons.utils.types import FloatTensorLike
25
26 from typeguard import typechecked
27 from typing import Union, Callable
28
29
30 @tf.keras.utils.register_keras_serializable(package="Addons")
31 class LazyAdam(tf.keras.optimizers.Adam):
32 """Variant of the Adam optimizer that handles sparse updates more
33 efficiently.
34
35 The original Adam algorithm maintains two moving-average accumulators for
36 each trainable variable; the accumulators are updated at every step.
37 This class provides lazier handling of gradient updates for sparse
38 variables. It only updates moving-average accumulators for sparse variable
39 indices that appear in the current batch, rather than updating the
40 accumulators for all indices. Compared with the original Adam optimizer,
41 it can provide large improvements in model training throughput for some
42 applications. However, it provides slightly different semantics than the
43 original Adam algorithm, and may lead to different empirical results.
44
45 Note, amsgrad is currently not supported and the argument can only be
46 False.
47 """
48
49 @typechecked
50 def __init__(
51 self,
52 learning_rate: Union[FloatTensorLike, Callable] = 0.001,
53 beta_1: FloatTensorLike = 0.9,
54 beta_2: FloatTensorLike = 0.999,
55 epsilon: FloatTensorLike = 1e-7,
56 amsgrad: bool = False,
57 name: str = "LazyAdam",
58 **kwargs,
59 ):
60 """Constructs a new LazyAdam optimizer.
61
62 Args:
63 learning_rate: A `Tensor` or a floating point value. or a schedule
64 that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
65 The learning rate.
66 beta_1: A `float` value or a constant `float` tensor.
67 The exponential decay rate for the 1st moment estimates.
68 beta_2: A `float` value or a constant `float` tensor.
69 The exponential decay rate for the 2nd moment estimates.
70 epsilon: A small constant for numerical stability.
71 This epsilon is "epsilon hat" in
72 [Adam: A Method for Stochastic Optimization. Kingma et al., 2014]
73 (http://arxiv.org/abs/1412.6980) (in the formula just
74 before Section 2.1), not the epsilon in Algorithm 1 of the paper.
75 amsgrad: `boolean`. Whether to apply AMSGrad variant of this
76 algorithm from the paper "On the Convergence of Adam and beyond".
77 Note that this argument is currently not supported and the
78 argument can only be `False`.
79 name: Optional name for the operations created when applying
80 gradients. Defaults to "LazyAdam".
81 **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
82 `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue`
83 is clip gradients by value, `decay` is included for backward
84 compatibility to allow time inverse decay of learning rate. `lr`
85 is included for backward compatibility, recommended to use
86 `learning_rate` instead.
87 """
88 super().__init__(
89 learning_rate=learning_rate,
90 beta_1=beta_1,
91 beta_2=beta_2,
92 epsilon=epsilon,
93 amsgrad=amsgrad,
94 name=name,
95 **kwargs,
96 )
97
98 def _resource_apply_sparse(self, grad, var, indices):
99 var_dtype = var.dtype.base_dtype
100 lr_t = self._decayed_lr(var_dtype)
101 beta_1_t = self._get_hyper("beta_1", var_dtype)
102 beta_2_t = self._get_hyper("beta_2", var_dtype)
103 local_step = tf.cast(self.iterations + 1, var_dtype)
104 beta_1_power = tf.math.pow(beta_1_t, local_step)
105 beta_2_power = tf.math.pow(beta_2_t, local_step)
106 epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
107 lr = lr_t * tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power)
108
109 # \\(m := beta1 * m + (1 - beta1) * g_t\\)
110 m = self.get_slot(var, "m")
111 m_t_slice = beta_1_t * tf.gather(m, indices) + (1 - beta_1_t) * grad
112 m_update_op = self._resource_scatter_update(m, indices, m_t_slice)
113
114 # \\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\)
115 v = self.get_slot(var, "v")
116 v_t_slice = beta_2_t * tf.gather(v, indices) + (1 - beta_2_t) * tf.math.square(
117 grad
118 )
119 v_update_op = self._resource_scatter_update(v, indices, v_t_slice)
120
121 # \\(variable += -learning_rate * m_t / (epsilon_t + sqrt(v_t))\\)
122 var_slice = -lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)
123 var_update_op = self._resource_scatter_add(var, indices, var_slice)
124
125 return tf.group(*[var_update_op, m_update_op, v_update_op])
126
[end of tensorflow_addons/optimizers/lazy_adam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/optimizers/lazy_adam.py b/tensorflow_addons/optimizers/lazy_adam.py
--- a/tensorflow_addons/optimizers/lazy_adam.py
+++ b/tensorflow_addons/optimizers/lazy_adam.py
@@ -119,7 +119,26 @@
v_update_op = self._resource_scatter_update(v, indices, v_t_slice)
# \\(variable += -learning_rate * m_t / (epsilon_t + sqrt(v_t))\\)
- var_slice = -lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)
- var_update_op = self._resource_scatter_add(var, indices, var_slice)
+ var_slice = lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)
+ var_update_op = self._resource_scatter_sub(var, indices, var_slice)
return tf.group(*[var_update_op, m_update_op, v_update_op])
+
+ def _resource_scatter_update(self, resource, indices, update):
+ return self._resource_scatter_operate(
+ resource, indices, update, tf.raw_ops.ResourceScatterUpdate
+ )
+
+ def _resource_scatter_sub(self, resource, indices, update):
+ return self._resource_scatter_operate(
+ resource, indices, update, tf.raw_ops.ResourceScatterSub
+ )
+
+ def _resource_scatter_operate(self, resource, indices, update, resource_scatter_op):
+ resource_update_kwargs = {
+ "resource": resource.handle,
+ "indices": indices,
+ "updates": update,
+ }
+
+ return resource_scatter_op(**resource_update_kwargs)
| {"golden_diff": "diff --git a/tensorflow_addons/optimizers/lazy_adam.py b/tensorflow_addons/optimizers/lazy_adam.py\n--- a/tensorflow_addons/optimizers/lazy_adam.py\n+++ b/tensorflow_addons/optimizers/lazy_adam.py\n@@ -119,7 +119,26 @@\n v_update_op = self._resource_scatter_update(v, indices, v_t_slice)\n \n # \\\\(variable += -learning_rate * m_t / (epsilon_t + sqrt(v_t))\\\\)\n- var_slice = -lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)\n- var_update_op = self._resource_scatter_add(var, indices, var_slice)\n+ var_slice = lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)\n+ var_update_op = self._resource_scatter_sub(var, indices, var_slice)\n \n return tf.group(*[var_update_op, m_update_op, v_update_op])\n+\n+ def _resource_scatter_update(self, resource, indices, update):\n+ return self._resource_scatter_operate(\n+ resource, indices, update, tf.raw_ops.ResourceScatterUpdate\n+ )\n+\n+ def _resource_scatter_sub(self, resource, indices, update):\n+ return self._resource_scatter_operate(\n+ resource, indices, update, tf.raw_ops.ResourceScatterSub\n+ )\n+\n+ def _resource_scatter_operate(self, resource, indices, update, resource_scatter_op):\n+ resource_update_kwargs = {\n+ \"resource\": resource.handle,\n+ \"indices\": indices,\n+ \"updates\": update,\n+ }\n+\n+ return resource_scatter_op(**resource_update_kwargs)\n", "issue": "Significant LazyAdam optimizer performance degradation since PR#1988\n**System information**\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Reproducible on Colab\r\n- TensorFlow version and how it was installed (source or binary): TF 2.3.0\r\n- TensorFlow-Addons version and how it was installed (source or binary): TF 0.11.2\r\n- Python version: 3.6.9\r\n- Is GPU used? (yes/no): no (but issue observed on GPU as well)\r\n\r\n**Describe the bug**\r\n\r\nPR [#1988](https://github.com/tensorflow/addons/pull/1988/files) replaces calls to resource scatter update/sub/add from `tf.raw_ops` with calls to similar methods from ancestor class [OptimizerV2](https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/optimizer_v2/optimizer_v2.py#L1149-L1157).\r\nThese differ in that the OptimizerV2 method calls `.value()` on the input resource and returns a `Tensor`, whereas the `raw_ops` method returns an `Operation`.\r\n\r\nThe result is a major performance penalty with both CPU and GPU runtimes (in fact when using a GPU I have observed GPU utilization to drop to near 0%).\r\n\r\n**Code to reproduce the issue**\r\n\r\nIssue reproduced in this Colab on a CPU runtime with both with the Keras API and the Estimator API:\r\nhttps://colab.research.google.com/drive/1IxPrQiGQn9Wgn9MtMhVTh0rdLlgZkMYo?usp=sharing\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Variant of the Adam optimizer that handles sparse updates more efficiently.\n\nCompared with the original Adam optimizer, the one in this file can\nprovide a large improvement in model training throughput for some\napplications. However, it provides slightly different semantics than the\noriginal Adam algorithm, and may lead to different empirical results.\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_addons.utils.types import FloatTensorLike\n\nfrom typeguard import typechecked\nfrom typing import Union, Callable\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass LazyAdam(tf.keras.optimizers.Adam):\n \"\"\"Variant of the Adam optimizer that handles sparse updates more\n efficiently.\n\n The original Adam algorithm maintains two moving-average accumulators for\n each trainable variable; the accumulators are updated at every step.\n This class provides lazier handling of gradient updates for sparse\n variables. It only updates moving-average accumulators for sparse variable\n indices that appear in the current batch, rather than updating the\n accumulators for all indices. Compared with the original Adam optimizer,\n it can provide large improvements in model training throughput for some\n applications. However, it provides slightly different semantics than the\n original Adam algorithm, and may lead to different empirical results.\n\n Note, amsgrad is currently not supported and the argument can only be\n False.\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n learning_rate: Union[FloatTensorLike, Callable] = 0.001,\n beta_1: FloatTensorLike = 0.9,\n beta_2: FloatTensorLike = 0.999,\n epsilon: FloatTensorLike = 1e-7,\n amsgrad: bool = False,\n name: str = \"LazyAdam\",\n **kwargs,\n ):\n \"\"\"Constructs a new LazyAdam optimizer.\n\n Args:\n learning_rate: A `Tensor` or a floating point value. or a schedule\n that is a `tf.keras.optimizers.schedules.LearningRateSchedule`\n The learning rate.\n beta_1: A `float` value or a constant `float` tensor.\n The exponential decay rate for the 1st moment estimates.\n beta_2: A `float` value or a constant `float` tensor.\n The exponential decay rate for the 2nd moment estimates.\n epsilon: A small constant for numerical stability.\n This epsilon is \"epsilon hat\" in\n [Adam: A Method for Stochastic Optimization. Kingma et al., 2014]\n (http://arxiv.org/abs/1412.6980) (in the formula just\n before Section 2.1), not the epsilon in Algorithm 1 of the paper.\n amsgrad: `boolean`. Whether to apply AMSGrad variant of this\n algorithm from the paper \"On the Convergence of Adam and beyond\".\n Note that this argument is currently not supported and the\n argument can only be `False`.\n name: Optional name for the operations created when applying\n gradients. Defaults to \"LazyAdam\".\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,\n `lr`, `decay`}. 
`clipnorm` is clip gradients by norm; `clipvalue`\n is clip gradients by value, `decay` is included for backward\n compatibility to allow time inverse decay of learning rate. `lr`\n is included for backward compatibility, recommended to use\n `learning_rate` instead.\n \"\"\"\n super().__init__(\n learning_rate=learning_rate,\n beta_1=beta_1,\n beta_2=beta_2,\n epsilon=epsilon,\n amsgrad=amsgrad,\n name=name,\n **kwargs,\n )\n\n def _resource_apply_sparse(self, grad, var, indices):\n var_dtype = var.dtype.base_dtype\n lr_t = self._decayed_lr(var_dtype)\n beta_1_t = self._get_hyper(\"beta_1\", var_dtype)\n beta_2_t = self._get_hyper(\"beta_2\", var_dtype)\n local_step = tf.cast(self.iterations + 1, var_dtype)\n beta_1_power = tf.math.pow(beta_1_t, local_step)\n beta_2_power = tf.math.pow(beta_2_t, local_step)\n epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)\n lr = lr_t * tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power)\n\n # \\\\(m := beta1 * m + (1 - beta1) * g_t\\\\)\n m = self.get_slot(var, \"m\")\n m_t_slice = beta_1_t * tf.gather(m, indices) + (1 - beta_1_t) * grad\n m_update_op = self._resource_scatter_update(m, indices, m_t_slice)\n\n # \\\\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\\\)\n v = self.get_slot(var, \"v\")\n v_t_slice = beta_2_t * tf.gather(v, indices) + (1 - beta_2_t) * tf.math.square(\n grad\n )\n v_update_op = self._resource_scatter_update(v, indices, v_t_slice)\n\n # \\\\(variable += -learning_rate * m_t / (epsilon_t + sqrt(v_t))\\\\)\n var_slice = -lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)\n var_update_op = self._resource_scatter_add(var, indices, var_slice)\n\n return tf.group(*[var_update_op, m_update_op, v_update_op])\n", "path": "tensorflow_addons/optimizers/lazy_adam.py"}]} | 2,546 | 388 |
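Editor's note on the LazyAdam record above: because the reported regression lives in the sparse-gradient path, the quickest sanity check for any fix is to time a model whose gradients are `tf.IndexedSlices`, e.g. one dominated by an embedding lookup. The sketch below is an assumption-laden illustration (the model shape, sizes and timing loop are made up, not taken from the record):

```python
# Hypothetical timing harness: compares dense Adam against LazyAdam on an
# embedding model, where gradients are tf.IndexedSlices (the sparse path).
import time
import tensorflow as tf
import tensorflow_addons as tfa

def make_model():
    return tf.keras.Sequential([
        tf.keras.layers.Embedding(input_dim=100_000, output_dim=64),
        tf.keras.layers.GlobalAveragePooling1D(),
        tf.keras.layers.Dense(1),
    ])

x = tf.random.uniform((256, 20), maxval=100_000, dtype=tf.int32)
y = tf.random.uniform((256, 1))

for opt in (tf.keras.optimizers.Adam(), tfa.optimizers.LazyAdam()):
    model = make_model()
    model.compile(optimizer=opt, loss="mse")
    model.fit(x, y, epochs=1, verbose=0)   # warm-up / graph tracing
    start = time.perf_counter()
    model.fit(x, y, epochs=5, verbose=0)
    print(type(opt).__name__, f"{time.perf_counter() - start:.3f}s")
```

With the regression present, the LazyAdam timing would come out far worse than dense Adam rather than comparable or better.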
gh_patches_debug_26507 | rasdani/github-patches | git_diff | airctic__icevision-960 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add more logging to the PyTorch Lightning models.
The feature consists of two parts:
1. Add the validation loss to the progress bar by default
 2. Create a boolean parameter for extended progress bar logging (showing the different components of the loss)
</issue>
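As a point of reference for this record, part 1 of the request maps onto PyTorch Lightning's standard logging hook. The sketch below is illustrative only; `compute_loss`, `loss_components` and the `log_loss_components` flag are assumed names, not icevision API:

```python
# Hypothetical LightningModule methods; only self.log(..., prog_bar=True) is the
# real PyTorch Lightning API, the helpers are assumed for illustration.
def validation_step(self, batch, batch_idx):
    loss = self.compute_loss(batch)                # assumed helper
    self.log("val_loss", loss, prog_bar=True)      # part 1: surface it in the progress bar
    if self.log_loss_components:                   # part 2: assumed boolean switch
        for name, value in self.loss_components(batch).items():
            self.log(f"val_{name}", value, prog_bar=True)
    return loss
```

The accepted patch further down this record takes a related approach for metrics, routing selected metric keys to the progress bar via `prog_bar=True`.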
<code>
[start of icevision/engines/lightning/lightning_model_adapter.py]
1 __all__ = ["LightningModelAdapter"]
2
3 import pytorch_lightning as pl
4 from icevision.imports import *
5 from icevision.metrics import *
6
7
8 class LightningModelAdapter(pl.LightningModule, ABC):
9 def __init__(self, metrics: List[Metric] = None):
10 super().__init__()
11 self.metrics = metrics or []
12
13 def accumulate_metrics(self, preds):
14 for metric in self.metrics:
15 metric.accumulate(preds=preds)
16
17 def finalize_metrics(self) -> None:
18 for metric in self.metrics:
19 metric_logs = metric.finalize()
20 for k, v in metric_logs.items():
21 self.log(f"{metric.name}/{k}", v)
22
[end of icevision/engines/lightning/lightning_model_adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/engines/lightning/lightning_model_adapter.py b/icevision/engines/lightning/lightning_model_adapter.py
--- a/icevision/engines/lightning/lightning_model_adapter.py
+++ b/icevision/engines/lightning/lightning_model_adapter.py
@@ -6,9 +6,21 @@
class LightningModelAdapter(pl.LightningModule, ABC):
- def __init__(self, metrics: List[Metric] = None):
+ def __init__(
+ self,
+ metrics: List[Metric] = None,
+ metrics_keys_to_log_to_prog_bar: List[tuple] = None,
+ ):
+ """
+ To show a metric in the progressbar a list of tupels can be provided for metrics_keys_to_log_to_prog_bar, the first
+ entry has to be the name of the metric to log and the second entry the display name in the progressbar. By default the
+ mAP is logged to the progressbar.
+ """
super().__init__()
self.metrics = metrics or []
+ self.metrics_keys_to_log_to_prog_bar = metrics_keys_to_log_to_prog_bar or [
+ ("AP (IoU=0.50:0.95) area=all", "COCOMetric")
+ ]
def accumulate_metrics(self, preds):
for metric in self.metrics:
@@ -18,4 +30,9 @@
for metric in self.metrics:
metric_logs = metric.finalize()
for k, v in metric_logs.items():
- self.log(f"{metric.name}/{k}", v)
+ for entry in self.metrics_keys_to_log_to_prog_bar:
+ if entry[0] == k:
+ self.log(entry[1], v, prog_bar=True)
+ self.log(f"{metric.name}/{k}", v)
+ else:
+ self.log(f"{metric.name}/{k}", v)
| {"golden_diff": "diff --git a/icevision/engines/lightning/lightning_model_adapter.py b/icevision/engines/lightning/lightning_model_adapter.py\n--- a/icevision/engines/lightning/lightning_model_adapter.py\n+++ b/icevision/engines/lightning/lightning_model_adapter.py\n@@ -6,9 +6,21 @@\n \n \n class LightningModelAdapter(pl.LightningModule, ABC):\n- def __init__(self, metrics: List[Metric] = None):\n+ def __init__(\n+ self,\n+ metrics: List[Metric] = None,\n+ metrics_keys_to_log_to_prog_bar: List[tuple] = None,\n+ ):\n+ \"\"\"\n+ To show a metric in the progressbar a list of tupels can be provided for metrics_keys_to_log_to_prog_bar, the first\n+ entry has to be the name of the metric to log and the second entry the display name in the progressbar. By default the\n+ mAP is logged to the progressbar.\n+ \"\"\"\n super().__init__()\n self.metrics = metrics or []\n+ self.metrics_keys_to_log_to_prog_bar = metrics_keys_to_log_to_prog_bar or [\n+ (\"AP (IoU=0.50:0.95) area=all\", \"COCOMetric\")\n+ ]\n \n def accumulate_metrics(self, preds):\n for metric in self.metrics:\n@@ -18,4 +30,9 @@\n for metric in self.metrics:\n metric_logs = metric.finalize()\n for k, v in metric_logs.items():\n- self.log(f\"{metric.name}/{k}\", v)\n+ for entry in self.metrics_keys_to_log_to_prog_bar:\n+ if entry[0] == k:\n+ self.log(entry[1], v, prog_bar=True)\n+ self.log(f\"{metric.name}/{k}\", v)\n+ else:\n+ self.log(f\"{metric.name}/{k}\", v)\n", "issue": "Add more logging to the pytorch lighning models.\nThe feature consists of two parts:\r\n 1. Add the validation loss to the progress bar by default\r\n 2. Create boolean parameter for extended progress bar logging (showing the different components of the loss)\n", "before_files": [{"content": "__all__ = [\"LightningModelAdapter\"]\n\nimport pytorch_lightning as pl\nfrom icevision.imports import *\nfrom icevision.metrics import *\n\n\nclass LightningModelAdapter(pl.LightningModule, ABC):\n def __init__(self, metrics: List[Metric] = None):\n super().__init__()\n self.metrics = metrics or []\n\n def accumulate_metrics(self, preds):\n for metric in self.metrics:\n metric.accumulate(preds=preds)\n\n def finalize_metrics(self) -> None:\n for metric in self.metrics:\n metric_logs = metric.finalize()\n for k, v in metric_logs.items():\n self.log(f\"{metric.name}/{k}\", v)\n", "path": "icevision/engines/lightning/lightning_model_adapter.py"}]} | 785 | 417 |
gh_patches_debug_13791 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-3409 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
app_key not passed to aiohttp_jinja2
When using aiohttp_admin, the app_key value for the templating module differs from the default one.
This causes an error when executing:
https://github.com/DataDog/dd-trace-py/blob/ec191a4a71ae71017b70d26111bba4489e617ae5/ddtrace/contrib/aiohttp/template.py#L21
As far as I understand this would solve the problem.
`env = aiohttp_jinja2.get_env(request.app, app_key=kwargs["app_key"])`
</issue>
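To make the failure mode concrete: aiohttp_jinja2 stores its environment under an app_key, so looking it up under the default key fails when a library such as aiohttp_admin configured templating under its own key. The reproduction below is a hedged sketch; the custom key name is made up for illustration:

```python
import aiohttp_jinja2
import jinja2
from aiohttp import web

CUSTOM_KEY = "aiohttp_admin_templating"   # illustrative, not the real key

app = web.Application()
aiohttp_jinja2.setup(app,
                     loader=jinja2.DictLoader({"index.html": "hi"}),
                     app_key=CUSTOM_KEY)

aiohttp_jinja2.get_env(app, app_key=CUSTOM_KEY)  # returns the configured Environment
aiohttp_jinja2.get_env(app)                      # default key: nothing registered there
```

The patch shown later in this record takes the safer route of forwarding `app_key` to `get_env` only when the caller passed one, which leaves the default behaviour unchanged.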
<code>
[start of ddtrace/contrib/aiohttp_jinja2/patch.py]
1 from ddtrace import Pin
2 from ddtrace import config
3
4 from ...ext import SpanTypes
5 from ...internal.utils import get_argument_value
6 from ..trace_utils import unwrap
7 from ..trace_utils import with_traced_module
8 from ..trace_utils import wrap
9
10
11 config._add(
12 "aiohttp_jinja2",
13 dict(),
14 )
15
16
17 @with_traced_module
18 def traced_render_template(aiohttp_jinja2, pin, func, instance, args, kwargs):
19 # original signature:
20 # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')
21 template_name = get_argument_value(args, kwargs, 0, "template_name")
22 request = get_argument_value(args, kwargs, 1, "request")
23 env = aiohttp_jinja2.get_env(request.app)
24
25 # the prefix is available only on PackageLoader
26 template_prefix = getattr(env.loader, "package_path", "")
27 template_meta = "%s/%s" % (template_prefix, template_name)
28
29 with pin.tracer.trace("aiohttp.template", span_type=SpanTypes.TEMPLATE) as span:
30 span.set_tag("aiohttp.template", template_meta)
31 return func(*args, **kwargs)
32
33
34 def _patch(aiohttp_jinja2):
35 Pin().onto(aiohttp_jinja2)
36 wrap("aiohttp_jinja2", "render_template", traced_render_template(aiohttp_jinja2))
37
38
39 def patch():
40 import aiohttp_jinja2
41
42 if getattr(aiohttp_jinja2, "_datadog_patch", False):
43 return
44
45 _patch(aiohttp_jinja2)
46
47 setattr(aiohttp_jinja2, "_datadog_patch", True)
48
49
50 def _unpatch(aiohttp_jinja2):
51 unwrap(aiohttp_jinja2, "render_template")
52
53
54 def unpatch():
55 import aiohttp_jinja2
56
57 if not getattr(aiohttp_jinja2, "_datadog_patch", False):
58 return
59
60 _unpatch(aiohttp_jinja2)
61
62 setattr(aiohttp_jinja2, "_datadog_patch", False)
63
[end of ddtrace/contrib/aiohttp_jinja2/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/aiohttp_jinja2/patch.py b/ddtrace/contrib/aiohttp_jinja2/patch.py
--- a/ddtrace/contrib/aiohttp_jinja2/patch.py
+++ b/ddtrace/contrib/aiohttp_jinja2/patch.py
@@ -20,7 +20,10 @@
# render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')
template_name = get_argument_value(args, kwargs, 0, "template_name")
request = get_argument_value(args, kwargs, 1, "request")
- env = aiohttp_jinja2.get_env(request.app)
+ get_env_kwargs = {}
+ if "app_key" in kwargs:
+ get_env_kwargs["app_key"] = kwargs["app_key"]
+ env = aiohttp_jinja2.get_env(request.app, **get_env_kwargs)
# the prefix is available only on PackageLoader
template_prefix = getattr(env.loader, "package_path", "")
| {"golden_diff": "diff --git a/ddtrace/contrib/aiohttp_jinja2/patch.py b/ddtrace/contrib/aiohttp_jinja2/patch.py\n--- a/ddtrace/contrib/aiohttp_jinja2/patch.py\n+++ b/ddtrace/contrib/aiohttp_jinja2/patch.py\n@@ -20,7 +20,10 @@\n # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')\n template_name = get_argument_value(args, kwargs, 0, \"template_name\")\n request = get_argument_value(args, kwargs, 1, \"request\")\n- env = aiohttp_jinja2.get_env(request.app)\n+ get_env_kwargs = {}\n+ if \"app_key\" in kwargs:\n+ get_env_kwargs[\"app_key\"] = kwargs[\"app_key\"]\n+ env = aiohttp_jinja2.get_env(request.app, **get_env_kwargs)\n \n # the prefix is available only on PackageLoader\n template_prefix = getattr(env.loader, \"package_path\", \"\")\n", "issue": "app_key not passed to aiohttp_jinja2 \nWhen using aiohttp_admin the app_key value for the templating module differs from the default one.\r\n\r\nThis causes an error executing:\r\nhttps://github.com/DataDog/dd-trace-py/blob/ec191a4a71ae71017b70d26111bba4489e617ae5/ddtrace/contrib/aiohttp/template.py#L21\r\n\r\nAs far as I understand this would solve the problem.\r\n`env = aiohttp_jinja2.get_env(request.app, app_key=kwargs[\"app_key\"])`\n", "before_files": [{"content": "from ddtrace import Pin\nfrom ddtrace import config\n\nfrom ...ext import SpanTypes\nfrom ...internal.utils import get_argument_value\nfrom ..trace_utils import unwrap\nfrom ..trace_utils import with_traced_module\nfrom ..trace_utils import wrap\n\n\nconfig._add(\n \"aiohttp_jinja2\",\n dict(),\n)\n\n\n@with_traced_module\ndef traced_render_template(aiohttp_jinja2, pin, func, instance, args, kwargs):\n # original signature:\n # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')\n template_name = get_argument_value(args, kwargs, 0, \"template_name\")\n request = get_argument_value(args, kwargs, 1, \"request\")\n env = aiohttp_jinja2.get_env(request.app)\n\n # the prefix is available only on PackageLoader\n template_prefix = getattr(env.loader, \"package_path\", \"\")\n template_meta = \"%s/%s\" % (template_prefix, template_name)\n\n with pin.tracer.trace(\"aiohttp.template\", span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag(\"aiohttp.template\", template_meta)\n return func(*args, **kwargs)\n\n\ndef _patch(aiohttp_jinja2):\n Pin().onto(aiohttp_jinja2)\n wrap(\"aiohttp_jinja2\", \"render_template\", traced_render_template(aiohttp_jinja2))\n\n\ndef patch():\n import aiohttp_jinja2\n\n if getattr(aiohttp_jinja2, \"_datadog_patch\", False):\n return\n\n _patch(aiohttp_jinja2)\n\n setattr(aiohttp_jinja2, \"_datadog_patch\", True)\n\n\ndef _unpatch(aiohttp_jinja2):\n unwrap(aiohttp_jinja2, \"render_template\")\n\n\ndef unpatch():\n import aiohttp_jinja2\n\n if not getattr(aiohttp_jinja2, \"_datadog_patch\", False):\n return\n\n _unpatch(aiohttp_jinja2)\n\n setattr(aiohttp_jinja2, \"_datadog_patch\", False)\n", "path": "ddtrace/contrib/aiohttp_jinja2/patch.py"}]} | 1,269 | 225 |
gh_patches_debug_4173 | rasdani/github-patches | git_diff | statsmodels__statsmodels-779 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OLS residuals returned as Pandas series when endog and exog are Pandas series
When I fit an OLS model with pandas Series and try to do a Durbin-Watson test, the function returns nan. In that case the RegressionResult.resid attribute is a pandas Series rather than a numpy array; converting it to a numpy array explicitly, the durbin_watson function works like a charm.
My instinct is this is something that should probably be changed in OLS (to guarantee the type of resid), hence the title of the issue, but I leave that to the judgement of our fearless leaders.
``` python
import statsmodels.api as sm
import numpy as np
from pandas import DataFrame
x=np.arange(1,11)
y=[num+np.random.normal() for num in np.arange(0,5, .5)]
linmod=sm.OLS(y, x).fit()
dw=sm.stats.stattools.durbin_watson(linmod.resid)
data=DataFrame({'x':x, 'y':y}, index=x)
linmod_pandas=sm.OLS(data.y, data.x).fit()
dw_pandas=sm.stats.stattools.durbin_watson(linmod_pandas.resid)
dw_pandas1=sm.stats.stattools.durbin_watson(array(linmod_pandas.resid))
print type(linmod_pandas.resid)
print dw, dw_pandas, dw_pandas1
```
</issue>
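Two notes on the reproduction above: `array(...)` in the last call presumably refers to `numpy.array`, and the workaround it demonstrates is simply a container coercion. A hedged sketch of that coercion wrapped as a helper (the name is illustrative):

```python
import numpy as np

def durbin_watson_safe(resids):
    """Accepts a pandas Series or any array-like by coercing to ndarray first."""
    resids = np.asarray(resids)
    diff_resids = np.diff(resids, 1)
    return np.dot(diff_resids, diff_resids) / np.dot(resids, resids)
```

This mirrors the one-line `np.asarray` fix applied to `durbin_watson` later in this record.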
<code>
[start of statsmodels/stats/stattools.py]
1 """
2 Statistical tests to be used in conjunction with the models
3
4 Notes
5 -----
6 These functions haven't been formally tested.
7 """
8
9 from scipy import stats
10 import numpy as np
11
12
13 #TODO: these are pretty straightforward but they should be tested
14 def durbin_watson(resids):
15 """
16 Calculates the Durbin-Watson statistic
17
18 Parameters
19 -----------
20 resids : array-like
21
22 Returns
23 --------
24 Durbin Watson statistic. This is defined as
25 sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)
26 """
27 diff_resids = np.diff(resids, 1)
28 dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)
29 return dw
30
31 def omni_normtest(resids, axis=0):
32 """
33 Omnibus test for normality
34
35 Parameters
36 -----------
37 resid : array-like
38 axis : int, optional
39 Default is 0
40
41 Returns
42 -------
43 Chi^2 score, two-tail probability
44 """
45 #TODO: change to exception in summary branch and catch in summary()
46 #behavior changed between scipy 0.9 and 0.10
47 resids = np.asarray(resids)
48 n = resids.shape[axis]
49 if n < 8:
50 return np.nan, np.nan
51 return_shape = list(resids.shape)
52 del return_shape[axis]
53 return np.nan * np.zeros(return_shape), np.nan * np.zeros(return_shape)
54 raise ValueError(
55 "skewtest is not valid with less than 8 observations; %i samples"
56 " were given." % int(n))
57
58 return stats.normaltest(resids, axis=axis)
59
60 def jarque_bera(resids):
61 """
62 Calculate residual skewness, kurtosis, and do the JB test for normality
63
64 Parameters
65 -----------
66 resids : array-like
67
68 Returns
69 -------
70 JB, JBpv, skew, kurtosis
71
72 JB = n/6*(S^2 + (K-3)^2/4)
73
74 JBpv is the Chi^2 two-tail probability value
75
76 skew is the measure of skewness
77
78 kurtosis is the measure of kurtosis
79
80 """
81 resids = np.asarray(resids)
82 # Calculate residual skewness and kurtosis
83 skew = stats.skew(resids)
84 kurtosis = 3 + stats.kurtosis(resids)
85
86 # Calculate the Jarque-Bera test for normality
87 JB = (resids.shape[0] / 6.) * (skew**2 + (1 / 4.) * (kurtosis-3)**2)
88 JBpv = stats.chi2.sf(JB,2)
89
90 return JB, JBpv, skew, kurtosis
91
92
[end of statsmodels/stats/stattools.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/statsmodels/stats/stattools.py b/statsmodels/stats/stattools.py
--- a/statsmodels/stats/stattools.py
+++ b/statsmodels/stats/stattools.py
@@ -24,6 +24,7 @@
Durbin Watson statistic. This is defined as
sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)
"""
+ resids=np.asarray(resids)
diff_resids = np.diff(resids, 1)
dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)
return dw
| {"golden_diff": "diff --git a/statsmodels/stats/stattools.py b/statsmodels/stats/stattools.py\n--- a/statsmodels/stats/stattools.py\n+++ b/statsmodels/stats/stattools.py\n@@ -24,6 +24,7 @@\n Durbin Watson statistic. This is defined as\n sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)\n \"\"\"\n+ resids=np.asarray(resids)\n diff_resids = np.diff(resids, 1)\n dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)\n return dw\n", "issue": "OLS residuals returned as Pandas series when endog and exog are Pandas series\nWhen I fit OLS model with pandas series and try to do a Durbin-Watson test, the function returns nan. In that case the RegressionResult.resid attribute is a pandas series, rather than a numpy array- converting to a numpy array explicitly, the durbin_watson function works like a charm. \n\nMy instinct is this is something that should probably be changed in OLS (to guarantee the type of resid), hence the title of the issue, but I leave that to the judgement of our fearless leaders.\n\n``` python\nimport statsmodels.api as sm\nimport numpy as np\nfrom pandas import DataFrame\nx=np.arange(1,11)\ny=[num+np.random.normal() for num in np.arange(0,5, .5)]\nlinmod=sm.OLS(y, x).fit()\ndw=sm.stats.stattools.durbin_watson(linmod.resid)\ndata=DataFrame({'x':x, 'y':y}, index=x)\nlinmod_pandas=sm.OLS(data.y, data.x).fit()\ndw_pandas=sm.stats.stattools.durbin_watson(linmod_pandas.resid)\ndw_pandas1=sm.stats.stattools.durbin_watson(array(linmod_pandas.resid))\nprint type(linmod_pandas.resid)\nprint dw, dw_pandas, dw_pandas1\n```\n\n", "before_files": [{"content": "\"\"\"\nStatistical tests to be used in conjunction with the models\n\nNotes\n-----\nThese functions haven't been formally tested.\n\"\"\"\n\nfrom scipy import stats\nimport numpy as np\n\n\n#TODO: these are pretty straightforward but they should be tested\ndef durbin_watson(resids):\n \"\"\"\n Calculates the Durbin-Watson statistic\n\n Parameters\n -----------\n resids : array-like\n\n Returns\n --------\n Durbin Watson statistic. 
This is defined as\n sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)\n \"\"\"\n diff_resids = np.diff(resids, 1)\n dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)\n return dw\n\ndef omni_normtest(resids, axis=0):\n \"\"\"\n Omnibus test for normality\n\n Parameters\n -----------\n resid : array-like\n axis : int, optional\n Default is 0\n\n Returns\n -------\n Chi^2 score, two-tail probability\n \"\"\"\n #TODO: change to exception in summary branch and catch in summary()\n #behavior changed between scipy 0.9 and 0.10\n resids = np.asarray(resids)\n n = resids.shape[axis]\n if n < 8:\n return np.nan, np.nan\n return_shape = list(resids.shape)\n del return_shape[axis]\n return np.nan * np.zeros(return_shape), np.nan * np.zeros(return_shape)\n raise ValueError(\n \"skewtest is not valid with less than 8 observations; %i samples\"\n \" were given.\" % int(n))\n\n return stats.normaltest(resids, axis=axis)\n\ndef jarque_bera(resids):\n \"\"\"\n Calculate residual skewness, kurtosis, and do the JB test for normality\n\n Parameters\n -----------\n resids : array-like\n\n Returns\n -------\n JB, JBpv, skew, kurtosis\n\n JB = n/6*(S^2 + (K-3)^2/4)\n\n JBpv is the Chi^2 two-tail probability value\n\n skew is the measure of skewness\n\n kurtosis is the measure of kurtosis\n\n \"\"\"\n resids = np.asarray(resids)\n # Calculate residual skewness and kurtosis\n skew = stats.skew(resids)\n kurtosis = 3 + stats.kurtosis(resids)\n\n # Calculate the Jarque-Bera test for normality\n JB = (resids.shape[0] / 6.) * (skew**2 + (1 / 4.) * (kurtosis-3)**2)\n JBpv = stats.chi2.sf(JB,2)\n\n return JB, JBpv, skew, kurtosis\n\n", "path": "statsmodels/stats/stattools.py"}]} | 1,657 | 145 |
gh_patches_debug_2846 | rasdani/github-patches | git_diff | ESMCI__cime-3605 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NLCOMP fails with python3 because dictionaries no longer support `has_key`
When using python3, I get:
```
$ ./case.cmpgen_namelists
Comparing namelists with baselines 'lilac_0703a'
Generating namelists to baselines 'lilac_0703b'
Exception during namelist operations:
'dict' object has no attribute 'has_key'
Traceback (most recent call last):
File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/case/case_cmpgen_namelists.py", line 123, in case_cmpgen_namelists
success, output = _do_full_nl_comp(self, test_name, compare_name, baseline_root)
File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/case/case_cmpgen_namelists.py", line 45, in _do_full_nl_comp
success, current_comments = compare_runconfigfiles(baseline_counterpart, item, test)
File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/simple_compare.py", line 171, in compare_runconfigfiles
comments = findDiff(gold_dict, compare_dict, case=case)
File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/simple_compare.py", line 215, in findDiff
if not d2.has_key(k):
AttributeError: 'dict' object has no attribute 'has_key'
```
I have a fix incoming.
</issue>
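The traceback above comes down to a Python 2 idiom: `dict.has_key()` was removed in Python 3, and membership tests are spelled with `in` on both versions:

```python
d = {"a": 1}

# Python 2 only (raises AttributeError on Python 3):
#   d.has_key("a")

# Works on Python 2 and 3:
"a" in d          # True
"b" not in d      # True; the fix below writes this as `if not k in d2:`
```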
<code>
[start of scripts/lib/CIME/simple_compare.py]
1 import os, re
2
3 from CIME.utils import expect
4
5 ###############################################################################
6 def _normalize_string_value(value, case):
7 ###############################################################################
8 """
9 Some of the strings are inherently prone to diffs, like file
10 paths, etc. This function attempts to normalize that data so that
11 it will not cause diffs.
12 """
13 # Any occurance of case must be normalized because test-ids might not match
14 if (case is not None):
15 case_re = re.compile(r'{}[.]([GC])[.]([^./\s]+)'.format(case))
16 value = case_re.sub("{}.ACTION.TESTID".format(case), value)
17
18 if ("/" in value):
19 # File path, just return the basename
20 return os.path.basename(value)
21 elif ("username" in value):
22 return ''
23 elif (".log." in value):
24 # Remove the part that's prone to diff
25 components = value.split(".")
26 return os.path.basename(".".join(components[0:-1]))
27 else:
28 return value
29
30 ###############################################################################
31 def _skip_comments_and_whitespace(lines, idx):
32 ###############################################################################
33 """
34 Starting at idx, return next valid idx of lines that contains real data
35 """
36 if (idx == len(lines)):
37 return idx
38
39 comment_re = re.compile(r'^[#!]')
40
41 lines_slice = lines[idx:]
42 for line in lines_slice:
43 line = line.strip()
44 if (comment_re.match(line) is not None or line == ""):
45 idx += 1
46 else:
47 return idx
48
49 return idx
50
51 ###############################################################################
52 def _compare_data(gold_lines, comp_lines, case, offset_method=False):
53 ###############################################################################
54 """
55 >>> teststr = '''
56 ... data1
57 ... data2 data3
58 ... data4 data5 data6
59 ...
60 ... # Comment
61 ... data7 data8 data9 data10
62 ... '''
63 >>> _compare_data(teststr.splitlines(), teststr.splitlines(), None)
64 ('', 0)
65
66 >>> teststr2 = '''
67 ... data1
68 ... data2 data30
69 ... data4 data5 data6
70 ... data7 data8 data9 data10
71 ... data00
72 ... '''
73 >>> results,_ = _compare_data(teststr.splitlines(), teststr2.splitlines(), None)
74 >>> print(results)
75 Inequivalent lines data2 data3 != data2 data30
76 NORMALIZED: data2 data3 != data2 data30
77 Found extra lines
78 data00
79 <BLANKLINE>
80 >>> teststr3 = '''
81 ... data1
82 ... data4 data5 data6
83 ... data7 data8 data9 data10
84 ... data00
85 ... '''
86 >>> results,_ = _compare_data(teststr3.splitlines(), teststr2.splitlines(), None, offset_method=True)
87 >>> print(results)
88 Inequivalent lines data4 data5 data6 != data2 data30
89 NORMALIZED: data4 data5 data6 != data2 data30
90 <BLANKLINE>
91 """
92 comments = ""
93 cnt = 0
94 gidx, cidx = 0, 0
95 gnum, cnum = len(gold_lines), len(comp_lines)
96 while (gidx < gnum or cidx < cnum):
97 gidx = _skip_comments_and_whitespace(gold_lines, gidx)
98 cidx = _skip_comments_and_whitespace(comp_lines, cidx)
99
100 if (gidx == gnum):
101 if (cidx == cnum):
102 return comments, cnt
103 else:
104 comments += "Found extra lines\n"
105 comments += "\n".join(comp_lines[cidx:]) + "\n"
106 return comments, cnt
107 elif (cidx == cnum):
108 comments += "Missing lines\n"
109 comments += "\n".join(gold_lines[gidx:1]) + "\n"
110 return comments, cnt
111
112 gold_value = gold_lines[gidx].strip()
113 gold_value = gold_value.replace('"',"'")
114 comp_value = comp_lines[cidx].strip()
115 comp_value = comp_value.replace('"',"'")
116
117 norm_gold_value = _normalize_string_value(gold_value, case)
118 norm_comp_value = _normalize_string_value(comp_value, case)
119
120 if (norm_gold_value != norm_comp_value):
121 comments += "Inequivalent lines {} != {}\n".format(gold_value, comp_value)
122 comments += " NORMALIZED: {} != {}\n".format(norm_gold_value, norm_comp_value)
123 cnt += 1
124 if offset_method and (norm_gold_value != norm_comp_value):
125 if gnum > cnum:
126 gidx += 1
127 else:
128 cidx += 1
129 else:
130 gidx += 1
131 cidx += 1
132
133 return comments, cnt
134
135 ###############################################################################
136 def compare_files(gold_file, compare_file, case=None):
137 ###############################################################################
138 """
139 Returns true if files are the same, comments are returned too:
140 (success, comments)
141 """
142 expect(os.path.exists(gold_file), "File not found: {}".format(gold_file))
143 expect(os.path.exists(compare_file), "File not found: {}".format(compare_file))
144
145 comments, cnt = _compare_data(open(gold_file, "r").readlines(),
146 open(compare_file, "r").readlines(), case)
147
148 if cnt > 0:
149 comments2, cnt2 = _compare_data(open(gold_file, "r").readlines(),
150 open(compare_file, "r").readlines(),
151 case, offset_method=True)
152 if cnt2 < cnt:
153 comments = comments2
154
155 return comments == "", comments
156
157 ###############################################################################
158 def compare_runconfigfiles(gold_file, compare_file, case=None):
159 ###############################################################################
160 """
161 Returns true if files are the same, comments are returned too:
162 (success, comments)
163 """
164 expect(os.path.exists(gold_file), "File not found: {}".format(gold_file))
165 expect(os.path.exists(compare_file), "File not found: {}".format(compare_file))
166
167 #create dictionary's of the runconfig files and compare them
168 gold_dict = _parse_runconfig(gold_file)
169 compare_dict = _parse_runconfig(compare_file)
170
171 comments = findDiff(gold_dict, compare_dict, case=case)
172 comments = comments.replace(" d1", " " + gold_file)
173 comments = comments.replace(" d2", " " + compare_file)
174 # this picks up the case that an entry in compare is not in gold
175 if comments == "":
176 comments = findDiff(compare_dict, gold_dict, case=case)
177 comments = comments.replace(" d2", " " + gold_file)
178 comments = comments.replace(" d1", " " + compare_file)
179
180 return comments == "", comments
181
182 def _parse_runconfig(filename):
183 runconfig = {}
184 inrunseq = False
185 insubsection = None
186 subsection_re = re.compile(r'\s*(\S+)::')
187 group_re = re.compile(r'\s*(\S+)\s*:\s*(\S+)')
188 var_re = re.compile(r'\s*(\S+)\s*=\s*(\S+)')
189 with open(filename, "r") as fd:
190 for line in fd:
191 # remove comments
192 line = line.split('#')[0]
193 subsection_match = subsection_re.match(line)
194 group_match = group_re.match(line)
195 var_match = var_re.match(line)
196 if re.match(r'\s*runSeq\s*::', line):
197 runconfig['runSeq'] = []
198 inrunseq = True
199 elif re.match(r'\s*::\s*', line):
200 inrunseq = False
201 elif inrunseq:
202 runconfig['runSeq'].append(line)
203 elif subsection_match:
204 insubsection = subsection_match.group(1)
205 runconfig[insubsection] = {}
206 elif group_match:
207 runconfig[group_match.group(1)] = group_match.group(2)
208 elif insubsection and var_match:
209 runconfig[insubsection][var_match.group(1)] = var_match.group(2)
210 return runconfig
211
212 def findDiff(d1, d2, path="", case=None):
213 comment = ""
214 for k in d1.keys():
215 if not d2.has_key(k):
216 comment += path + ":\n"
217 comment += k + " as key not in d2\n"
218 else:
219 if type(d1[k]) is dict:
220 if path == "":
221 path = k
222 else:
223 path = path + "->" + k
224 comment += findDiff(d1[k],d2[k], path=path, case=case)
225 else:
226 if case in d1[k]:
227 pass
228 elif "username" in k:
229 pass
230 elif "logfile" in k:
231 pass
232 elif d1[k] != d2[k]:
233 comment += path+":\n"
234 comment += " - {} : {}\n".format(k,d1[k])
235 comment += " + {} : {}\n".format(k,d2[k])
236 return comment
237
[end of scripts/lib/CIME/simple_compare.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/CIME/simple_compare.py b/scripts/lib/CIME/simple_compare.py
--- a/scripts/lib/CIME/simple_compare.py
+++ b/scripts/lib/CIME/simple_compare.py
@@ -212,7 +212,7 @@
def findDiff(d1, d2, path="", case=None):
comment = ""
for k in d1.keys():
- if not d2.has_key(k):
+ if not k in d2:
comment += path + ":\n"
comment += k + " as key not in d2\n"
else:
| {"golden_diff": "diff --git a/scripts/lib/CIME/simple_compare.py b/scripts/lib/CIME/simple_compare.py\n--- a/scripts/lib/CIME/simple_compare.py\n+++ b/scripts/lib/CIME/simple_compare.py\n@@ -212,7 +212,7 @@\n def findDiff(d1, d2, path=\"\", case=None):\n comment = \"\"\n for k in d1.keys():\n- if not d2.has_key(k):\n+ if not k in d2:\n comment += path + \":\\n\"\n comment += k + \" as key not in d2\\n\"\n else:\n", "issue": "NLCOMP fails with python3 because dictionaries no longer support `has_key`\nWhen using python3, I get:\r\n\r\n```\r\n$ ./case.cmpgen_namelists\r\nComparing namelists with baselines 'lilac_0703a'\r\nGenerating namelists to baselines 'lilac_0703b'\r\nException during namelist operations:\r\n'dict' object has no attribute 'has_key'\r\nTraceback (most recent call last):\r\n File \"/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/case/case_cmpgen_namelists.py\", line 123, in case_cmpgen_namelists\r\n success, output = _do_full_nl_comp(self, test_name, compare_name, baseline_root)\r\n File \"/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/case/case_cmpgen_namelists.py\", line 45, in _do_full_nl_comp\r\n success, current_comments = compare_runconfigfiles(baseline_counterpart, item, test)\r\n File \"/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/simple_compare.py\", line 171, in compare_runconfigfiles\r\n comments = findDiff(gold_dict, compare_dict, case=case)\r\n File \"/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/simple_compare.py\", line 215, in findDiff\r\n if not d2.has_key(k):\r\nAttributeError: 'dict' object has no attribute 'has_key'\r\n```\r\n\r\nI have a fix incoming.\n", "before_files": [{"content": "import os, re\n\nfrom CIME.utils import expect\n\n###############################################################################\ndef _normalize_string_value(value, case):\n###############################################################################\n \"\"\"\n Some of the strings are inherently prone to diffs, like file\n paths, etc. This function attempts to normalize that data so that\n it will not cause diffs.\n \"\"\"\n # Any occurance of case must be normalized because test-ids might not match\n if (case is not None):\n case_re = re.compile(r'{}[.]([GC])[.]([^./\\s]+)'.format(case))\n value = case_re.sub(\"{}.ACTION.TESTID\".format(case), value)\n\n if (\"/\" in value):\n # File path, just return the basename\n return os.path.basename(value)\n elif (\"username\" in value):\n return ''\n elif (\".log.\" in value):\n # Remove the part that's prone to diff\n components = value.split(\".\")\n return os.path.basename(\".\".join(components[0:-1]))\n else:\n return value\n\n###############################################################################\ndef _skip_comments_and_whitespace(lines, idx):\n###############################################################################\n \"\"\"\n Starting at idx, return next valid idx of lines that contains real data\n \"\"\"\n if (idx == len(lines)):\n return idx\n\n comment_re = re.compile(r'^[#!]')\n\n lines_slice = lines[idx:]\n for line in lines_slice:\n line = line.strip()\n if (comment_re.match(line) is not None or line == \"\"):\n idx += 1\n else:\n return idx\n\n return idx\n\n###############################################################################\ndef _compare_data(gold_lines, comp_lines, case, offset_method=False):\n###############################################################################\n \"\"\"\n >>> teststr = '''\n ... 
data1\n ... data2 data3\n ... data4 data5 data6\n ...\n ... # Comment\n ... data7 data8 data9 data10\n ... '''\n >>> _compare_data(teststr.splitlines(), teststr.splitlines(), None)\n ('', 0)\n\n >>> teststr2 = '''\n ... data1\n ... data2 data30\n ... data4 data5 data6\n ... data7 data8 data9 data10\n ... data00\n ... '''\n >>> results,_ = _compare_data(teststr.splitlines(), teststr2.splitlines(), None)\n >>> print(results)\n Inequivalent lines data2 data3 != data2 data30\n NORMALIZED: data2 data3 != data2 data30\n Found extra lines\n data00\n <BLANKLINE>\n >>> teststr3 = '''\n ... data1\n ... data4 data5 data6\n ... data7 data8 data9 data10\n ... data00\n ... '''\n >>> results,_ = _compare_data(teststr3.splitlines(), teststr2.splitlines(), None, offset_method=True)\n >>> print(results)\n Inequivalent lines data4 data5 data6 != data2 data30\n NORMALIZED: data4 data5 data6 != data2 data30\n <BLANKLINE>\n \"\"\"\n comments = \"\"\n cnt = 0\n gidx, cidx = 0, 0\n gnum, cnum = len(gold_lines), len(comp_lines)\n while (gidx < gnum or cidx < cnum):\n gidx = _skip_comments_and_whitespace(gold_lines, gidx)\n cidx = _skip_comments_and_whitespace(comp_lines, cidx)\n\n if (gidx == gnum):\n if (cidx == cnum):\n return comments, cnt\n else:\n comments += \"Found extra lines\\n\"\n comments += \"\\n\".join(comp_lines[cidx:]) + \"\\n\"\n return comments, cnt\n elif (cidx == cnum):\n comments += \"Missing lines\\n\"\n comments += \"\\n\".join(gold_lines[gidx:1]) + \"\\n\"\n return comments, cnt\n\n gold_value = gold_lines[gidx].strip()\n gold_value = gold_value.replace('\"',\"'\")\n comp_value = comp_lines[cidx].strip()\n comp_value = comp_value.replace('\"',\"'\")\n\n norm_gold_value = _normalize_string_value(gold_value, case)\n norm_comp_value = _normalize_string_value(comp_value, case)\n\n if (norm_gold_value != norm_comp_value):\n comments += \"Inequivalent lines {} != {}\\n\".format(gold_value, comp_value)\n comments += \" NORMALIZED: {} != {}\\n\".format(norm_gold_value, norm_comp_value)\n cnt += 1\n if offset_method and (norm_gold_value != norm_comp_value):\n if gnum > cnum:\n gidx += 1\n else:\n cidx += 1\n else:\n gidx += 1\n cidx += 1\n\n return comments, cnt\n\n###############################################################################\ndef compare_files(gold_file, compare_file, case=None):\n###############################################################################\n \"\"\"\n Returns true if files are the same, comments are returned too:\n (success, comments)\n \"\"\"\n expect(os.path.exists(gold_file), \"File not found: {}\".format(gold_file))\n expect(os.path.exists(compare_file), \"File not found: {}\".format(compare_file))\n\n comments, cnt = _compare_data(open(gold_file, \"r\").readlines(),\n open(compare_file, \"r\").readlines(), case)\n\n if cnt > 0:\n comments2, cnt2 = _compare_data(open(gold_file, \"r\").readlines(),\n open(compare_file, \"r\").readlines(),\n case, offset_method=True)\n if cnt2 < cnt:\n comments = comments2\n\n return comments == \"\", comments\n\n###############################################################################\ndef compare_runconfigfiles(gold_file, compare_file, case=None):\n###############################################################################\n \"\"\"\n Returns true if files are the same, comments are returned too:\n (success, comments)\n \"\"\"\n expect(os.path.exists(gold_file), \"File not found: {}\".format(gold_file))\n expect(os.path.exists(compare_file), \"File not found: {}\".format(compare_file))\n\n #create dictionary's of the 
runconfig files and compare them\n gold_dict = _parse_runconfig(gold_file)\n compare_dict = _parse_runconfig(compare_file)\n\n comments = findDiff(gold_dict, compare_dict, case=case)\n comments = comments.replace(\" d1\", \" \" + gold_file)\n comments = comments.replace(\" d2\", \" \" + compare_file)\n # this picks up the case that an entry in compare is not in gold\n if comments == \"\":\n comments = findDiff(compare_dict, gold_dict, case=case)\n comments = comments.replace(\" d2\", \" \" + gold_file)\n comments = comments.replace(\" d1\", \" \" + compare_file)\n\n return comments == \"\", comments\n\ndef _parse_runconfig(filename):\n runconfig = {}\n inrunseq = False\n insubsection = None\n subsection_re = re.compile(r'\\s*(\\S+)::')\n group_re = re.compile(r'\\s*(\\S+)\\s*:\\s*(\\S+)')\n var_re = re.compile(r'\\s*(\\S+)\\s*=\\s*(\\S+)')\n with open(filename, \"r\") as fd:\n for line in fd:\n # remove comments\n line = line.split('#')[0]\n subsection_match = subsection_re.match(line)\n group_match = group_re.match(line)\n var_match = var_re.match(line)\n if re.match(r'\\s*runSeq\\s*::', line):\n runconfig['runSeq'] = []\n inrunseq = True\n elif re.match(r'\\s*::\\s*', line):\n inrunseq = False\n elif inrunseq:\n runconfig['runSeq'].append(line)\n elif subsection_match:\n insubsection = subsection_match.group(1)\n runconfig[insubsection] = {}\n elif group_match:\n runconfig[group_match.group(1)] = group_match.group(2)\n elif insubsection and var_match:\n runconfig[insubsection][var_match.group(1)] = var_match.group(2)\n return runconfig\n\ndef findDiff(d1, d2, path=\"\", case=None):\n comment = \"\"\n for k in d1.keys():\n if not d2.has_key(k):\n comment += path + \":\\n\"\n comment += k + \" as key not in d2\\n\"\n else:\n if type(d1[k]) is dict:\n if path == \"\":\n path = k\n else:\n path = path + \"->\" + k\n comment += findDiff(d1[k],d2[k], path=path, case=case)\n else:\n if case in d1[k]:\n pass\n elif \"username\" in k:\n pass\n elif \"logfile\" in k:\n pass\n elif d1[k] != d2[k]:\n comment += path+\":\\n\"\n comment += \" - {} : {}\\n\".format(k,d1[k])\n comment += \" + {} : {}\\n\".format(k,d2[k])\n return comment\n", "path": "scripts/lib/CIME/simple_compare.py"}]} | 3,480 | 126 |
gh_patches_debug_15028 | rasdani/github-patches | git_diff | Pyomo__pyomo-1521 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate the pyomo install-extras subcommand
The conda pyomo.extras package supports this functionality more robustly. We should not duplicate this logic in separate places.
</issue>
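Deprecating a subcommand without immediately breaking existing workflows usually means keeping the entry point but warning on use. A sketch along those lines using Pyomo's own deprecation helper (the message wording and the version placeholder are assumptions):

```python
from pyomo.common.deprecation import deprecated

@deprecated(
    "'pyomo install-extras' is deprecated; install optional dependencies "
    "directly (for example via the conda pyomo.extras package).",
    version='TBD',   # placeholder; the real release number would go here
)
def install_extras(args=[], quiet=False):
    ...
```

The patch later in this record takes essentially this route, decorating `install_extras` rather than removing it.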
<code>
[start of pyomo/scripting/plugins/extras.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 import six
12 from pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter
13
14 def get_packages():
15 packages = [
16 'sympy',
17 'xlrd',
18 'openpyxl',
19 #('suds-jurko', 'suds'),
20 ('PyYAML', 'yaml'),
21 'pypyodbc',
22 'pymysql',
23 #'openopt',
24 #'FuncDesigner',
25 #'DerApproximator',
26 ('ipython[notebook]', 'IPython'),
27 ('pyro4', 'Pyro4'),
28 ]
29 if six.PY2:
30 packages.append(('pyro','Pyro'))
31 return packages
32
33 def install_extras(args=[], quiet=False):
34 #
35 # Verify that pip is installed
36 #
37 try:
38 import pip
39 pip_version = pip.__version__.split('.')
40 for i,s in enumerate(pip_version):
41 try:
42 pip_version[i] = int(s)
43 except:
44 pass
45 pip_version = tuple(pip_version)
46 except ImportError:
47 print("You must have 'pip' installed to run this script.")
48 raise SystemExit
49
50 cmd = ['--disable-pip-version-check', 'install','--upgrade']
51 # Disable the PIP download cache
52 if pip_version[0] >= 6:
53 cmd.append('--no-cache-dir')
54 else:
55 cmd.append('--download-cache')
56 cmd.append('')
57
58 if not quiet:
59 print(' ')
60 print('-'*60)
61 print("Installation Output Logs")
62 print(" (A summary will be printed below)")
63 print('-'*60)
64 print(' ')
65
66 results = {}
67 for package in get_packages():
68 if type(package) is tuple:
69 package, pkg_import = package
70 else:
71 pkg_import = package
72 try:
73 # Allow the user to provide extra options
74 pip.main(cmd + args + [package])
75 __import__(pkg_import)
76 results[package] = True
77 except:
78 results[package] = False
79 try:
80 pip.logger.consumers = []
81 except AttributeError:
82 # old pip versions (prior to 6.0~104^2)
83 pip.log.consumers = []
84
85 if not quiet:
86 print(' ')
87 print(' ')
88 print('-'*60)
89 print("Installation Summary")
90 print('-'*60)
91 print(' ')
92 for package, result in sorted(six.iteritems(results)):
93 if result:
94 print("YES %s" % package)
95 else:
96 print("NO %s" % package)
97
98
99 def pyomo_subcommand(options):
100 return install_extras(options.args, quiet=options.quiet)
101
102
103 _parser = add_subparser(
104 'install-extras',
105 func=pyomo_subcommand,
106 help='Install "extra" packages that Pyomo can leverage.',
107 description="""
108 This pyomo subcommand uses PIP to install optional third-party Python
109 packages that Pyomo could leverage from PyPI. The installation of some
110 packages may fail, but this subcommand ignore these failures and
111 provides a summary describing which packages were installed.
112 """,
113 epilog="""
114 Since pip options begin with a dash, the --pip-args option can only be
115 used with the equals syntax. --pip-args may appear multiple times on
116 the command line. For example:\n\n
117 pyomo install-extras --pip-args="--upgrade"
118 """,
119 formatter_class=CustomHelpFormatter,
120 )
121
122 _parser.add_argument(
123 '-q', '--quiet',
124 action='store_true',
125 dest='quiet',
126 default=False,
127 help="Suppress some terminal output",
128 )
129 _parser.add_argument(
130 "--pip-args",
131 dest="args",
132 action="append",
133 help=("Arguments that are passed to the 'pip' command when "
134 "installing packages"),
135 )
136
137
[end of pyomo/scripting/plugins/extras.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/scripting/plugins/extras.py b/pyomo/scripting/plugins/extras.py
--- a/pyomo/scripting/plugins/extras.py
+++ b/pyomo/scripting/plugins/extras.py
@@ -11,6 +11,8 @@
import six
from pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter
+from pyomo.common.deprecation import deprecated
+
def get_packages():
packages = [
'sympy',
@@ -30,6 +32,11 @@
packages.append(('pyro','Pyro'))
return packages
+@deprecated(
+ "Use of the pyomo install-extras is deprecated."
+ "The current recommended course of action is to manually install "
+ "optional dependencies as needed.",
+ version='TBD')
def install_extras(args=[], quiet=False):
#
# Verify that pip is installed
| {"golden_diff": "diff --git a/pyomo/scripting/plugins/extras.py b/pyomo/scripting/plugins/extras.py\n--- a/pyomo/scripting/plugins/extras.py\n+++ b/pyomo/scripting/plugins/extras.py\n@@ -11,6 +11,8 @@\n import six\n from pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter\n \n+from pyomo.common.deprecation import deprecated\n+\n def get_packages():\n packages = [\n 'sympy', \n@@ -30,6 +32,11 @@\n packages.append(('pyro','Pyro'))\n return packages\n \n+@deprecated(\n+ \"Use of the pyomo install-extras is deprecated.\"\n+ \"The current recommended course of action is to manually install \"\n+ \"optional dependencies as needed.\",\n+ version='TBD')\n def install_extras(args=[], quiet=False):\n #\n # Verify that pip is installed\n", "issue": "Deprecate the pyomo install-extras subcommand\nThe conda pyomo.extras package supports this functionality more robustly. We should not duplicate this logic in separate places.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and \n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain \n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport six\nfrom pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter\n\ndef get_packages():\n packages = [\n 'sympy', \n 'xlrd', \n 'openpyxl', \n #('suds-jurko', 'suds'),\n ('PyYAML', 'yaml'),\n 'pypyodbc', \n 'pymysql', \n #'openopt', \n #'FuncDesigner', \n #'DerApproximator', \n ('ipython[notebook]', 'IPython'),\n ('pyro4', 'Pyro4'),\n ]\n if six.PY2:\n packages.append(('pyro','Pyro'))\n return packages\n\ndef install_extras(args=[], quiet=False):\n #\n # Verify that pip is installed\n #\n try:\n import pip\n pip_version = pip.__version__.split('.')\n for i,s in enumerate(pip_version):\n try:\n pip_version[i] = int(s)\n except:\n pass\n pip_version = tuple(pip_version)\n except ImportError:\n print(\"You must have 'pip' installed to run this script.\")\n raise SystemExit\n\n cmd = ['--disable-pip-version-check', 'install','--upgrade']\n # Disable the PIP download cache\n if pip_version[0] >= 6:\n cmd.append('--no-cache-dir')\n else:\n cmd.append('--download-cache')\n cmd.append('')\n\n if not quiet:\n print(' ')\n print('-'*60)\n print(\"Installation Output Logs\")\n print(\" (A summary will be printed below)\")\n print('-'*60)\n print(' ')\n\n results = {}\n for package in get_packages():\n if type(package) is tuple:\n package, pkg_import = package\n else:\n pkg_import = package\n try:\n # Allow the user to provide extra options\n pip.main(cmd + args + [package])\n __import__(pkg_import)\n results[package] = True\n except:\n results[package] = False\n try:\n pip.logger.consumers = []\n except AttributeError:\n # old pip versions (prior to 6.0~104^2)\n pip.log.consumers = []\n\n if not quiet:\n print(' ')\n print(' ')\n print('-'*60)\n print(\"Installation Summary\")\n print('-'*60)\n print(' ')\n for package, result in sorted(six.iteritems(results)):\n if result:\n print(\"YES %s\" % package)\n else:\n print(\"NO %s\" % package)\n\n\ndef pyomo_subcommand(options):\n return install_extras(options.args, quiet=options.quiet)\n\n\n_parser = add_subparser(\n 'install-extras',\n 
func=pyomo_subcommand,\n help='Install \"extra\" packages that Pyomo can leverage.',\n description=\"\"\"\nThis pyomo subcommand uses PIP to install optional third-party Python\npackages that Pyomo could leverage from PyPI. The installation of some\npackages may fail, but this subcommand ignore these failures and\nprovides a summary describing which packages were installed.\n\"\"\",\n epilog=\"\"\"\nSince pip options begin with a dash, the --pip-args option can only be\nused with the equals syntax. --pip-args may appear multiple times on\nthe command line. For example:\\n\\n\n pyomo install-extras --pip-args=\"--upgrade\"\n\"\"\",\n formatter_class=CustomHelpFormatter,\n)\n\n_parser.add_argument(\n '-q', '--quiet',\n action='store_true',\n dest='quiet',\n default=False,\n help=\"Suppress some terminal output\",\n)\n_parser.add_argument(\n \"--pip-args\",\n dest=\"args\",\n action=\"append\",\n help=(\"Arguments that are passed to the 'pip' command when \"\n \"installing packages\"),\n)\n\n", "path": "pyomo/scripting/plugins/extras.py"}]} | 1,825 | 195 |
gh_patches_debug_5294 | rasdani/github-patches | git_diff | OBOFoundry__OBOFoundry.github.io-2483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JSON-LD context does not expand as intended
https://purl.obolibrary.org/meta/obo_context.jsonld
We can't have trailing underscores on expansions (and have it behave as expected). Sorry, I don't make the rules
More context here:
- https://github.com/w3c/json-ld-syntax/issues/329
These would all have to be modified to be nested
```json
"RO": {
"@id": "http://purl.obolibrary.org/obo/RO_",
"@prefix": true
}
```
</issue>
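For contrast with the snippet above, the sketch below (illustrative only, not code taken from the repository) shows how a prefix map entry could be emitted as an expanded term definition so that a term whose expansion ends in an underscore is still usable as a prefix under JSON-LD 1.1:

```python
# Hedged sketch: build nested term definitions instead of plain string
# expansions; "RO" and "GO" are just example prefixes.
prefix_map = {}
for prefix in ("RO", "GO"):
    prefix_map[prefix] = {
        "@id": "http://purl.obolibrary.org/obo/" + prefix + "_",
        "@prefix": True,  # serialized as "@prefix": true in the JSON-LD output
    }
context = {"@context": prefix_map}
```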
<code>
[start of util/processor.py]
1 #!/usr/bin/env python3
2
3 import argparse
4 import datetime
5 import logging
6 import sys
7 import time
8 from contextlib import closing
9 from json import dumps
10
11 import requests
12 import yaml
13 from SPARQLWrapper import JSON, SPARQLWrapper
14
15 __author__ = "cjm"
16
17
18 def main():
19 parser = argparse.ArgumentParser(
20 description="Helper utils for OBO",
21 formatter_class=argparse.RawTextHelpFormatter,
22 )
23 parser.add_argument(
24 "-i", "--input", type=str, required=False, help="Input metadata file"
25 )
26 parser.add_argument(
27 "-v",
28 "--verbosity",
29 default=0,
30 action="count",
31 help="Increase output verbosity (min.: 0, max. 2)",
32 )
33 subparsers = parser.add_subparsers(dest="subcommand", help="sub-command help")
34
35 # SUBCOMMAND
36 parser_n = subparsers.add_parser("check-urls", help="Ensure PURLs resolve")
37 parser_n.set_defaults(function=check_urls)
38
39 parser_n = subparsers.add_parser(
40 "sparql-compare",
41 help="Run SPARQL commands against the db to generate a " "consistency report",
42 )
43 parser_n.set_defaults(function=sparql_compare_all)
44
45 parser_n = subparsers.add_parser("extract-context", help="Extracts JSON-LD context")
46 parser_n.set_defaults(function=extract_context)
47
48 parser_n = subparsers.add_parser(
49 "extract-contributors",
50 help="Queries github API for metadata about contributors",
51 )
52 parser_n.set_defaults(function=write_all_contributors)
53
54 args = parser.parse_args()
55 if args.verbosity >= 2:
56 logging.basicConfig(level=logging.DEBUG)
57 elif args.verbosity == 1:
58 logging.basicConfig(level=logging.INFO)
59 else:
60 logging.basicConfig(level=logging.WARNING)
61
62 with open(args.input, "r") as f:
63 obj = yaml.load(f, Loader=yaml.SafeLoader)
64 ontologies = obj["ontologies"]
65
66 func = args.function
67 func(ontologies, args)
68
69
70 def check_urls(ontologies, args):
71 """
72 Ensure PURLs resolve
73 """
74
75 def test_url(url):
76 try:
77 with closing(requests.get(url, stream=False)) as resp:
78 return resp.status_code == 200
79 except requests.exceptions.InvalidSchema as e:
80 # TODO: requests lib doesn't handle ftp. For now simply return True in that case.
81 if not format(e).startswith("No connection adapters were found for 'ftp:"):
82 raise
83 return True
84
85 failed_ids = []
86 for ont in ontologies:
87 for p in ont.get("products", []):
88 pid = p["id"]
89 if not test_url(p.get("ontology_purl")):
90 failed_ids.append(pid)
91 if len(failed_ids) > 0:
92 print("FAILURES:")
93 for pid in failed_ids:
94 print(pid, file=sys.stderr)
95 exit(1)
96
97
98 def extract_context(ontologies, args):
99 """
100 Writes to STDOUT a sorted JSON map from ontology prefixes to PURLs
101 """
102
103 def has_obo_prefix(obj):
104 return ("uri_prefix" not in obj) or (
105 obj["uri_prefix"] == "http://purl.obolibrary.org/obo/"
106 )
107
108 prefix_map = {}
109 for obj in ontologies:
110 if has_obo_prefix(obj):
111 prefix = obj.get("preferredPrefix") or obj["id"].upper()
112 prefix_map[prefix] = "http://purl.obolibrary.org/obo/" + prefix + "_"
113
114 print(
115 dumps(
116 {"@context": prefix_map}, sort_keys=True, indent=4, separators=(",", ": ")
117 )
118 )
119
120
121 def write_all_contributors(ontologies, args):
122 """
123 Query github API for all contributors to an ontology,
124 write results as json
125 """
126 results = []
127 for ont_obj in ontologies:
128 id = ont_obj["id"]
129 logging.info("Getting info for {}".format(id))
130 repo_path = get_repo_path(ont_obj)
131 if repo_path is not None:
132 contribs = list(get_ontology_contributors(repo_path))
133 print("CONTRIBS({})=={}".format(id, contribs))
134 for c in contribs:
135 print("#{}\t{}\n".format(id, c["login"]))
136 results.append(dict(id=id, contributors=contribs))
137 else:
138 logging.warn("No repo_path declared for {}".format(id))
139 print(dumps(results, sort_keys=True, indent=4, separators=(",", ": ")))
140
141
142 def get_ontology_contributors(repo_path):
143 """
144 Get individual contributors to a org/repo_path
145 repo_path is a string "org/repo"
146 """
147 url = "https://api.github.com/repos/{}/contributors".format(repo_path)
148 # TODO: allow use of oauth token;
149 # GH has a quota for non-logged in API calls
150 time.sleep(3)
151 with closing(requests.get(url, stream=False)) as resp:
152 ok = resp.status_code == 200
153 if ok:
154 results = resp.json()
155 logging.info("RESP={}".format(results))
156 return results
157 else:
158 logging.error("Failed: {}".format(url))
159 return []
160
161
162 def get_repo_path(ont_obj):
163 """
164 Extract the repository path for the given object
165 """
166 repo_path = None
167 if "repository" in ont_obj:
168 repo_path = ont_obj["repository"]
169 elif "tracker" in ont_obj:
170 tracker = ont_obj["tracker"]
171 if tracker is not None and "github" in tracker:
172 repo_path = tracker.replace("/issues", "")
173
174 if repo_path is not None:
175 repo_path = repo_path.replace("https://github.com/", "")
176 if repo_path.endswith("/"):
177 repo_path = repo_path[:-1]
178 return repo_path
179 else:
180 logging.warn("Could not get gh repo_path for ".format(ont_obj))
181 return None
182
183
184 def run_sparql(obj, p, expected_value, q):
185 """
186 Generate a SPARQL statement using query q and parameter p, and expect 'expected_value' as the
187 result. Print out a message indicating whether the there is or is not a match for the given object
188 """
189 sparql = SPARQLWrapper("http://sparql.hegroup.org/sparql")
190 sparql.setQuery(q)
191 sparql.setReturnFormat(JSON)
192 results = sparql.query().convert()
193
194 id = obj["id"]
195 got_value = False
196 is_match = False
197 vs = []
198
199 for result in results["results"]["bindings"]:
200 got_value = True
201 v = result[p]["value"]
202 vs.append(str(v))
203 if v == expected_value:
204 is_match = True
205
206 if got_value and is_match:
207 msg = "CONSISTENT"
208 elif got_value and not is_match:
209 if expected_value == "":
210 msg = "UNDECLARED_LOCAL: REMOTE:" + ",".join(vs)
211 else:
212 msg = "INCONSISTENT: REMOTE:" + ",".join(vs) + " != LOCAL:" + expected_value
213 else:
214 msg = "UNDECLARED_REMOTE"
215 print(id + " " + p + " " + msg)
216
217
218 def sparql_compare_ont(obj):
219 """
220 Some ontologies will directly declare some subset of the OBO metadata
221 directly in the ontology header. In the majority of cases we should
222 yield to the provider. However, we reserve the right to override. For
223 example, OBO may have particular guidelines about the length of the title,
224 required for coherency within the registry. All differences should be
225 discussed with the provider and an accomodation reached
226 """
227 if "ontology_purl" not in obj:
228 return
229
230 purl = obj["ontology_purl"]
231 # this could be made more declarative, or driven by the context.jsonld mapping;
232 # however, for now this is relatively simple and easy to understand:
233 run_sparql(
234 obj,
235 "license",
236 obj["license"]["url"] if "license" in obj else "",
237 "SELECT DISTINCT ?license WHERE {<"
238 + purl
239 + "> <http://purl.org/dc/elements/1.1/license> ?license}",
240 )
241 run_sparql(
242 obj,
243 "title",
244 obj["title"] if "title" in obj else "",
245 "SELECT DISTINCT ?title WHERE {<"
246 + purl
247 + "> <http://purl.org/dc/elements/1.1/title> ?title}",
248 )
249 run_sparql(
250 obj,
251 "description",
252 obj["description"] if "description" in obj else "",
253 "SELECT DISTINCT ?description WHERE {<"
254 + purl
255 + "> <http://purl.org/dc/elements/1.1/description> ?description}",
256 )
257 run_sparql(
258 obj,
259 "homepage",
260 obj["homepage"] if "homepage" in obj else "",
261 "SELECT DISTINCT ?homepage WHERE {<"
262 + purl
263 + "> <http://xmlns.com/foaf/0.1/homepage> ?homepage}",
264 )
265
266
267 def sparql_compare_all(ontologies, args):
268 """
269 Run sparql_compare_ont() on all the given ontologies.
270 """
271 # The `args` parameter is not used here but it is convenient to have it in our definition, since
272 # whether this function or one of the other main `subcommands` of this script is called is
273 # determine dynamically, and we want all of the subcommands to have a consistent signature.
274 for obj in ontologies:
275 sparql_compare_ont(obj)
276
277
278 if __name__ == "__main__":
279 main()
280
[end of util/processor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/util/processor.py b/util/processor.py
--- a/util/processor.py
+++ b/util/processor.py
@@ -109,7 +109,10 @@
for obj in ontologies:
if has_obo_prefix(obj):
prefix = obj.get("preferredPrefix") or obj["id"].upper()
- prefix_map[prefix] = "http://purl.obolibrary.org/obo/" + prefix + "_"
+ prefix_map[prefix] = {
+ "@id": "http://purl.obolibrary.org/obo/" + prefix + "_",
+ "@prefix": True,
+ }
print(
dumps(
| {"golden_diff": "diff --git a/util/processor.py b/util/processor.py\n--- a/util/processor.py\n+++ b/util/processor.py\n@@ -109,7 +109,10 @@\n for obj in ontologies:\n if has_obo_prefix(obj):\n prefix = obj.get(\"preferredPrefix\") or obj[\"id\"].upper()\n- prefix_map[prefix] = \"http://purl.obolibrary.org/obo/\" + prefix + \"_\"\n+ prefix_map[prefix] = {\n+ \"@id\": \"http://purl.obolibrary.org/obo/\" + prefix + \"_\",\n+ \"@prefix\": True,\n+ }\n \n print(\n dumps(\n", "issue": "JSON-LD context does not expand as intended\nhttps://purl.obolibrary.org/meta/obo_context.jsonld\r\n\r\nWe can't have trailing underscores on expansions (and have it behave as expected). Sorry, I don't make the rules\r\n\r\nMore context here:\r\n\r\n- https://github.com/w3c/json-ld-syntax/issues/329\r\n\r\nThese would all have to be modified to be nested\r\n\r\n```json\r\n \"RO\": {\r\n \"@id\": \"http://purl.obolibrary.org/obo/RO_\",\r\n \"@prefix\": true\r\n }\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport datetime\nimport logging\nimport sys\nimport time\nfrom contextlib import closing\nfrom json import dumps\n\nimport requests\nimport yaml\nfrom SPARQLWrapper import JSON, SPARQLWrapper\n\n__author__ = \"cjm\"\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Helper utils for OBO\",\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument(\n \"-i\", \"--input\", type=str, required=False, help=\"Input metadata file\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n default=0,\n action=\"count\",\n help=\"Increase output verbosity (min.: 0, max. 2)\",\n )\n subparsers = parser.add_subparsers(dest=\"subcommand\", help=\"sub-command help\")\n\n # SUBCOMMAND\n parser_n = subparsers.add_parser(\"check-urls\", help=\"Ensure PURLs resolve\")\n parser_n.set_defaults(function=check_urls)\n\n parser_n = subparsers.add_parser(\n \"sparql-compare\",\n help=\"Run SPARQL commands against the db to generate a \" \"consistency report\",\n )\n parser_n.set_defaults(function=sparql_compare_all)\n\n parser_n = subparsers.add_parser(\"extract-context\", help=\"Extracts JSON-LD context\")\n parser_n.set_defaults(function=extract_context)\n\n parser_n = subparsers.add_parser(\n \"extract-contributors\",\n help=\"Queries github API for metadata about contributors\",\n )\n parser_n.set_defaults(function=write_all_contributors)\n\n args = parser.parse_args()\n if args.verbosity >= 2:\n logging.basicConfig(level=logging.DEBUG)\n elif args.verbosity == 1:\n logging.basicConfig(level=logging.INFO)\n else:\n logging.basicConfig(level=logging.WARNING)\n\n with open(args.input, \"r\") as f:\n obj = yaml.load(f, Loader=yaml.SafeLoader)\n ontologies = obj[\"ontologies\"]\n\n func = args.function\n func(ontologies, args)\n\n\ndef check_urls(ontologies, args):\n \"\"\"\n Ensure PURLs resolve\n \"\"\"\n\n def test_url(url):\n try:\n with closing(requests.get(url, stream=False)) as resp:\n return resp.status_code == 200\n except requests.exceptions.InvalidSchema as e:\n # TODO: requests lib doesn't handle ftp. 
For now simply return True in that case.\n if not format(e).startswith(\"No connection adapters were found for 'ftp:\"):\n raise\n return True\n\n failed_ids = []\n for ont in ontologies:\n for p in ont.get(\"products\", []):\n pid = p[\"id\"]\n if not test_url(p.get(\"ontology_purl\")):\n failed_ids.append(pid)\n if len(failed_ids) > 0:\n print(\"FAILURES:\")\n for pid in failed_ids:\n print(pid, file=sys.stderr)\n exit(1)\n\n\ndef extract_context(ontologies, args):\n \"\"\"\n Writes to STDOUT a sorted JSON map from ontology prefixes to PURLs\n \"\"\"\n\n def has_obo_prefix(obj):\n return (\"uri_prefix\" not in obj) or (\n obj[\"uri_prefix\"] == \"http://purl.obolibrary.org/obo/\"\n )\n\n prefix_map = {}\n for obj in ontologies:\n if has_obo_prefix(obj):\n prefix = obj.get(\"preferredPrefix\") or obj[\"id\"].upper()\n prefix_map[prefix] = \"http://purl.obolibrary.org/obo/\" + prefix + \"_\"\n\n print(\n dumps(\n {\"@context\": prefix_map}, sort_keys=True, indent=4, separators=(\",\", \": \")\n )\n )\n\n\ndef write_all_contributors(ontologies, args):\n \"\"\"\n Query github API for all contributors to an ontology,\n write results as json\n \"\"\"\n results = []\n for ont_obj in ontologies:\n id = ont_obj[\"id\"]\n logging.info(\"Getting info for {}\".format(id))\n repo_path = get_repo_path(ont_obj)\n if repo_path is not None:\n contribs = list(get_ontology_contributors(repo_path))\n print(\"CONTRIBS({})=={}\".format(id, contribs))\n for c in contribs:\n print(\"#{}\\t{}\\n\".format(id, c[\"login\"]))\n results.append(dict(id=id, contributors=contribs))\n else:\n logging.warn(\"No repo_path declared for {}\".format(id))\n print(dumps(results, sort_keys=True, indent=4, separators=(\",\", \": \")))\n\n\ndef get_ontology_contributors(repo_path):\n \"\"\"\n Get individual contributors to a org/repo_path\n repo_path is a string \"org/repo\"\n \"\"\"\n url = \"https://api.github.com/repos/{}/contributors\".format(repo_path)\n # TODO: allow use of oauth token;\n # GH has a quota for non-logged in API calls\n time.sleep(3)\n with closing(requests.get(url, stream=False)) as resp:\n ok = resp.status_code == 200\n if ok:\n results = resp.json()\n logging.info(\"RESP={}\".format(results))\n return results\n else:\n logging.error(\"Failed: {}\".format(url))\n return []\n\n\ndef get_repo_path(ont_obj):\n \"\"\"\n Extract the repository path for the given object\n \"\"\"\n repo_path = None\n if \"repository\" in ont_obj:\n repo_path = ont_obj[\"repository\"]\n elif \"tracker\" in ont_obj:\n tracker = ont_obj[\"tracker\"]\n if tracker is not None and \"github\" in tracker:\n repo_path = tracker.replace(\"/issues\", \"\")\n\n if repo_path is not None:\n repo_path = repo_path.replace(\"https://github.com/\", \"\")\n if repo_path.endswith(\"/\"):\n repo_path = repo_path[:-1]\n return repo_path\n else:\n logging.warn(\"Could not get gh repo_path for \".format(ont_obj))\n return None\n\n\ndef run_sparql(obj, p, expected_value, q):\n \"\"\"\n Generate a SPARQL statement using query q and parameter p, and expect 'expected_value' as the\n result. 
Print out a message indicating whether the there is or is not a match for the given object\n \"\"\"\n sparql = SPARQLWrapper(\"http://sparql.hegroup.org/sparql\")\n sparql.setQuery(q)\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n\n id = obj[\"id\"]\n got_value = False\n is_match = False\n vs = []\n\n for result in results[\"results\"][\"bindings\"]:\n got_value = True\n v = result[p][\"value\"]\n vs.append(str(v))\n if v == expected_value:\n is_match = True\n\n if got_value and is_match:\n msg = \"CONSISTENT\"\n elif got_value and not is_match:\n if expected_value == \"\":\n msg = \"UNDECLARED_LOCAL: REMOTE:\" + \",\".join(vs)\n else:\n msg = \"INCONSISTENT: REMOTE:\" + \",\".join(vs) + \" != LOCAL:\" + expected_value\n else:\n msg = \"UNDECLARED_REMOTE\"\n print(id + \" \" + p + \" \" + msg)\n\n\ndef sparql_compare_ont(obj):\n \"\"\"\n Some ontologies will directly declare some subset of the OBO metadata\n directly in the ontology header. In the majority of cases we should\n yield to the provider. However, we reserve the right to override. For\n example, OBO may have particular guidelines about the length of the title,\n required for coherency within the registry. All differences should be\n discussed with the provider and an accomodation reached\n \"\"\"\n if \"ontology_purl\" not in obj:\n return\n\n purl = obj[\"ontology_purl\"]\n # this could be made more declarative, or driven by the context.jsonld mapping;\n # however, for now this is relatively simple and easy to understand:\n run_sparql(\n obj,\n \"license\",\n obj[\"license\"][\"url\"] if \"license\" in obj else \"\",\n \"SELECT DISTINCT ?license WHERE {<\"\n + purl\n + \"> <http://purl.org/dc/elements/1.1/license> ?license}\",\n )\n run_sparql(\n obj,\n \"title\",\n obj[\"title\"] if \"title\" in obj else \"\",\n \"SELECT DISTINCT ?title WHERE {<\"\n + purl\n + \"> <http://purl.org/dc/elements/1.1/title> ?title}\",\n )\n run_sparql(\n obj,\n \"description\",\n obj[\"description\"] if \"description\" in obj else \"\",\n \"SELECT DISTINCT ?description WHERE {<\"\n + purl\n + \"> <http://purl.org/dc/elements/1.1/description> ?description}\",\n )\n run_sparql(\n obj,\n \"homepage\",\n obj[\"homepage\"] if \"homepage\" in obj else \"\",\n \"SELECT DISTINCT ?homepage WHERE {<\"\n + purl\n + \"> <http://xmlns.com/foaf/0.1/homepage> ?homepage}\",\n )\n\n\ndef sparql_compare_all(ontologies, args):\n \"\"\"\n Run sparql_compare_ont() on all the given ontologies.\n \"\"\"\n # The `args` parameter is not used here but it is convenient to have it in our definition, since\n # whether this function or one of the other main `subcommands` of this script is called is\n # determine dynamically, and we want all of the subcommands to have a consistent signature.\n for obj in ontologies:\n sparql_compare_ont(obj)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "util/processor.py"}]} | 3,514 | 147 |
gh_patches_debug_39322 | rasdani/github-patches | git_diff | carpentries__amy-583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add event organizer info to the API
Compute Canada would like to be able to use the API to pull all the events it is hosting and then use this information to populate its website.
Might be nice to have the EventBrite IDs there too.
</issue>
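A rough sketch of the kind of API change being requested is shown below; it is hedged and illustrative, and the field names (`reg_key` for the EventBrite ID, `host` for the hosting organization) are assumptions about AMY's models rather than confirmed API:

```python
# Hedged sketch, not AMY's actual code: expose an EventBrite ID on the event
# serializer and let clients filter published events by hosting organization.
from rest_framework import serializers
from rest_framework.generics import ListAPIView

from workshops.models import Event


class EventSerializer(serializers.ModelSerializer):
    eventbrite_id = serializers.CharField(source="reg_key")  # assumes Event.reg_key stores it

    class Meta:
        model = Event
        fields = ("slug", "start", "end", "eventbrite_id")


class PublishedEvents(ListAPIView):
    serializer_class = EventSerializer

    def get_queryset(self):
        queryset = Event.objects.published_events()
        host = self.request.query_params.get("host")  # e.g. ?host=<organization pk>
        if host is not None:
            queryset = queryset.filter(host__pk=host)
        return queryset
```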
<code>
[start of api/serializers.py]
1 from rest_framework import serializers
2
3 from workshops.models import Badge, Airport, Person, Event
4
5
6 class PersonUsernameSerializer(serializers.ModelSerializer):
7 name = serializers.CharField(source='get_full_name')
8 user = serializers.CharField(source='username')
9
10 class Meta:
11 model = Person
12 fields = ('name', 'user', )
13
14
15 class ExportBadgesSerializer(serializers.ModelSerializer):
16 persons = PersonUsernameSerializer(many=True, source='person_set')
17
18 class Meta:
19 model = Badge
20 fields = ('name', 'persons')
21
22
23 class ExportInstructorLocationsSerializer(serializers.ModelSerializer):
24 name = serializers.CharField(source='fullname')
25 instructors = PersonUsernameSerializer(many=True, source='person_set')
26
27 class Meta:
28 model = Airport
29 fields = ('name', 'latitude', 'longitude', 'instructors', 'country')
30
31
32 class EventSerializer(serializers.ModelSerializer):
33 humandate = serializers.SerializerMethodField()
34 country = serializers.CharField()
35 start = serializers.DateField(format=None)
36 end = serializers.DateField(format=None)
37 url = serializers.URLField(source='website_url')
38
39 def get_humandate(self, obj):
40 """Render start and end dates as human-readable short date."""
41 return EventSerializer.human_readable_date(obj.start, obj.end)
42
43 @staticmethod
44 def human_readable_date(date1, date2):
45 """Render start and end dates as human-readable short date."""
46 if date1 and not date2:
47 return '{:%b %d, %Y}-???'.format(date1)
48 elif date2 and not date1:
49 return '???-{:%b %d, %Y}'.format(date2)
50 elif not date2 and not date1:
51 return '???-???'
52
53 if date1.year == date2.year:
54 if date1.month == date2.month:
55 return '{:%b %d}-{:%d, %Y}'.format(date1, date2)
56 else:
57 return '{:%b %d}-{:%b %d, %Y}'.format(date1, date2)
58 else:
59 return '{:%b %d, %Y}-{:%b %d, %Y}'.format(date1, date2)
60
61 class Meta:
62 model = Event
63 fields = (
64 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',
65 'venue', 'address', 'latitude', 'longitude',
66 )
67
[end of api/serializers.py]
[start of api/views.py]
1 from django.db.models import Q
2 from rest_framework.generics import ListAPIView
3 from rest_framework.permissions import IsAuthenticatedOrReadOnly
4 from rest_framework.response import Response
5 from rest_framework.reverse import reverse
6 from rest_framework.views import APIView
7
8 from workshops.models import Badge, Airport, Event
9
10 from .serializers import (
11 ExportBadgesSerializer,
12 ExportInstructorLocationsSerializer,
13 EventSerializer,
14 )
15
16
17 class ApiRoot(APIView):
18 def get(self, request, format=None):
19 return Response({
20 'export-badges': reverse('api:export-badges', request=request,
21 format=format),
22 'export-instructors': reverse('api:export-instructors',
23 request=request, format=format),
24 'events-published': reverse('api:events-published',
25 request=request, format=format),
26 })
27
28
29 class ExportBadgesView(ListAPIView):
30 """List all badges and people who have them."""
31 permission_classes = (IsAuthenticatedOrReadOnly, )
32 paginator = None # disable pagination
33
34 queryset = Badge.objects.prefetch_related('person_set')
35 serializer_class = ExportBadgesSerializer
36
37
38 class ExportInstructorLocationsView(ListAPIView):
39 """List all airports and instructors located near them."""
40 permission_classes = (IsAuthenticatedOrReadOnly, )
41 paginator = None # disable pagination
42
43 queryset = Airport.objects.exclude(person=None) \
44 .prefetch_related('person_set')
45 serializer_class = ExportInstructorLocationsSerializer
46
47
48 class PublishedEvents(ListAPIView):
49 # only events that have both a starting date and a URL
50 permission_classes = (IsAuthenticatedOrReadOnly, )
51 paginator = None # disable pagination
52
53 serializer_class = EventSerializer
54 queryset = Event.objects.published_events()
55
[end of api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/serializers.py b/api/serializers.py
--- a/api/serializers.py
+++ b/api/serializers.py
@@ -35,6 +35,7 @@
start = serializers.DateField(format=None)
end = serializers.DateField(format=None)
url = serializers.URLField(source='website_url')
+ eventbrite_id = serializers.CharField(source='reg_key')
def get_humandate(self, obj):
"""Render start and end dates as human-readable short date."""
@@ -62,5 +63,5 @@
model = Event
fields = (
'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',
- 'venue', 'address', 'latitude', 'longitude',
+ 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id',
)
diff --git a/api/views.py b/api/views.py
--- a/api/views.py
+++ b/api/views.py
@@ -1,5 +1,6 @@
from django.db.models import Q
from rest_framework.generics import ListAPIView
+from rest_framework.metadata import SimpleMetadata
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.reverse import reverse
@@ -14,6 +15,21 @@
)
+class QueryMetadata(SimpleMetadata):
+ """Additionally include info about query parameters."""
+
+ def determine_metadata(self, request, view):
+ print('doing something')
+ data = super().determine_metadata(request, view)
+
+ try:
+ data['query_params'] = view.get_query_params_description()
+ except AttributeError:
+ pass
+
+ return data
+
+
class ApiRoot(APIView):
def get(self, request, format=None):
return Response({
@@ -46,9 +62,34 @@
class PublishedEvents(ListAPIView):
+ """List published events."""
+
# only events that have both a starting date and a URL
permission_classes = (IsAuthenticatedOrReadOnly, )
paginator = None # disable pagination
serializer_class = EventSerializer
- queryset = Event.objects.published_events()
+
+ metadata_class = QueryMetadata
+
+ def get_queryset(self):
+ """Optionally restrict the returned event set to events hosted by
+ specific host or administered by specific admin."""
+ queryset = Event.objects.published_events()
+
+ administrator = self.request.query_params.get('administrator', None)
+ if administrator is not None:
+ queryset = queryset.filter(administrator__pk=administrator)
+
+ host = self.request.query_params.get('host', None)
+ if host is not None:
+ queryset = queryset.filter(host__pk=host)
+
+ return queryset
+
+ def get_query_params_description(self):
+ return {
+ 'administrator': 'ID of the organization responsible for admin '
+ 'work on events.',
+ 'host': 'ID of the organization hosting the event.',
+ }
| {"golden_diff": "diff --git a/api/serializers.py b/api/serializers.py\n--- a/api/serializers.py\n+++ b/api/serializers.py\n@@ -35,6 +35,7 @@\n start = serializers.DateField(format=None)\n end = serializers.DateField(format=None)\n url = serializers.URLField(source='website_url')\n+ eventbrite_id = serializers.CharField(source='reg_key')\n \n def get_humandate(self, obj):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n@@ -62,5 +63,5 @@\n model = Event\n fields = (\n 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',\n- 'venue', 'address', 'latitude', 'longitude',\n+ 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id',\n )\ndiff --git a/api/views.py b/api/views.py\n--- a/api/views.py\n+++ b/api/views.py\n@@ -1,5 +1,6 @@\n from django.db.models import Q\n from rest_framework.generics import ListAPIView\n+from rest_framework.metadata import SimpleMetadata\n from rest_framework.permissions import IsAuthenticatedOrReadOnly\n from rest_framework.response import Response\n from rest_framework.reverse import reverse\n@@ -14,6 +15,21 @@\n )\n \n \n+class QueryMetadata(SimpleMetadata):\n+ \"\"\"Additionally include info about query parameters.\"\"\"\n+\n+ def determine_metadata(self, request, view):\n+ print('doing something')\n+ data = super().determine_metadata(request, view)\n+\n+ try:\n+ data['query_params'] = view.get_query_params_description()\n+ except AttributeError:\n+ pass\n+\n+ return data\n+\n+\n class ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n@@ -46,9 +62,34 @@\n \n \n class PublishedEvents(ListAPIView):\n+ \"\"\"List published events.\"\"\"\n+\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n \n serializer_class = EventSerializer\n- queryset = Event.objects.published_events()\n+\n+ metadata_class = QueryMetadata\n+\n+ def get_queryset(self):\n+ \"\"\"Optionally restrict the returned event set to events hosted by\n+ specific host or administered by specific admin.\"\"\"\n+ queryset = Event.objects.published_events()\n+\n+ administrator = self.request.query_params.get('administrator', None)\n+ if administrator is not None:\n+ queryset = queryset.filter(administrator__pk=administrator)\n+\n+ host = self.request.query_params.get('host', None)\n+ if host is not None:\n+ queryset = queryset.filter(host__pk=host)\n+\n+ return queryset\n+\n+ def get_query_params_description(self):\n+ return {\n+ 'administrator': 'ID of the organization responsible for admin '\n+ 'work on events.',\n+ 'host': 'ID of the organization hosting the event.',\n+ }\n", "issue": "Add event organizer info to the API\nCompute Canada would like to be able to use the API to pull all the events it is hosting and then use this information to populate website.\n\nMight be nice to have the EventBrite IDs there too.\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom workshops.models import Badge, Airport, Person, Event\n\n\nclass PersonUsernameSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='get_full_name')\n user = serializers.CharField(source='username')\n\n class Meta:\n model = Person\n fields = ('name', 'user', )\n\n\nclass ExportBadgesSerializer(serializers.ModelSerializer):\n persons = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Badge\n fields = ('name', 'persons')\n\n\nclass 
ExportInstructorLocationsSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='fullname')\n instructors = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Airport\n fields = ('name', 'latitude', 'longitude', 'instructors', 'country')\n\n\nclass EventSerializer(serializers.ModelSerializer):\n humandate = serializers.SerializerMethodField()\n country = serializers.CharField()\n start = serializers.DateField(format=None)\n end = serializers.DateField(format=None)\n url = serializers.URLField(source='website_url')\n\n def get_humandate(self, obj):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n return EventSerializer.human_readable_date(obj.start, obj.end)\n\n @staticmethod\n def human_readable_date(date1, date2):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n if date1 and not date2:\n return '{:%b %d, %Y}-???'.format(date1)\n elif date2 and not date1:\n return '???-{:%b %d, %Y}'.format(date2)\n elif not date2 and not date1:\n return '???-???'\n\n if date1.year == date2.year:\n if date1.month == date2.month:\n return '{:%b %d}-{:%d, %Y}'.format(date1, date2)\n else:\n return '{:%b %d}-{:%b %d, %Y}'.format(date1, date2)\n else:\n return '{:%b %d, %Y}-{:%b %d, %Y}'.format(date1, date2)\n\n class Meta:\n model = Event\n fields = (\n 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',\n 'venue', 'address', 'latitude', 'longitude',\n )\n", "path": "api/serializers.py"}, {"content": "from django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event\n\nfrom .serializers import (\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n)\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass PublishedEvents(ListAPIView):\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n queryset = Event.objects.published_events()\n", "path": "api/views.py"}]} | 1,704 | 653 |
gh_patches_debug_37874 | rasdani/github-patches | git_diff | microsoft__torchgeo-352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Re-think how configs are handled in train.py
Currently, configuration for `train.py` is handled with [OmegaConf](https://omegaconf.readthedocs.io/en/2.1_branch/usage.html). This made more sense when the tasks (and accompanying trainer code) were fragmented, as we could easily define per-task configuration. Now that the trainer code we would like to include in base TorchGeo is being generalized into things like `ClassificationTask` and `SemanticSegmentationTask`, _and_ it is clear that more complicated training configurations won't be supported by torchgeo proper, it might make sense to pull out the OmegaConf part and go with a simpler `argparse`-based approach. Bonus: this would also let us get rid of a dependency. I'm not sure exactly how the argparse approach would work in all cases, but it is worth more thought!
Lightning has a few pieces of docs that can help with this:
- https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-in-python-scripts
- https://pytorch-lightning.readthedocs.io/en/stable/common/hyperparameters.html
- https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_cli.html
Whatever we settle on here should definitely still allow passing arguments via a YAML config file. This allows reproducible benchmark experiment configurations to be saved in source control.
</issue>
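For reference, the LightningCLI route from the last link could look roughly like this sketch; the import path and constructor arguments vary between Lightning releases, and the task/datamodule pairing is only an example, so treat it as an assumption-laden illustration rather than a drop-in replacement for `train.py`:

```python
# Hedged sketch: a LightningCLI entry point that still supports YAML configs,
# e.g. `python train.py --config conf/eurosat.yaml` (newer releases add a
# `fit` subcommand). The import lives at pytorch_lightning.cli in later versions.
from pytorch_lightning.utilities.cli import LightningCLI

from torchgeo.datamodules import EuroSATDataModule
from torchgeo.trainers import ClassificationTask


def main() -> None:
    LightningCLI(
        model_class=ClassificationTask,
        datamodule_class=EuroSATDataModule,
        seed_everything_default=0,  # reproducibility, analogous to conf.program.seed
    )


if __name__ == "__main__":
    main()
```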
<code>
[start of train.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License.
5
6 """torchgeo model training script."""
7
8 import os
9 from typing import Any, Dict, Tuple, Type, cast
10
11 import pytorch_lightning as pl
12 from omegaconf import DictConfig, OmegaConf
13 from pytorch_lightning import loggers as pl_loggers
14 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
15
16 from torchgeo.datamodules import (
17 BigEarthNetDataModule,
18 ChesapeakeCVPRDataModule,
19 COWCCountingDataModule,
20 CycloneDataModule,
21 ETCI2021DataModule,
22 EuroSATDataModule,
23 LandCoverAIDataModule,
24 NAIPChesapeakeDataModule,
25 OSCDDataModule,
26 RESISC45DataModule,
27 SEN12MSDataModule,
28 So2SatDataModule,
29 UCMercedDataModule,
30 )
31 from torchgeo.trainers import (
32 BYOLTask,
33 ClassificationTask,
34 MultiLabelClassificationTask,
35 RegressionTask,
36 SemanticSegmentationTask,
37 )
38
39 TASK_TO_MODULES_MAPPING: Dict[
40 str, Tuple[Type[pl.LightningModule], Type[pl.LightningDataModule]]
41 ] = {
42 "bigearthnet_all": (MultiLabelClassificationTask, BigEarthNetDataModule),
43 "bigearthnet_s1": (MultiLabelClassificationTask, BigEarthNetDataModule),
44 "bigearthnet_s2": (MultiLabelClassificationTask, BigEarthNetDataModule),
45 "byol": (BYOLTask, ChesapeakeCVPRDataModule),
46 "chesapeake_cvpr_5": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),
47 "chesapeake_cvpr_7": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),
48 "chesapeake_cvpr_prior": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),
49 "cowc_counting": (RegressionTask, COWCCountingDataModule),
50 "cyclone": (RegressionTask, CycloneDataModule),
51 "eurosat": (ClassificationTask, EuroSATDataModule),
52 "etci2021": (SemanticSegmentationTask, ETCI2021DataModule),
53 "landcoverai": (SemanticSegmentationTask, LandCoverAIDataModule),
54 "naipchesapeake": (SemanticSegmentationTask, NAIPChesapeakeDataModule),
55 "oscd_all": (SemanticSegmentationTask, OSCDDataModule),
56 "oscd_rgb": (SemanticSegmentationTask, OSCDDataModule),
57 "resisc45": (ClassificationTask, RESISC45DataModule),
58 "sen12ms_all": (SemanticSegmentationTask, SEN12MSDataModule),
59 "sen12ms_s1": (SemanticSegmentationTask, SEN12MSDataModule),
60 "sen12ms_s2_all": (SemanticSegmentationTask, SEN12MSDataModule),
61 "sen12ms_s2_reduced": (SemanticSegmentationTask, SEN12MSDataModule),
62 "so2sat_supervised": (ClassificationTask, So2SatDataModule),
63 "so2sat_unsupervised": (ClassificationTask, So2SatDataModule),
64 "ucmerced": (ClassificationTask, UCMercedDataModule),
65 }
66
67
68 def set_up_omegaconf() -> DictConfig:
69 """Loads program arguments from either YAML config files or command line arguments.
70
71 This method loads defaults/a schema from "conf/defaults.yaml" as well as potential
72 arguments from the command line. If one of the command line arguments is
73 "config_file", then we additionally read arguments from that YAML file. One of the
74 config file based arguments or command line arguments must specify task.name. The
75 task.name value is used to grab a task specific defaults from its respective
76 trainer. The final configuration is given as merge(task_defaults, defaults,
77 config file, command line). The merge() works from the first argument to the last,
78 replacing existing values with newer values. Additionally, if any values are
79 merged into task_defaults without matching types, then there will be a runtime
80 error.
81
82 Returns:
83 an OmegaConf DictConfig containing all the validated program arguments
84
85 Raises:
86 FileNotFoundError: when ``config_file`` does not exist
87 ValueError: when ``task.name`` is not a valid task
88 """
89 conf = OmegaConf.load("conf/defaults.yaml")
90 command_line_conf = OmegaConf.from_cli()
91
92 if "config_file" in command_line_conf:
93 config_fn = command_line_conf.config_file
94 if not os.path.isfile(config_fn):
95 raise FileNotFoundError(f"config_file={config_fn} is not a valid file")
96
97 user_conf = OmegaConf.load(config_fn)
98 conf = OmegaConf.merge(conf, user_conf)
99
100 conf = OmegaConf.merge( # Merge in any arguments passed via the command line
101 conf, command_line_conf
102 )
103
104 # These OmegaConf structured configs enforce a schema at runtime, see:
105 # https://omegaconf.readthedocs.io/en/2.0_branch/structured_config.html#merging-with-other-configs
106 task_name = conf.experiment.task
107 task_config_fn = os.path.join("conf", "task_defaults", f"{task_name}.yaml")
108 if task_name == "test":
109 task_conf = OmegaConf.create()
110 elif os.path.exists(task_config_fn):
111 task_conf = cast(DictConfig, OmegaConf.load(task_config_fn))
112 else:
113 raise ValueError(
114 f"experiment.task={task_name} is not recognized as a valid task"
115 )
116
117 conf = OmegaConf.merge(task_conf, conf)
118 conf = cast(DictConfig, conf) # convince mypy that everything is alright
119
120 return conf
121
122
123 def main(conf: DictConfig) -> None:
124 """Main training loop."""
125 ######################################
126 # Setup output directory
127 ######################################
128
129 experiment_name = conf.experiment.name
130 task_name = conf.experiment.task
131 if os.path.isfile(conf.program.output_dir):
132 raise NotADirectoryError("`program.output_dir` must be a directory")
133 os.makedirs(conf.program.output_dir, exist_ok=True)
134
135 experiment_dir = os.path.join(conf.program.output_dir, experiment_name)
136 os.makedirs(experiment_dir, exist_ok=True)
137
138 if len(os.listdir(experiment_dir)) > 0:
139 if conf.program.overwrite:
140 print(
141 f"WARNING! The experiment directory, {experiment_dir}, already exists, "
142 + "we might overwrite data in it!"
143 )
144 else:
145 raise FileExistsError(
146 f"The experiment directory, {experiment_dir}, already exists and isn't "
147 + "empty. We don't want to overwrite any existing results, exiting..."
148 )
149
150 with open(os.path.join(experiment_dir, "experiment_config.yaml"), "w") as f:
151 OmegaConf.save(config=conf, f=f)
152
153 ######################################
154 # Choose task to run based on arguments or configuration
155 ######################################
156 # Convert the DictConfig into a dictionary so that we can pass as kwargs.
157 task_args = cast(Dict[str, Any], OmegaConf.to_object(conf.experiment.module))
158 datamodule_args = cast(
159 Dict[str, Any], OmegaConf.to_object(conf.experiment.datamodule)
160 )
161
162 datamodule: pl.LightningDataModule
163 task: pl.LightningModule
164 if task_name in TASK_TO_MODULES_MAPPING:
165 task_class, datamodule_class = TASK_TO_MODULES_MAPPING[task_name]
166 task = task_class(**task_args)
167 datamodule = datamodule_class(**datamodule_args)
168 else:
169 raise ValueError(
170 f"experiment.task={task_name} is not recognized as a valid task"
171 )
172
173 ######################################
174 # Setup trainer
175 ######################################
176 tb_logger = pl_loggers.TensorBoardLogger(conf.program.log_dir, name=experiment_name)
177
178 checkpoint_callback = ModelCheckpoint(
179 monitor="val_loss", dirpath=experiment_dir, save_top_k=1, save_last=True
180 )
181 early_stopping_callback = EarlyStopping(
182 monitor="val_loss", min_delta=0.00, patience=18
183 )
184
185 trainer_args = cast(Dict[str, Any], OmegaConf.to_object(conf.trainer))
186
187 trainer_args["callbacks"] = [checkpoint_callback, early_stopping_callback]
188 trainer_args["logger"] = tb_logger
189 trainer_args["default_root_dir"] = experiment_dir
190 trainer = pl.Trainer(**trainer_args)
191
192 if trainer_args.get("auto_lr_find"):
193 trainer.tune(model=task, datamodule=datamodule)
194
195 ######################################
196 # Run experiment
197 ######################################
198 trainer.fit(model=task, datamodule=datamodule)
199 trainer.test(model=task, datamodule=datamodule)
200
201
202 if __name__ == "__main__":
203 # Taken from https://github.com/pangeo-data/cog-best-practices
204 _rasterio_best_practices = {
205 "GDAL_DISABLE_READDIR_ON_OPEN": "EMPTY_DIR",
206 "AWS_NO_SIGN_REQUEST": "YES",
207 "GDAL_MAX_RAW_BLOCK_CACHE_SIZE": "200000000",
208 "GDAL_SWATH_SIZE": "200000000",
209 "VSI_CURL_CACHE_SIZE": "200000000",
210 }
211 os.environ.update(_rasterio_best_practices)
212
213 conf = set_up_omegaconf()
214
215 # Set random seed for reproducibility
216 # https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.utilities.seed.html#pytorch_lightning.utilities.seed.seed_everything
217 pl.seed_everything(conf.program.seed)
218
219 # Main training procedure
220 main(conf)
221
[end of train.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/train.py b/train.py
--- a/train.py
+++ b/train.py
@@ -39,28 +39,19 @@
TASK_TO_MODULES_MAPPING: Dict[
str, Tuple[Type[pl.LightningModule], Type[pl.LightningDataModule]]
] = {
- "bigearthnet_all": (MultiLabelClassificationTask, BigEarthNetDataModule),
- "bigearthnet_s1": (MultiLabelClassificationTask, BigEarthNetDataModule),
- "bigearthnet_s2": (MultiLabelClassificationTask, BigEarthNetDataModule),
+ "bigearthnet": (MultiLabelClassificationTask, BigEarthNetDataModule),
"byol": (BYOLTask, ChesapeakeCVPRDataModule),
- "chesapeake_cvpr_5": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),
- "chesapeake_cvpr_7": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),
- "chesapeake_cvpr_prior": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),
+ "chesapeake_cvpr": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),
"cowc_counting": (RegressionTask, COWCCountingDataModule),
"cyclone": (RegressionTask, CycloneDataModule),
"eurosat": (ClassificationTask, EuroSATDataModule),
"etci2021": (SemanticSegmentationTask, ETCI2021DataModule),
"landcoverai": (SemanticSegmentationTask, LandCoverAIDataModule),
"naipchesapeake": (SemanticSegmentationTask, NAIPChesapeakeDataModule),
- "oscd_all": (SemanticSegmentationTask, OSCDDataModule),
- "oscd_rgb": (SemanticSegmentationTask, OSCDDataModule),
+ "oscd": (SemanticSegmentationTask, OSCDDataModule),
"resisc45": (ClassificationTask, RESISC45DataModule),
- "sen12ms_all": (SemanticSegmentationTask, SEN12MSDataModule),
- "sen12ms_s1": (SemanticSegmentationTask, SEN12MSDataModule),
- "sen12ms_s2_all": (SemanticSegmentationTask, SEN12MSDataModule),
- "sen12ms_s2_reduced": (SemanticSegmentationTask, SEN12MSDataModule),
- "so2sat_supervised": (ClassificationTask, So2SatDataModule),
- "so2sat_unsupervised": (ClassificationTask, So2SatDataModule),
+ "sen12ms": (SemanticSegmentationTask, SEN12MSDataModule),
+ "so2sat": (ClassificationTask, So2SatDataModule),
"ucmerced": (ClassificationTask, UCMercedDataModule),
}
@@ -104,7 +95,7 @@
# These OmegaConf structured configs enforce a schema at runtime, see:
# https://omegaconf.readthedocs.io/en/2.0_branch/structured_config.html#merging-with-other-configs
task_name = conf.experiment.task
- task_config_fn = os.path.join("conf", "task_defaults", f"{task_name}.yaml")
+ task_config_fn = os.path.join("conf", f"{task_name}.yaml")
if task_name == "test":
task_conf = OmegaConf.create()
elif os.path.exists(task_config_fn):
| {"golden_diff": "diff --git a/train.py b/train.py\n--- a/train.py\n+++ b/train.py\n@@ -39,28 +39,19 @@\n TASK_TO_MODULES_MAPPING: Dict[\n str, Tuple[Type[pl.LightningModule], Type[pl.LightningDataModule]]\n ] = {\n- \"bigearthnet_all\": (MultiLabelClassificationTask, BigEarthNetDataModule),\n- \"bigearthnet_s1\": (MultiLabelClassificationTask, BigEarthNetDataModule),\n- \"bigearthnet_s2\": (MultiLabelClassificationTask, BigEarthNetDataModule),\n+ \"bigearthnet\": (MultiLabelClassificationTask, BigEarthNetDataModule),\n \"byol\": (BYOLTask, ChesapeakeCVPRDataModule),\n- \"chesapeake_cvpr_5\": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),\n- \"chesapeake_cvpr_7\": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),\n- \"chesapeake_cvpr_prior\": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),\n+ \"chesapeake_cvpr\": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),\n \"cowc_counting\": (RegressionTask, COWCCountingDataModule),\n \"cyclone\": (RegressionTask, CycloneDataModule),\n \"eurosat\": (ClassificationTask, EuroSATDataModule),\n \"etci2021\": (SemanticSegmentationTask, ETCI2021DataModule),\n \"landcoverai\": (SemanticSegmentationTask, LandCoverAIDataModule),\n \"naipchesapeake\": (SemanticSegmentationTask, NAIPChesapeakeDataModule),\n- \"oscd_all\": (SemanticSegmentationTask, OSCDDataModule),\n- \"oscd_rgb\": (SemanticSegmentationTask, OSCDDataModule),\n+ \"oscd\": (SemanticSegmentationTask, OSCDDataModule),\n \"resisc45\": (ClassificationTask, RESISC45DataModule),\n- \"sen12ms_all\": (SemanticSegmentationTask, SEN12MSDataModule),\n- \"sen12ms_s1\": (SemanticSegmentationTask, SEN12MSDataModule),\n- \"sen12ms_s2_all\": (SemanticSegmentationTask, SEN12MSDataModule),\n- \"sen12ms_s2_reduced\": (SemanticSegmentationTask, SEN12MSDataModule),\n- \"so2sat_supervised\": (ClassificationTask, So2SatDataModule),\n- \"so2sat_unsupervised\": (ClassificationTask, So2SatDataModule),\n+ \"sen12ms\": (SemanticSegmentationTask, SEN12MSDataModule),\n+ \"so2sat\": (ClassificationTask, So2SatDataModule),\n \"ucmerced\": (ClassificationTask, UCMercedDataModule),\n }\n \n@@ -104,7 +95,7 @@\n # These OmegaConf structured configs enforce a schema at runtime, see:\n # https://omegaconf.readthedocs.io/en/2.0_branch/structured_config.html#merging-with-other-configs\n task_name = conf.experiment.task\n- task_config_fn = os.path.join(\"conf\", \"task_defaults\", f\"{task_name}.yaml\")\n+ task_config_fn = os.path.join(\"conf\", f\"{task_name}.yaml\")\n if task_name == \"test\":\n task_conf = OmegaConf.create()\n elif os.path.exists(task_config_fn):\n", "issue": "Re-think how configs are handled in train.py\nCurrently configuration to `train.py` is handled with [OmegaConf](https://omegaconf.readthedocs.io/en/2.1_branch/usage.html). This made more sense when the tasks (and accompanying trainer code) were fragmented, as we could easily define per-task configuration. Now that the trainer code that we would like to include in base TorchGeo are being generalized into things like `ClassificationTask` and `SemanticSegmentationTask` _and_ it is clear that more complicated training configurations won't be supported by torchgeo proper, it might make sense to pull out the OmegaConf part, and go with a more simple `argparse` based approach. Bonus: this would also allow us to get rid of a dependency. 
I'm not sure how exactly the argparse approach would work in all cases but it is worth more thought!\r\n\r\nLightning has a few pieces of docs that can help with this:\r\n- https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-in-python-scripts\r\n- https://pytorch-lightning.readthedocs.io/en/stable/common/hyperparameters.html\r\n- https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_cli.html\r\n\r\nWhatever we settle on here should definitely still allow passing arguments via a YAML config file. This allows reproducible benchmark experiment configurations to be saved in source control.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"torchgeo model training script.\"\"\"\n\nimport os\nfrom typing import Any, Dict, Tuple, Type, cast\n\nimport pytorch_lightning as pl\nfrom omegaconf import DictConfig, OmegaConf\nfrom pytorch_lightning import loggers as pl_loggers\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\n\nfrom torchgeo.datamodules import (\n BigEarthNetDataModule,\n ChesapeakeCVPRDataModule,\n COWCCountingDataModule,\n CycloneDataModule,\n ETCI2021DataModule,\n EuroSATDataModule,\n LandCoverAIDataModule,\n NAIPChesapeakeDataModule,\n OSCDDataModule,\n RESISC45DataModule,\n SEN12MSDataModule,\n So2SatDataModule,\n UCMercedDataModule,\n)\nfrom torchgeo.trainers import (\n BYOLTask,\n ClassificationTask,\n MultiLabelClassificationTask,\n RegressionTask,\n SemanticSegmentationTask,\n)\n\nTASK_TO_MODULES_MAPPING: Dict[\n str, Tuple[Type[pl.LightningModule], Type[pl.LightningDataModule]]\n] = {\n \"bigearthnet_all\": (MultiLabelClassificationTask, BigEarthNetDataModule),\n \"bigearthnet_s1\": (MultiLabelClassificationTask, BigEarthNetDataModule),\n \"bigearthnet_s2\": (MultiLabelClassificationTask, BigEarthNetDataModule),\n \"byol\": (BYOLTask, ChesapeakeCVPRDataModule),\n \"chesapeake_cvpr_5\": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),\n \"chesapeake_cvpr_7\": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),\n \"chesapeake_cvpr_prior\": (SemanticSegmentationTask, ChesapeakeCVPRDataModule),\n \"cowc_counting\": (RegressionTask, COWCCountingDataModule),\n \"cyclone\": (RegressionTask, CycloneDataModule),\n \"eurosat\": (ClassificationTask, EuroSATDataModule),\n \"etci2021\": (SemanticSegmentationTask, ETCI2021DataModule),\n \"landcoverai\": (SemanticSegmentationTask, LandCoverAIDataModule),\n \"naipchesapeake\": (SemanticSegmentationTask, NAIPChesapeakeDataModule),\n \"oscd_all\": (SemanticSegmentationTask, OSCDDataModule),\n \"oscd_rgb\": (SemanticSegmentationTask, OSCDDataModule),\n \"resisc45\": (ClassificationTask, RESISC45DataModule),\n \"sen12ms_all\": (SemanticSegmentationTask, SEN12MSDataModule),\n \"sen12ms_s1\": (SemanticSegmentationTask, SEN12MSDataModule),\n \"sen12ms_s2_all\": (SemanticSegmentationTask, SEN12MSDataModule),\n \"sen12ms_s2_reduced\": (SemanticSegmentationTask, SEN12MSDataModule),\n \"so2sat_supervised\": (ClassificationTask, So2SatDataModule),\n \"so2sat_unsupervised\": (ClassificationTask, So2SatDataModule),\n \"ucmerced\": (ClassificationTask, UCMercedDataModule),\n}\n\n\ndef set_up_omegaconf() -> DictConfig:\n \"\"\"Loads program arguments from either YAML config files or command line arguments.\n\n This method loads defaults/a schema from \"conf/defaults.yaml\" as well as potential\n arguments from the command line. 
If one of the command line arguments is\n \"config_file\", then we additionally read arguments from that YAML file. One of the\n config file based arguments or command line arguments must specify task.name. The\n task.name value is used to grab a task specific defaults from its respective\n trainer. The final configuration is given as merge(task_defaults, defaults,\n config file, command line). The merge() works from the first argument to the last,\n replacing existing values with newer values. Additionally, if any values are\n merged into task_defaults without matching types, then there will be a runtime\n error.\n\n Returns:\n an OmegaConf DictConfig containing all the validated program arguments\n\n Raises:\n FileNotFoundError: when ``config_file`` does not exist\n ValueError: when ``task.name`` is not a valid task\n \"\"\"\n conf = OmegaConf.load(\"conf/defaults.yaml\")\n command_line_conf = OmegaConf.from_cli()\n\n if \"config_file\" in command_line_conf:\n config_fn = command_line_conf.config_file\n if not os.path.isfile(config_fn):\n raise FileNotFoundError(f\"config_file={config_fn} is not a valid file\")\n\n user_conf = OmegaConf.load(config_fn)\n conf = OmegaConf.merge(conf, user_conf)\n\n conf = OmegaConf.merge( # Merge in any arguments passed via the command line\n conf, command_line_conf\n )\n\n # These OmegaConf structured configs enforce a schema at runtime, see:\n # https://omegaconf.readthedocs.io/en/2.0_branch/structured_config.html#merging-with-other-configs\n task_name = conf.experiment.task\n task_config_fn = os.path.join(\"conf\", \"task_defaults\", f\"{task_name}.yaml\")\n if task_name == \"test\":\n task_conf = OmegaConf.create()\n elif os.path.exists(task_config_fn):\n task_conf = cast(DictConfig, OmegaConf.load(task_config_fn))\n else:\n raise ValueError(\n f\"experiment.task={task_name} is not recognized as a valid task\"\n )\n\n conf = OmegaConf.merge(task_conf, conf)\n conf = cast(DictConfig, conf) # convince mypy that everything is alright\n\n return conf\n\n\ndef main(conf: DictConfig) -> None:\n \"\"\"Main training loop.\"\"\"\n ######################################\n # Setup output directory\n ######################################\n\n experiment_name = conf.experiment.name\n task_name = conf.experiment.task\n if os.path.isfile(conf.program.output_dir):\n raise NotADirectoryError(\"`program.output_dir` must be a directory\")\n os.makedirs(conf.program.output_dir, exist_ok=True)\n\n experiment_dir = os.path.join(conf.program.output_dir, experiment_name)\n os.makedirs(experiment_dir, exist_ok=True)\n\n if len(os.listdir(experiment_dir)) > 0:\n if conf.program.overwrite:\n print(\n f\"WARNING! The experiment directory, {experiment_dir}, already exists, \"\n + \"we might overwrite data in it!\"\n )\n else:\n raise FileExistsError(\n f\"The experiment directory, {experiment_dir}, already exists and isn't \"\n + \"empty. 
We don't want to overwrite any existing results, exiting...\"\n )\n\n with open(os.path.join(experiment_dir, \"experiment_config.yaml\"), \"w\") as f:\n OmegaConf.save(config=conf, f=f)\n\n ######################################\n # Choose task to run based on arguments or configuration\n ######################################\n # Convert the DictConfig into a dictionary so that we can pass as kwargs.\n task_args = cast(Dict[str, Any], OmegaConf.to_object(conf.experiment.module))\n datamodule_args = cast(\n Dict[str, Any], OmegaConf.to_object(conf.experiment.datamodule)\n )\n\n datamodule: pl.LightningDataModule\n task: pl.LightningModule\n if task_name in TASK_TO_MODULES_MAPPING:\n task_class, datamodule_class = TASK_TO_MODULES_MAPPING[task_name]\n task = task_class(**task_args)\n datamodule = datamodule_class(**datamodule_args)\n else:\n raise ValueError(\n f\"experiment.task={task_name} is not recognized as a valid task\"\n )\n\n ######################################\n # Setup trainer\n ######################################\n tb_logger = pl_loggers.TensorBoardLogger(conf.program.log_dir, name=experiment_name)\n\n checkpoint_callback = ModelCheckpoint(\n monitor=\"val_loss\", dirpath=experiment_dir, save_top_k=1, save_last=True\n )\n early_stopping_callback = EarlyStopping(\n monitor=\"val_loss\", min_delta=0.00, patience=18\n )\n\n trainer_args = cast(Dict[str, Any], OmegaConf.to_object(conf.trainer))\n\n trainer_args[\"callbacks\"] = [checkpoint_callback, early_stopping_callback]\n trainer_args[\"logger\"] = tb_logger\n trainer_args[\"default_root_dir\"] = experiment_dir\n trainer = pl.Trainer(**trainer_args)\n\n if trainer_args.get(\"auto_lr_find\"):\n trainer.tune(model=task, datamodule=datamodule)\n\n ######################################\n # Run experiment\n ######################################\n trainer.fit(model=task, datamodule=datamodule)\n trainer.test(model=task, datamodule=datamodule)\n\n\nif __name__ == \"__main__\":\n # Taken from https://github.com/pangeo-data/cog-best-practices\n _rasterio_best_practices = {\n \"GDAL_DISABLE_READDIR_ON_OPEN\": \"EMPTY_DIR\",\n \"AWS_NO_SIGN_REQUEST\": \"YES\",\n \"GDAL_MAX_RAW_BLOCK_CACHE_SIZE\": \"200000000\",\n \"GDAL_SWATH_SIZE\": \"200000000\",\n \"VSI_CURL_CACHE_SIZE\": \"200000000\",\n }\n os.environ.update(_rasterio_best_practices)\n\n conf = set_up_omegaconf()\n\n # Set random seed for reproducibility\n # https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.utilities.seed.html#pytorch_lightning.utilities.seed.seed_everything\n pl.seed_everything(conf.program.seed)\n\n # Main training procedure\n main(conf)\n", "path": "train.py"}]} | 3,494 | 756 |
gh_patches_debug_7093 | rasdani/github-patches | git_diff | ckan__ckan-260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Recline does not preview datastore anymore
The new plugin does not evaluate `datastore_active`.
<!---
@huboard:{"order":247.0}
-->
Recline does not preview datastore anymore
The new plugin does not evaluate `datastore_active`.
<!---
@huboard:{"order":247.0}
-->
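
A minimal sketch of the missing check, assuming the resource dict handed to the preview plugin carries a truthy `datastore_active` flag once the resource has been pushed to the DataStore (this mirrors the check the accepted patch below adds to `ReclinePreview.can_preview`):

```python
def can_preview_resource(resource):
    """Decide whether Recline can preview a resource dict (sketch)."""
    # Anything stored in the DataStore is previewable, regardless of its
    # declared file format.
    if resource.get("datastore_active"):
        return True
    # Otherwise fall back to the plain format whitelist.
    return resource.get("format", "").lower() in ("csv", "xls", "tsv")
```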
</issue>
<code>
[start of ckanext/reclinepreview/plugin.py]
1 from logging import getLogger
2
3 import ckan.plugins as p
4 import ckan.plugins.toolkit as toolkit
5
6 log = getLogger(__name__)
7
8
9 class ReclinePreview(p.SingletonPlugin):
10 """This extension previews resources using recline
11
12 This extension implements two interfaces
13
14 - ``IConfigurer`` allows to modify the configuration
15 - ``IResourcePreview`` allows to add previews
16 """
17 p.implements(p.IConfigurer, inherit=True)
18 p.implements(p.IResourcePreview, inherit=True)
19
20 def update_config(self, config):
21 ''' Set up the resource library, public directory and
22 template directory for the preview
23 '''
24 toolkit.add_public_directory(config, 'theme/public')
25 toolkit.add_template_directory(config, 'theme/templates')
26 toolkit.add_resource('theme/public', 'ckanext-reclinepreview')
27
28 def can_preview(self, data_dict):
29 format_lower = data_dict['resource']['format'].lower()
30 return format_lower in ['csv', 'xls', 'tsv']
31
32 def preview_template(self, context, data_dict):
33 return 'recline.html'
34
[end of ckanext/reclinepreview/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext/reclinepreview/plugin.py b/ckanext/reclinepreview/plugin.py
--- a/ckanext/reclinepreview/plugin.py
+++ b/ckanext/reclinepreview/plugin.py
@@ -26,6 +26,9 @@
toolkit.add_resource('theme/public', 'ckanext-reclinepreview')
def can_preview(self, data_dict):
+ # if the resource is in the datastore then we can preview it with recline
+ if data_dict['resource'].get('datastore_active'):
+ return True
format_lower = data_dict['resource']['format'].lower()
return format_lower in ['csv', 'xls', 'tsv']
| {"golden_diff": "diff --git a/ckanext/reclinepreview/plugin.py b/ckanext/reclinepreview/plugin.py\n--- a/ckanext/reclinepreview/plugin.py\n+++ b/ckanext/reclinepreview/plugin.py\n@@ -26,6 +26,9 @@\n toolkit.add_resource('theme/public', 'ckanext-reclinepreview')\n \n def can_preview(self, data_dict):\n+ # if the resource is in the datastore then we can preview it with recline\n+ if data_dict['resource'].get('datastore_active'):\n+ return True\n format_lower = data_dict['resource']['format'].lower()\n return format_lower in ['csv', 'xls', 'tsv']\n", "issue": "Recline does not preview datastore anymore\nThe new plugin does not evaluate `datastore_active`.\n\n<!---\n@huboard:{\"order\":247.0}\n-->\n\nRecline does not preview datastore anymore\nThe new plugin does not evaluate `datastore_active`.\n\n<!---\n@huboard:{\"order\":247.0}\n-->\n\n", "before_files": [{"content": "from logging import getLogger\n\nimport ckan.plugins as p\nimport ckan.plugins.toolkit as toolkit\n\nlog = getLogger(__name__)\n\n\nclass ReclinePreview(p.SingletonPlugin):\n \"\"\"This extension previews resources using recline\n\n This extension implements two interfaces\n\n - ``IConfigurer`` allows to modify the configuration\n - ``IResourcePreview`` allows to add previews\n \"\"\"\n p.implements(p.IConfigurer, inherit=True)\n p.implements(p.IResourcePreview, inherit=True)\n\n def update_config(self, config):\n ''' Set up the resource library, public directory and\n template directory for the preview\n '''\n toolkit.add_public_directory(config, 'theme/public')\n toolkit.add_template_directory(config, 'theme/templates')\n toolkit.add_resource('theme/public', 'ckanext-reclinepreview')\n\n def can_preview(self, data_dict):\n format_lower = data_dict['resource']['format'].lower()\n return format_lower in ['csv', 'xls', 'tsv']\n\n def preview_template(self, context, data_dict):\n return 'recline.html'\n", "path": "ckanext/reclinepreview/plugin.py"}]} | 903 | 152 |
gh_patches_debug_1229 | rasdani/github-patches | git_diff | streamlit__streamlit-6348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
experimental_get_query_params won't work before rerun
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
The user cannot get the correct query params before a rerun.
### Reproducible Code Example
```Python
import streamlit as st
st.experimental_set_query_params(param=3)
st.write(st.experimental_get_query_params())
```
### Steps To Reproduce
Run the script: `{"param ": 3}` does not appear on the first run; it only shows up after a rerun, once the query string in the browser has already changed.
### Expected Behavior
Show `{"param ": 3}`
### Current Behavior
show empty dict
### Is this a regression?
- [X] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.20.0
- Python version: 3.10.6
- Operating System: Linux
- Browser: Chrome
- Virtual environment: None
### Additional Information
In the previous version, `set_query_params` set `ctx.query_string = parse.urlencode(query_params, doseq=True)` immediately.
But in 1.20 this line was removed, while `get_query_params` still reads the value from `ctx.query_string`.
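
A sketch of the one-line restoration this points at, reusing `_ensure_no_embed_params` and the imports from `query_params.py` shown below, and assuming the script-run context is the right place to cache the new value:

```python
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.scriptrunner import get_script_run_ctx


def set_query_params(**query_params):
    ctx = get_script_run_ctx()
    if ctx is None:
        return

    msg = ForwardMsg()
    msg.page_info_changed.query_string = _ensure_no_embed_params(
        query_params, ctx.query_string
    )
    # Keep the script-run context in sync immediately, so a later
    # get_query_params() call in the same run reads the new value rather
    # than the stale query string from before the rerun.
    ctx.query_string = msg.page_info_changed.query_string
    ctx.enqueue(msg)
```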
### Are you willing to submit a PR?
- [x] Yes, I am willing to submit a PR!
</issue>
<code>
[start of lib/streamlit/commands/query_params.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import urllib.parse as parse
16 from typing import Any, Dict, List
17
18 from streamlit import util
19 from streamlit.errors import StreamlitAPIException
20 from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
21 from streamlit.runtime.metrics_util import gather_metrics
22 from streamlit.runtime.scriptrunner import get_script_run_ctx
23
24 EMBED_QUERY_PARAM = "embed"
25 EMBED_OPTIONS_QUERY_PARAM = "embed_options"
26 EMBED_QUERY_PARAMS_KEYS = [EMBED_QUERY_PARAM, EMBED_OPTIONS_QUERY_PARAM]
27
28
29 @gather_metrics("experimental_get_query_params")
30 def get_query_params() -> Dict[str, List[str]]:
31 """Return the query parameters that is currently showing in the browser's URL bar.
32
33 Returns
34 -------
35 dict
36 The current query parameters as a dict. "Query parameters" are the part of the URL that comes
37 after the first "?".
38
39 Example
40 -------
41 Let's say the user's web browser is at
42 `http://localhost:8501/?show_map=True&selected=asia&selected=america`.
43 Then, you can get the query parameters using the following:
44
45 >>> import streamlit as st
46 >>>
47 >>> st.experimental_get_query_params()
48 {"show_map": ["True"], "selected": ["asia", "america"]}
49
50 Note that the values in the returned dict are *always* lists. This is
51 because we internally use Python's urllib.parse.parse_qs(), which behaves
52 this way. And this behavior makes sense when you consider that every item
53 in a query string is potentially a 1-element array.
54
55 """
56 ctx = get_script_run_ctx()
57 if ctx is None:
58 return {}
59 # Return new query params dict, but without embed, embed_options query params
60 return util.exclude_key_query_params(
61 parse.parse_qs(ctx.query_string), keys_to_exclude=EMBED_QUERY_PARAMS_KEYS
62 )
63
64
65 @gather_metrics("experimental_set_query_params")
66 def set_query_params(**query_params: Any) -> None:
67 """Set the query parameters that are shown in the browser's URL bar.
68
69 .. warning::
70 Query param `embed` cannot be set using this method.
71
72 Parameters
73 ----------
74 **query_params : dict
75 The query parameters to set, as key-value pairs.
76
77 Example
78 -------
79
80 To point the user's web browser to something like
81 "http://localhost:8501/?show_map=True&selected=asia&selected=america",
82 you would do the following:
83
84 >>> import streamlit as st
85 >>>
86 >>> st.experimental_set_query_params(
87 ... show_map=True,
88 ... selected=["asia", "america"],
89 ... )
90
91 """
92 ctx = get_script_run_ctx()
93 if ctx is None:
94 return
95
96 msg = ForwardMsg()
97 msg.page_info_changed.query_string = _ensure_no_embed_params(
98 query_params, ctx.query_string
99 )
100 ctx.enqueue(msg)
101
102
103 def _ensure_no_embed_params(
104 query_params: Dict[str, List[str]], query_string: str
105 ) -> str:
106 """Ensures there are no embed params set (raises StreamlitAPIException) if there is a try,
107 also makes sure old param values in query_string are preserved. Returns query_string : str."""
108 # Get query params dict without embed, embed_options params
109 query_params_without_embed = util.exclude_key_query_params(
110 query_params, keys_to_exclude=EMBED_QUERY_PARAMS_KEYS
111 )
112 if query_params != query_params_without_embed:
113 raise StreamlitAPIException(
114 "Query param embed and embed_options (case-insensitive) cannot be set using set_query_params method."
115 )
116
117 all_current_params = parse.parse_qs(query_string)
118 current_embed_params = parse.urlencode(
119 {
120 EMBED_QUERY_PARAM: [
121 param
122 for param in util.extract_key_query_params(
123 all_current_params, param_key=EMBED_QUERY_PARAM
124 )
125 ],
126 EMBED_OPTIONS_QUERY_PARAM: [
127 param
128 for param in util.extract_key_query_params(
129 all_current_params, param_key=EMBED_OPTIONS_QUERY_PARAM
130 )
131 ],
132 },
133 doseq=True,
134 )
135 query_string = parse.urlencode(query_params, doseq=True)
136
137 if query_string:
138 separator = "&" if current_embed_params else ""
139 return separator.join([query_string, current_embed_params])
140 return current_embed_params
141
[end of lib/streamlit/commands/query_params.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/commands/query_params.py b/lib/streamlit/commands/query_params.py
--- a/lib/streamlit/commands/query_params.py
+++ b/lib/streamlit/commands/query_params.py
@@ -97,6 +97,7 @@
msg.page_info_changed.query_string = _ensure_no_embed_params(
query_params, ctx.query_string
)
+ ctx.query_string = msg.page_info_changed.query_string
ctx.enqueue(msg)
| {"golden_diff": "diff --git a/lib/streamlit/commands/query_params.py b/lib/streamlit/commands/query_params.py\n--- a/lib/streamlit/commands/query_params.py\n+++ b/lib/streamlit/commands/query_params.py\n@@ -97,6 +97,7 @@\n msg.page_info_changed.query_string = _ensure_no_embed_params(\n query_params, ctx.query_string\n )\n+ ctx.query_string = msg.page_info_changed.query_string\n ctx.enqueue(msg)\n", "issue": " experimental_get_query_params won't work before rerun \n### Checklist\n\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\n- [X] I added a very descriptive title to this issue.\n- [X] I have provided sufficient information below to help reproduce this issue.\n\n### Summary\n\nUser can not get right query_params before rerun.\n\n### Reproducible Code Example\n\n```Python\nimport streamlit as st\r\n\r\nst.experimental_set_query_params(param=3)\r\nst.write(st.experimental_get_query_params())\n```\n\n\n### Steps To Reproduce\n\nRun script, `{\"param \": 3}` will not appear at first time until rerun script after querystring in browser already changed.\n\n### Expected Behavior\n\nShow `{\"param \": 3}`\n\n### Current Behavior\n\nshow empty dict\n\n### Is this a regression?\n\n- [X] Yes, this used to work in a previous version.\n\n### Debug info\n\n- Streamlit version: 1.20.0\r\n- Python version: 3.10.6\r\n- Operating System: Linux\r\n- Browser: Chrome\r\n- Virtual environment: None\r\n\n\n### Additional Information\n\nIn previous version `set_query_params` will set `ctx.query_string = parse.urlencode(query_params, doseq=True)` immediately.\r\n\r\nBut in 1.20, this line is removed while `get_query_params` still get if from `ctx.query_string` .\n\n### Are you willing to submit a PR?\n\n- [x] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport urllib.parse as parse\nfrom typing import Any, Dict, List\n\nfrom streamlit import util\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.ForwardMsg_pb2 import ForwardMsg\nfrom streamlit.runtime.metrics_util import gather_metrics\nfrom streamlit.runtime.scriptrunner import get_script_run_ctx\n\nEMBED_QUERY_PARAM = \"embed\"\nEMBED_OPTIONS_QUERY_PARAM = \"embed_options\"\nEMBED_QUERY_PARAMS_KEYS = [EMBED_QUERY_PARAM, EMBED_OPTIONS_QUERY_PARAM]\n\n\n@gather_metrics(\"experimental_get_query_params\")\ndef get_query_params() -> Dict[str, List[str]]:\n \"\"\"Return the query parameters that is currently showing in the browser's URL bar.\n\n Returns\n -------\n dict\n The current query parameters as a dict. 
\"Query parameters\" are the part of the URL that comes\n after the first \"?\".\n\n Example\n -------\n Let's say the user's web browser is at\n `http://localhost:8501/?show_map=True&selected=asia&selected=america`.\n Then, you can get the query parameters using the following:\n\n >>> import streamlit as st\n >>>\n >>> st.experimental_get_query_params()\n {\"show_map\": [\"True\"], \"selected\": [\"asia\", \"america\"]}\n\n Note that the values in the returned dict are *always* lists. This is\n because we internally use Python's urllib.parse.parse_qs(), which behaves\n this way. And this behavior makes sense when you consider that every item\n in a query string is potentially a 1-element array.\n\n \"\"\"\n ctx = get_script_run_ctx()\n if ctx is None:\n return {}\n # Return new query params dict, but without embed, embed_options query params\n return util.exclude_key_query_params(\n parse.parse_qs(ctx.query_string), keys_to_exclude=EMBED_QUERY_PARAMS_KEYS\n )\n\n\n@gather_metrics(\"experimental_set_query_params\")\ndef set_query_params(**query_params: Any) -> None:\n \"\"\"Set the query parameters that are shown in the browser's URL bar.\n\n .. warning::\n Query param `embed` cannot be set using this method.\n\n Parameters\n ----------\n **query_params : dict\n The query parameters to set, as key-value pairs.\n\n Example\n -------\n\n To point the user's web browser to something like\n \"http://localhost:8501/?show_map=True&selected=asia&selected=america\",\n you would do the following:\n\n >>> import streamlit as st\n >>>\n >>> st.experimental_set_query_params(\n ... show_map=True,\n ... selected=[\"asia\", \"america\"],\n ... )\n\n \"\"\"\n ctx = get_script_run_ctx()\n if ctx is None:\n return\n\n msg = ForwardMsg()\n msg.page_info_changed.query_string = _ensure_no_embed_params(\n query_params, ctx.query_string\n )\n ctx.enqueue(msg)\n\n\ndef _ensure_no_embed_params(\n query_params: Dict[str, List[str]], query_string: str\n) -> str:\n \"\"\"Ensures there are no embed params set (raises StreamlitAPIException) if there is a try,\n also makes sure old param values in query_string are preserved. Returns query_string : str.\"\"\"\n # Get query params dict without embed, embed_options params\n query_params_without_embed = util.exclude_key_query_params(\n query_params, keys_to_exclude=EMBED_QUERY_PARAMS_KEYS\n )\n if query_params != query_params_without_embed:\n raise StreamlitAPIException(\n \"Query param embed and embed_options (case-insensitive) cannot be set using set_query_params method.\"\n )\n\n all_current_params = parse.parse_qs(query_string)\n current_embed_params = parse.urlencode(\n {\n EMBED_QUERY_PARAM: [\n param\n for param in util.extract_key_query_params(\n all_current_params, param_key=EMBED_QUERY_PARAM\n )\n ],\n EMBED_OPTIONS_QUERY_PARAM: [\n param\n for param in util.extract_key_query_params(\n all_current_params, param_key=EMBED_OPTIONS_QUERY_PARAM\n )\n ],\n },\n doseq=True,\n )\n query_string = parse.urlencode(query_params, doseq=True)\n\n if query_string:\n separator = \"&\" if current_embed_params else \"\"\n return separator.join([query_string, current_embed_params])\n return current_embed_params\n", "path": "lib/streamlit/commands/query_params.py"}]} | 2,276 | 98 |
gh_patches_debug_18458 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-603 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
I miss one with C-Trace.de/WZV
Hello guys,
I just switched from ics to C-Trace.de. Since then, unfortunately, it no longer shows me all the bins: the residual waste is missing, while everything else is displayed as usual. Can someone help me?
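
For context, the source builds the C-Trace request's `Abfall` parameter from a numeric range of waste-type ids; a hedged guess is that the residual-waste bin has an id that falls outside the requested range, e.g.:

```python
# ids 1..98 -- silently drops any waste type with id 0
"|".join(str(i) for i in range(1, 99))

# ids 0..98 -- includes it again
"|".join(str(i) for i in range(0, 99))
```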
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py]
1 import requests
2 from waste_collection_schedule import Collection # type: ignore[attr-defined]
3 from waste_collection_schedule.service.ICS import ICS
4
5 TITLE = "C-Trace"
6 DESCRIPTION = "Source for C-Trace.de."
7 URL = "https://c-trace.de/"
8 EXTRA_INFO = [
9 {
10 "title": "Bremener Stadreinigung",
11 "url": "https://www.die-bremer-stadtreinigung.de/",
12 },
13 {
14 "title": "AWB Landkreis Augsburg",
15 "url": "https://www.awb-landkreis-augsburg.de/",
16 },
17 {
18 "title": "WZV Kreis Segeberg",
19 "url": "https://www.wzv.de/",
20 },
21 ]
22 TEST_CASES = {
23 "Bremen": {"ort": "Bremen", "strasse": "Abbentorstraße", "hausnummer": 5},
24 "AugsburgLand": {
25 "ort": "Königsbrunn",
26 "strasse": "Marktplatz",
27 "hausnummer": 7,
28 "service": "augsburglandkreis",
29 },
30 }
31
32
33 BASE_URL = "https://web.c-trace.de"
34
35
36 class Source:
37 def __init__(self, ort, strasse, hausnummer, service=None):
38 # Compatibility handling for Bremen which was the first supported
39 # district and didn't require to set a service name.
40 if service is None:
41 if ort == "Bremen":
42 service = "bremenabfallkalender"
43 else:
44 raise Exception("service is missing")
45
46 self._service = service
47 self._ort = ort
48 self._strasse = strasse
49 self._hausnummer = hausnummer
50 self._ics = ICS(regex=r"Abfuhr: (.*)")
51
52 def fetch(self):
53 session = requests.session()
54
55 # get session url
56 r = session.get(
57 f"{BASE_URL}/{self._service}/Abfallkalender",
58 allow_redirects=False,
59 )
60 session_id = r.headers["location"].split("/")[
61 2
62 ] # session_id like "(S(r3bme50igdgsp2lstgxxhvs2))"
63
64 args = {
65 "Ort": self._ort,
66 "Gemeinde": self._ort,
67 "Strasse": self._strasse,
68 "Hausnr": self._hausnummer,
69 "Abfall": "|".join(str(i) for i in range(1, 99)), # return all waste types
70 }
71 r = session.get(
72 f"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal", params=args
73 )
74 r.raise_for_status()
75
76 # parse ics file
77 r.encoding = "utf-8"
78 dates = self._ics.convert(r.text)
79
80 entries = []
81 for d in dates:
82 entries.append(Collection(d[0], d[1]))
83 return entries
84
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py
@@ -27,6 +27,12 @@
"hausnummer": 7,
"service": "augsburglandkreis",
},
+ "WZV": {
+ "ort": "Bark",
+ "strasse": "Birkenweg",
+ "hausnummer": 1,
+ "service": "segebergwzv-abfallkalender",
+ },
}
@@ -66,7 +72,7 @@
"Gemeinde": self._ort,
"Strasse": self._strasse,
"Hausnr": self._hausnummer,
- "Abfall": "|".join(str(i) for i in range(1, 99)), # return all waste types
+ "Abfall": "|".join(str(i) for i in range(0, 99)), # return all waste types
}
r = session.get(
f"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal", params=args
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py\n@@ -27,6 +27,12 @@\n \"hausnummer\": 7,\n \"service\": \"augsburglandkreis\",\n },\n+ \"WZV\": {\n+ \"ort\": \"Bark\",\n+ \"strasse\": \"Birkenweg\",\n+ \"hausnummer\": 1,\n+ \"service\": \"segebergwzv-abfallkalender\",\n+ },\n }\n \n \n@@ -66,7 +72,7 @@\n \"Gemeinde\": self._ort,\n \"Strasse\": self._strasse,\n \"Hausnr\": self._hausnummer,\n- \"Abfall\": \"|\".join(str(i) for i in range(1, 99)), # return all waste types\n+ \"Abfall\": \"|\".join(str(i) for i in range(0, 99)), # return all waste types\n }\n r = session.get(\n f\"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal\", params=args\n", "issue": "I miss one with C-Trace.de/WZV\nHello guys,\r\n\r\nI just switched from ics to C-Trace.de. Since then, unfortunately, it no longer shows me all the bins. I'm missing the residual waste, everything else is displayed as usual. Can someone help me?\r\n\r\n\n", "before_files": [{"content": "import requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"C-Trace\"\nDESCRIPTION = \"Source for C-Trace.de.\"\nURL = \"https://c-trace.de/\"\nEXTRA_INFO = [\n {\n \"title\": \"Bremener Stadreinigung\",\n \"url\": \"https://www.die-bremer-stadtreinigung.de/\",\n },\n {\n \"title\": \"AWB Landkreis Augsburg\",\n \"url\": \"https://www.awb-landkreis-augsburg.de/\",\n },\n {\n \"title\": \"WZV Kreis Segeberg\",\n \"url\": \"https://www.wzv.de/\",\n },\n]\nTEST_CASES = {\n \"Bremen\": {\"ort\": \"Bremen\", \"strasse\": \"Abbentorstra\u00dfe\", \"hausnummer\": 5},\n \"AugsburgLand\": {\n \"ort\": \"K\u00f6nigsbrunn\",\n \"strasse\": \"Marktplatz\",\n \"hausnummer\": 7,\n \"service\": \"augsburglandkreis\",\n },\n}\n\n\nBASE_URL = \"https://web.c-trace.de\"\n\n\nclass Source:\n def __init__(self, ort, strasse, hausnummer, service=None):\n # Compatibility handling for Bremen which was the first supported\n # district and didn't require to set a service name.\n if service is None:\n if ort == \"Bremen\":\n service = \"bremenabfallkalender\"\n else:\n raise Exception(\"service is missing\")\n\n self._service = service\n self._ort = ort\n self._strasse = strasse\n self._hausnummer = hausnummer\n self._ics = ICS(regex=r\"Abfuhr: (.*)\")\n\n def fetch(self):\n session = requests.session()\n\n # get session url\n r = session.get(\n f\"{BASE_URL}/{self._service}/Abfallkalender\",\n allow_redirects=False,\n )\n session_id = r.headers[\"location\"].split(\"/\")[\n 2\n ] # session_id like \"(S(r3bme50igdgsp2lstgxxhvs2))\"\n\n args = {\n \"Ort\": self._ort,\n \"Gemeinde\": self._ort,\n \"Strasse\": self._strasse,\n \"Hausnr\": self._hausnummer,\n \"Abfall\": \"|\".join(str(i) for i in range(1, 99)), # return all waste types\n }\n r = session.get(\n f\"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal\", params=args\n )\n r.raise_for_status()\n\n # parse ics file\n r.encoding = \"utf-8\"\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": 
"custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py"}]} | 1,432 | 300 |
gh_patches_debug_24900 | rasdani/github-patches | git_diff | liberapay__liberapay.com-502 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add support for xmpp: uri in markdown syntax
When adding an XMPP URI in the following form:
`[[email protected]](xmpp:[email protected]?join)`
the URI is shown as raw text instead of linking to the room as expected.
add support for xmpp: uri in markdown syntax
When adding an XMPP URI in the following form:
`[[email protected]](xmpp:[email protected]?join)`
the URI is shown as raw text instead of linking to the room as expected.
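
A minimal sketch of a scheme whitelist that would treat `xmpp:` like `http(s)`, assuming link safety is decided by a prefix check on the URL (the addresses below are placeholders):

```python
import re

# http(s) plus xmpp are considered safe link targets; everything else
# (e.g. javascript:) is rejected.
SAFE_URL_RE = re.compile(r"^(https?|xmpp):")


def is_safe_link(url: str) -> bool:
    return bool(SAFE_URL_RE.match(url))


assert is_safe_link("xmpp:[email protected]?join")  # placeholder room address
assert not is_safe_link("javascript:alert(1)")
```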
</issue>
<code>
[start of liberapay/utils/markdown.py]
1 from markupsafe import Markup
2 import misaka as m # http://misaka.61924.nl/
3
4 def render(markdown):
5 return Markup(m.html(
6 markdown,
7 extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,
8 render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK
9 ))
10
[end of liberapay/utils/markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/utils/markdown.py b/liberapay/utils/markdown.py
--- a/liberapay/utils/markdown.py
+++ b/liberapay/utils/markdown.py
@@ -1,9 +1,41 @@
-from markupsafe import Markup
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import re
+
+from markupsafe import Markup, escape
import misaka as m # http://misaka.61924.nl/
+
+url_re = re.compile(r'^(https?|xmpp):')
+
+
+class CustomRenderer(m.HtmlRenderer):
+
+ def image(self, link, title='', alt=''):
+ if url_re.match(link):
+ maybe_alt = Markup(' alt="%s"') % alt if alt else ''
+ maybe_title = Markup(' title="%s"') % title if title else ''
+ return Markup('<img src="%s"%s%s />') % (link, maybe_alt, maybe_title)
+ else:
+ return escape("" % (alt, link))
+
+ def link(self, content, link, title=''):
+ if url_re.match(link):
+ maybe_title = Markup(' title="%s"') % title if title else ''
+ return Markup('<a href="%s"%s>%s</a>') % (link, maybe_title, content)
+ else:
+ return escape("[%s](%s)" % (content, link))
+
+ def autolink(self, link, is_email):
+ if url_re.match(link):
+ return Markup('<a href="%s">%s</a>') % (link, link)
+ else:
+ return escape('<%s>' % link)
+
+
+renderer = CustomRenderer(flags=m.HTML_SKIP_HTML)
+md = m.Markdown(renderer, extensions=('autolink', 'strikethrough', 'no-intra-emphasis'))
+
+
def render(markdown):
- return Markup(m.html(
- markdown,
- extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,
- render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK
- ))
+ return Markup(md(markdown))
| {"golden_diff": "diff --git a/liberapay/utils/markdown.py b/liberapay/utils/markdown.py\n--- a/liberapay/utils/markdown.py\n+++ b/liberapay/utils/markdown.py\n@@ -1,9 +1,41 @@\n-from markupsafe import Markup\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n+import re\n+\n+from markupsafe import Markup, escape\n import misaka as m # http://misaka.61924.nl/\n \n+\n+url_re = re.compile(r'^(https?|xmpp):')\n+\n+\n+class CustomRenderer(m.HtmlRenderer):\n+\n+ def image(self, link, title='', alt=''):\n+ if url_re.match(link):\n+ maybe_alt = Markup(' alt=\"%s\"') % alt if alt else ''\n+ maybe_title = Markup(' title=\"%s\"') % title if title else ''\n+ return Markup('<img src=\"%s\"%s%s />') % (link, maybe_alt, maybe_title)\n+ else:\n+ return escape(\"\" % (alt, link))\n+\n+ def link(self, content, link, title=''):\n+ if url_re.match(link):\n+ maybe_title = Markup(' title=\"%s\"') % title if title else ''\n+ return Markup('<a href=\"%s\"%s>%s</a>') % (link, maybe_title, content)\n+ else:\n+ return escape(\"[%s](%s)\" % (content, link))\n+\n+ def autolink(self, link, is_email):\n+ if url_re.match(link):\n+ return Markup('<a href=\"%s\">%s</a>') % (link, link)\n+ else:\n+ return escape('<%s>' % link)\n+\n+\n+renderer = CustomRenderer(flags=m.HTML_SKIP_HTML)\n+md = m.Markdown(renderer, extensions=('autolink', 'strikethrough', 'no-intra-emphasis'))\n+\n+\n def render(markdown):\n- return Markup(m.html(\n- markdown,\n- extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,\n- render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK\n- ))\n+ return Markup(md(markdown))\n", "issue": "add support for xmpp: uri in markdown syntax\nWhen adding and XMPP uri in the following form:\r\n`[[email protected]](xmpp:[email protected]?join)`\r\nthe uri syntax is shown raw instead of linking to the room as expected.\nadd support for xmpp: uri in markdown syntax\nWhen adding and XMPP uri in the following form:\r\n`[[email protected]](xmpp:[email protected]?join)`\r\nthe uri syntax is shown raw instead of linking to the room as expected.\n", "before_files": [{"content": "from markupsafe import Markup\nimport misaka as m # http://misaka.61924.nl/\n\ndef render(markdown):\n return Markup(m.html(\n markdown,\n extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,\n render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK\n ))\n", "path": "liberapay/utils/markdown.py"}]} | 774 | 512 |
gh_patches_debug_10069 | rasdani/github-patches | git_diff | mkdocs__mkdocs-276 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multi-page documentation does not work on Windows
Multi-page documentation did not work on Windows, possibly because Windows uses backslashes instead of forward slashes for paths.

Should be similar to:

</issue>
<code>
[start of mkdocs/nav.py]
1 # coding: utf-8
2
3 """
4 Deals with generating the site-wide navigation.
5
6 This consists of building a set of interlinked page and header objects.
7 """
8
9 from mkdocs import utils
10 import posixpath
11 import os
12
13
14 def filename_to_title(filename):
15 """
16 Automatically generate a default title, given a filename.
17 """
18 if utils.is_homepage(filename):
19 return 'Home'
20
21 title = os.path.splitext(filename)[0]
22 title = title.replace('-', ' ').replace('_', ' ')
23 # Captialize if the filename was all lowercase, otherwise leave it as-is.
24 if title.lower() == title:
25 title = title.capitalize()
26 return title
27
28
29 class SiteNavigation(object):
30 def __init__(self, pages_config, use_directory_urls=True):
31 self.url_context = URLContext()
32 self.file_context = FileContext()
33 self.nav_items, self.pages = \
34 _generate_site_navigation(pages_config, self.url_context, use_directory_urls)
35 self.homepage = self.pages[0] if self.pages else None
36 self.use_directory_urls = use_directory_urls
37
38 def __str__(self):
39 return str(self.homepage) + ''.join([str(item) for item in self])
40
41 def __iter__(self):
42 return iter(self.nav_items)
43
44 def walk_pages(self):
45 """
46 Returns each page in the site in turn.
47
48 Additionally this sets the active status of the pages and headers,
49 in the site navigation, so that the rendered navbar can correctly
50 highlight the currently active page and/or header item.
51 """
52 page = self.homepage
53 page.set_active()
54 self.url_context.set_current_url(page.abs_url)
55 self.file_context.set_current_path(page.input_path)
56 yield page
57 while page.next_page:
58 page.set_active(False)
59 page = page.next_page
60 page.set_active()
61 self.url_context.set_current_url(page.abs_url)
62 self.file_context.set_current_path(page.input_path)
63 yield page
64 page.set_active(False)
65
66 @property
67 def source_files(self):
68 if not hasattr(self, '_source_files'):
69 self._source_files = set([page.input_path for page in self.pages])
70 return self._source_files
71
72
73 class URLContext(object):
74 """
75 The URLContext is used to ensure that we can generate the appropriate
76 relative URLs to other pages from any given page in the site.
77
78 We use relative URLs so that static sites can be deployed to any location
79 without having to specify what the path component on the host will be
80 if the documentation is not hosted at the root path.
81 """
82
83 def __init__(self):
84 self.base_path = '/'
85
86 def set_current_url(self, current_url):
87 self.base_path = posixpath.dirname(current_url)
88
89 def make_relative(self, url):
90 """
91 Given a URL path return it as a relative URL,
92 given the context of the current page.
93 """
94 suffix = '/' if (url.endswith('/') and len(url) > 1) else ''
95 # Workaround for bug on `posixpath.relpath()` in Python 2.6
96 if self.base_path == '/':
97 if url == '/':
98 # Workaround for static assets
99 return '.'
100 return url.lstrip('/')
101 relative_path = posixpath.relpath(url, start=self.base_path) + suffix
102
103 # Under Python 2.6, relative_path adds an extra '/' at the end.
104 return relative_path.rstrip('/')
105
106
107 class FileContext(object):
108 """
109 The FileContext is used to ensure that we can generate the appropriate
110 full path for other pages given their relative path from a particular page.
111
112 This is used when we have relative hyperlinks in the documentation, so that
113 we can ensure that they point to markdown documents that actually exist
114 in the `pages` config.
115 """
116 def __init__(self):
117 self.current_file = None
118 self.base_path = ''
119
120 def set_current_path(self, current_path):
121 self.current_file = current_path
122 self.base_path = os.path.dirname(current_path)
123
124 def make_absolute(self, path):
125 """
126 Given a relative file path return it as a POSIX-style
127 absolute filepath, given the context of the current page.
128 """
129 return posixpath.normpath(posixpath.join(self.base_path, path))
130
131
132 class Page(object):
133 def __init__(self, title, url, path, url_context):
134 self.title = title
135 self.abs_url = url
136 self.active = False
137 self.url_context = url_context
138
139 # Relative paths to the input markdown file and output html file.
140 self.input_path = path
141 self.output_path = utils.get_html_path(path)
142
143 # Links to related pages
144 self.previous_page = None
145 self.next_page = None
146 self.ancestors = []
147
148 @property
149 def url(self):
150 return self.url_context.make_relative(self.abs_url)
151
152 @property
153 def is_homepage(self):
154 return utils.is_homepage(self.input_path)
155
156 def __str__(self):
157 return self._indent_print()
158
159 def _indent_print(self, depth=0):
160 indent = ' ' * depth
161 active_marker = ' [*]' if self.active else ''
162 title = self.title if (self.title is not None) else '[blank]'
163 return '%s%s - %s%s\n' % (indent, title, self.abs_url, active_marker)
164
165 def set_active(self, active=True):
166 self.active = active
167 for ancestor in self.ancestors:
168 ancestor.active = active
169
170
171 class Header(object):
172 def __init__(self, title, children):
173 self.title, self.children = title, children
174 self.active = False
175
176 def __str__(self):
177 return self._indent_print()
178
179 def _indent_print(self, depth=0):
180 indent = ' ' * depth
181 active_marker = ' [*]' if self.active else ''
182 ret = '%s%s%s\n' % (indent, self.title, active_marker)
183 for item in self.children:
184 ret += item._indent_print(depth + 1)
185 return ret
186
187
188 def _generate_site_navigation(pages_config, url_context, use_directory_urls=True):
189 """
190 Returns a list of Page and Header instances that represent the
191 top level site navigation.
192 """
193 nav_items = []
194 pages = []
195 previous = None
196
197 for config_line in pages_config:
198 if isinstance(config_line, str):
199 path = config_line
200 title, child_title = None, None
201 elif len(config_line) in (1, 2, 3):
202 # Pad any items that don't exist with 'None'
203 padded_config = (list(config_line) + [None, None])[:3]
204 path, title, child_title = padded_config
205 else:
206 msg = (
207 "Line in 'page' config contained %d items. "
208 "Expected 1, 2 or 3 strings." % len(config_line)
209 )
210 assert False, msg
211
212 if title is None:
213 filename = path.split('/')[0]
214 title = filename_to_title(filename)
215 if child_title is None and '/' in path:
216 filename = path.split('/')[1]
217 child_title = filename_to_title(filename)
218
219 url = utils.get_url_path(path, use_directory_urls)
220
221 if not child_title:
222 # New top level page.
223 page = Page(title=title, url=url, path=path, url_context=url_context)
224 if not utils.is_homepage(path):
225 nav_items.append(page)
226 elif not nav_items or (nav_items[-1].title != title):
227 # New second level page.
228 page = Page(title=child_title, url=url, path=path, url_context=url_context)
229 header = Header(title=title, children=[page])
230 nav_items.append(header)
231 page.ancestors = [header]
232 else:
233 # Additional second level page.
234 page = Page(title=child_title, url=url, path=path, url_context=url_context)
235 header = nav_items[-1]
236 header.children.append(page)
237 page.ancestors = [header]
238
239 # Add in previous and next information.
240 if previous:
241 page.previous_page = previous
242 previous.next_page = page
243 previous = page
244
245 pages.append(page)
246
247 return (nav_items, pages)
248
[end of mkdocs/nav.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/nav.py b/mkdocs/nav.py
--- a/mkdocs/nav.py
+++ b/mkdocs/nav.py
@@ -210,10 +210,11 @@
assert False, msg
if title is None:
- filename = path.split('/')[0]
+ filename = path.split(os.path.sep)[0]
title = filename_to_title(filename)
- if child_title is None and '/' in path:
- filename = path.split('/')[1]
+
+ if child_title is None and os.path.sep in path:
+ filename = path.split(os.path.sep)[-1]
child_title = filename_to_title(filename)
url = utils.get_url_path(path, use_directory_urls)
| {"golden_diff": "diff --git a/mkdocs/nav.py b/mkdocs/nav.py\n--- a/mkdocs/nav.py\n+++ b/mkdocs/nav.py\n@@ -210,10 +210,11 @@\n assert False, msg\n \n if title is None:\n- filename = path.split('/')[0]\n+ filename = path.split(os.path.sep)[0]\n title = filename_to_title(filename)\n- if child_title is None and '/' in path:\n- filename = path.split('/')[1]\n+\n+ if child_title is None and os.path.sep in path:\n+ filename = path.split(os.path.sep)[-1]\n child_title = filename_to_title(filename)\n \n url = utils.get_url_path(path, use_directory_urls)\n", "issue": "Multi-page documentaion does not work on Windows\nMulti-page documentation did not work on Windows, possibly because of Windows usage of backward slash instead of forward slash for paths.\n\n\n\nShould be similar to:\n\n\n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the site-wide navigation.\n\nThis consists of building a set of interlinked page and header objects.\n\"\"\"\n\nfrom mkdocs import utils\nimport posixpath\nimport os\n\n\ndef filename_to_title(filename):\n \"\"\"\n Automatically generate a default title, given a filename.\n \"\"\"\n if utils.is_homepage(filename):\n return 'Home'\n\n title = os.path.splitext(filename)[0]\n title = title.replace('-', ' ').replace('_', ' ')\n # Captialize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n return title\n\n\nclass SiteNavigation(object):\n def __init__(self, pages_config, use_directory_urls=True):\n self.url_context = URLContext()\n self.file_context = FileContext()\n self.nav_items, self.pages = \\\n _generate_site_navigation(pages_config, self.url_context, use_directory_urls)\n self.homepage = self.pages[0] if self.pages else None\n self.use_directory_urls = use_directory_urls\n\n def __str__(self):\n return str(self.homepage) + ''.join([str(item) for item in self])\n\n def __iter__(self):\n return iter(self.nav_items)\n\n def walk_pages(self):\n \"\"\"\n Returns each page in the site in turn.\n\n Additionally this sets the active status of the pages and headers,\n in the site navigation, so that the rendered navbar can correctly\n highlight the currently active page and/or header item.\n \"\"\"\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)\n\n @property\n def source_files(self):\n if not hasattr(self, '_source_files'):\n self._source_files = set([page.input_path for page in self.pages])\n return self._source_files\n\n\nclass URLContext(object):\n \"\"\"\n The URLContext is used to ensure that we can generate the appropriate\n relative URLs to other pages from any given page in the site.\n\n We use relative URLs so that static sites can be deployed to any location\n without having to specify what the path component on the host will be\n if the documentation is not hosted at the root path.\n \"\"\"\n\n def __init__(self):\n self.base_path = '/'\n\n def set_current_url(self, current_url):\n self.base_path = posixpath.dirname(current_url)\n\n def make_relative(self, url):\n \"\"\"\n Given a URL path return it as a relative URL,\n given the context of the current page.\n \"\"\"\n suffix = '/' if (url.endswith('/') and len(url) > 
1) else ''\n # Workaround for bug on `posixpath.relpath()` in Python 2.6\n if self.base_path == '/':\n if url == '/':\n # Workaround for static assets\n return '.'\n return url.lstrip('/')\n relative_path = posixpath.relpath(url, start=self.base_path) + suffix\n\n # Under Python 2.6, relative_path adds an extra '/' at the end.\n return relative_path.rstrip('/')\n\n\nclass FileContext(object):\n \"\"\"\n The FileContext is used to ensure that we can generate the appropriate\n full path for other pages given their relative path from a particular page.\n\n This is used when we have relative hyperlinks in the documentation, so that\n we can ensure that they point to markdown documents that actually exist\n in the `pages` config.\n \"\"\"\n def __init__(self):\n self.current_file = None\n self.base_path = ''\n\n def set_current_path(self, current_path):\n self.current_file = current_path\n self.base_path = os.path.dirname(current_path)\n\n def make_absolute(self, path):\n \"\"\"\n Given a relative file path return it as a POSIX-style\n absolute filepath, given the context of the current page.\n \"\"\"\n return posixpath.normpath(posixpath.join(self.base_path, path))\n\n\nclass Page(object):\n def __init__(self, title, url, path, url_context):\n self.title = title\n self.abs_url = url\n self.active = False\n self.url_context = url_context\n\n # Relative paths to the input markdown file and output html file.\n self.input_path = path\n self.output_path = utils.get_html_path(path)\n\n # Links to related pages\n self.previous_page = None\n self.next_page = None\n self.ancestors = []\n\n @property\n def url(self):\n return self.url_context.make_relative(self.abs_url)\n\n @property\n def is_homepage(self):\n return utils.is_homepage(self.input_path)\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n title = self.title if (self.title is not None) else '[blank]'\n return '%s%s - %s%s\\n' % (indent, title, self.abs_url, active_marker)\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.active = active\n\n\nclass Header(object):\n def __init__(self, title, children):\n self.title, self.children = title, children\n self.active = False\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n ret = '%s%s%s\\n' % (indent, self.title, active_marker)\n for item in self.children:\n ret += item._indent_print(depth + 1)\n return ret\n\n\ndef _generate_site_navigation(pages_config, url_context, use_directory_urls=True):\n \"\"\"\n Returns a list of Page and Header instances that represent the\n top level site navigation.\n \"\"\"\n nav_items = []\n pages = []\n previous = None\n\n for config_line in pages_config:\n if isinstance(config_line, str):\n path = config_line\n title, child_title = None, None\n elif len(config_line) in (1, 2, 3):\n # Pad any items that don't exist with 'None'\n padded_config = (list(config_line) + [None, None])[:3]\n path, title, child_title = padded_config\n else:\n msg = (\n \"Line in 'page' config contained %d items. 
\"\n \"Expected 1, 2 or 3 strings.\" % len(config_line)\n )\n assert False, msg\n\n if title is None:\n filename = path.split('/')[0]\n title = filename_to_title(filename)\n if child_title is None and '/' in path:\n filename = path.split('/')[1]\n child_title = filename_to_title(filename)\n\n url = utils.get_url_path(path, use_directory_urls)\n\n if not child_title:\n # New top level page.\n page = Page(title=title, url=url, path=path, url_context=url_context)\n if not utils.is_homepage(path):\n nav_items.append(page)\n elif not nav_items or (nav_items[-1].title != title):\n # New second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = Header(title=title, children=[page])\n nav_items.append(header)\n page.ancestors = [header]\n else:\n # Additional second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = nav_items[-1]\n header.children.append(page)\n page.ancestors = [header]\n\n # Add in previous and next information.\n if previous:\n page.previous_page = previous\n previous.next_page = page\n previous = page\n\n pages.append(page)\n\n return (nav_items, pages)\n", "path": "mkdocs/nav.py"}]} | 3,137 | 160 |
gh_patches_debug_842 | rasdani/github-patches | git_diff | streamlit__streamlit-6377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Streamlit logger working on root
### Summary
Upon import, Streamlit adds a new **global** log handler that dumps logs in text format. Packages should not be doing that, because it might break the logging convention of the host systems.
In our case for example, we dump logs in JSON format and push it all to our logging aggregation system. Streamlit's log message break the format and so it happens that the only service we can't debug properly is Streamlit.
### Steps to reproduce
Nothing special, logging comes out of the box.
**Expected behavior:**
Streamlit should attach its handler to a specific logger namespace (e.g. `streamlit`) instead of attaching it to the root logger.
**Actual behavior:**
Streamlit attaches a stream handler to the root logger
### Is this a regression?
That is, did this use to work the way you expected in the past?
no
### Debug info
- Streamlit version: 1.1.0
- Python version: 3.8
- Using Conda? PipEnv? PyEnv? Pex?
- OS version: Any
- Browser version: Irrelevant
---
Community voting on feature requests enables the Streamlit team to understand which features are most important to our users.
**If you'd like the Streamlit team to prioritize this feature request, please use the 👍 (thumbs up emoji) reaction in response to the initial post.**
</issue>
<code>
[start of lib/streamlit/logger.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Logging module."""
16
17 import logging
18 import sys
19 from typing import Dict, Union
20
21 from typing_extensions import Final
22
23 DEFAULT_LOG_MESSAGE: Final = "%(asctime)s %(levelname) -7s " "%(name)s: %(message)s"
24
25 # Loggers for each name are saved here.
26 _loggers: Dict[str, logging.Logger] = {}
27
28 # The global log level is set here across all names.
29 _global_log_level = logging.INFO
30
31
32 def set_log_level(level: Union[str, int]) -> None:
33 """Set log level."""
34 logger = get_logger(__name__)
35
36 if isinstance(level, str):
37 level = level.upper()
38 if level == "CRITICAL" or level == logging.CRITICAL:
39 log_level = logging.CRITICAL
40 elif level == "ERROR" or level == logging.ERROR:
41 log_level = logging.ERROR
42 elif level == "WARNING" or level == logging.WARNING:
43 log_level = logging.WARNING
44 elif level == "INFO" or level == logging.INFO:
45 log_level = logging.INFO
46 elif level == "DEBUG" or level == logging.DEBUG:
47 log_level = logging.DEBUG
48 else:
49 msg = 'undefined log level "%s"' % level
50 logger.critical(msg)
51 sys.exit(1)
52
53 for log in _loggers.values():
54 log.setLevel(log_level)
55
56 global _global_log_level
57 _global_log_level = log_level
58
59
60 def setup_formatter(logger: logging.Logger) -> None:
61 """Set up the console formatter for a given logger."""
62 # Deregister any previous console loggers.
63 if hasattr(logger, "streamlit_console_handler"):
64 logger.removeHandler(logger.streamlit_console_handler)
65
66 logger.streamlit_console_handler = logging.StreamHandler() # type: ignore[attr-defined]
67
68 # Import here to avoid circular imports
69 from streamlit import config
70
71 if config._config_options:
72 # logger is required in ConfigOption.set_value
73 # Getting the config option before the config file has been parsed
74 # can create an infinite loop
75 message_format = config.get_option("logger.messageFormat")
76 else:
77 message_format = DEFAULT_LOG_MESSAGE
78 formatter = logging.Formatter(fmt=message_format)
79 formatter.default_msec_format = "%s.%03d"
80 logger.streamlit_console_handler.setFormatter(formatter) # type: ignore[attr-defined]
81
82 # Register the new console logger.
83 logger.addHandler(logger.streamlit_console_handler) # type: ignore[attr-defined]
84
85
86 def update_formatter() -> None:
87 for log in _loggers.values():
88 setup_formatter(log)
89
90
91 def init_tornado_logs() -> None:
92 """Set Tornado log levels.
93
94 This function does not import any Tornado code, so it's safe to call even
95 when Server is not running.
96 """
97 # http://www.tornadoweb.org/en/stable/log.html
98 for log in ("access", "application", "general"):
99 # get_logger will set the log level for the logger with the given name.
100 get_logger(f"tornado.{log}")
101
102
103 def get_logger(name: str) -> logging.Logger:
104 """Return a logger.
105
106 Parameters
107 ----------
108 name : str
109 The name of the logger to use. You should just pass in __name__.
110
111 Returns
112 -------
113 Logger
114
115 """
116 if name in _loggers.keys():
117 return _loggers[name]
118
119 if name == "root":
120 logger = logging.getLogger()
121 else:
122 logger = logging.getLogger(name)
123
124 logger.setLevel(_global_log_level)
125 logger.propagate = False
126 setup_formatter(logger)
127
128 _loggers[name] = logger
129
130 return logger
131
[end of lib/streamlit/logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/logger.py b/lib/streamlit/logger.py
--- a/lib/streamlit/logger.py
+++ b/lib/streamlit/logger.py
@@ -117,7 +117,7 @@
return _loggers[name]
if name == "root":
- logger = logging.getLogger()
+ logger = logging.getLogger("streamlit")
else:
logger = logging.getLogger(name)
| {"golden_diff": "diff --git a/lib/streamlit/logger.py b/lib/streamlit/logger.py\n--- a/lib/streamlit/logger.py\n+++ b/lib/streamlit/logger.py\n@@ -117,7 +117,7 @@\n return _loggers[name]\n \n if name == \"root\":\n- logger = logging.getLogger()\n+ logger = logging.getLogger(\"streamlit\")\n else:\n logger = logging.getLogger(name)\n", "issue": "Streamlit logger working on root\n### Summary\r\n\r\nUpon import, Streamlit adds a new **global** log handler that dumps logs in text format. Packages should not be doing that, because it might break the logging convention of the host systems. \r\nIn our case for example, we dump logs in JSON format and push it all to our logging aggregation system. Streamlit's log message break the format and so it happens that the only service we can't debug properly is Streamlit.\r\n\r\n### Steps to reproduce\r\nNothing special, logging comes out of the box.\r\n\r\n**Expected behavior:**\r\nStreamlit should attach its handler to a specific logger namespace (e.g. `streamlit`) instead of attaching it to the root logger.\r\n\r\n**Actual behavior:**\r\n\r\nStreamlit attaches a stream handler to the root logger\r\n\r\n### Is this a regression?\r\n\r\nThat is, did this use to work the way you expected in the past?\r\nno\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.1.0\r\n- Python version: 3.8\r\n- Using Conda? PipEnv? PyEnv? Pex?\r\n- OS version: Any\r\n- Browser version: Irrelevant\r\n\r\n---\r\n\r\nCommunity voting on feature requests enables the Streamlit team to understand which features are most important to our users.\r\n\r\n**If you'd like the Streamlit team to prioritize this feature request, please use the \ud83d\udc4d (thumbs up emoji) reaction in response to the initial post.**\r\n\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Logging module.\"\"\"\n\nimport logging\nimport sys\nfrom typing import Dict, Union\n\nfrom typing_extensions import Final\n\nDEFAULT_LOG_MESSAGE: Final = \"%(asctime)s %(levelname) -7s \" \"%(name)s: %(message)s\"\n\n# Loggers for each name are saved here.\n_loggers: Dict[str, logging.Logger] = {}\n\n# The global log level is set here across all names.\n_global_log_level = logging.INFO\n\n\ndef set_log_level(level: Union[str, int]) -> None:\n \"\"\"Set log level.\"\"\"\n logger = get_logger(__name__)\n\n if isinstance(level, str):\n level = level.upper()\n if level == \"CRITICAL\" or level == logging.CRITICAL:\n log_level = logging.CRITICAL\n elif level == \"ERROR\" or level == logging.ERROR:\n log_level = logging.ERROR\n elif level == \"WARNING\" or level == logging.WARNING:\n log_level = logging.WARNING\n elif level == \"INFO\" or level == logging.INFO:\n log_level = logging.INFO\n elif level == \"DEBUG\" or level == logging.DEBUG:\n log_level = logging.DEBUG\n else:\n msg = 'undefined log level \"%s\"' % level\n logger.critical(msg)\n sys.exit(1)\n\n for log in _loggers.values():\n log.setLevel(log_level)\n\n global _global_log_level\n _global_log_level = log_level\n\n\ndef setup_formatter(logger: logging.Logger) -> None:\n \"\"\"Set up the console formatter for a given logger.\"\"\"\n # Deregister any previous console loggers.\n if hasattr(logger, \"streamlit_console_handler\"):\n logger.removeHandler(logger.streamlit_console_handler)\n\n logger.streamlit_console_handler = logging.StreamHandler() # type: ignore[attr-defined]\n\n # Import here to avoid circular imports\n from streamlit import config\n\n if config._config_options:\n # logger is required in ConfigOption.set_value\n # Getting the config option before the config file has been parsed\n # can create an infinite loop\n message_format = config.get_option(\"logger.messageFormat\")\n else:\n message_format = DEFAULT_LOG_MESSAGE\n formatter = logging.Formatter(fmt=message_format)\n formatter.default_msec_format = \"%s.%03d\"\n logger.streamlit_console_handler.setFormatter(formatter) # type: ignore[attr-defined]\n\n # Register the new console logger.\n logger.addHandler(logger.streamlit_console_handler) # type: ignore[attr-defined]\n\n\ndef update_formatter() -> None:\n for log in _loggers.values():\n setup_formatter(log)\n\n\ndef init_tornado_logs() -> None:\n \"\"\"Set Tornado log levels.\n\n This function does not import any Tornado code, so it's safe to call even\n when Server is not running.\n \"\"\"\n # http://www.tornadoweb.org/en/stable/log.html\n for log in (\"access\", \"application\", \"general\"):\n # get_logger will set the log level for the logger with the given name.\n get_logger(f\"tornado.{log}\")\n\n\ndef get_logger(name: str) -> logging.Logger:\n \"\"\"Return a logger.\n\n Parameters\n ----------\n name : str\n The name of the logger to use. 
You should just pass in __name__.\n\n Returns\n -------\n Logger\n\n \"\"\"\n if name in _loggers.keys():\n return _loggers[name]\n\n if name == \"root\":\n logger = logging.getLogger()\n else:\n logger = logging.getLogger(name)\n\n logger.setLevel(_global_log_level)\n logger.propagate = False\n setup_formatter(logger)\n\n _loggers[name] = logger\n\n return logger\n", "path": "lib/streamlit/logger.py"}]} | 2,060 | 88 |
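The Streamlit record's fix swaps the process-wide root logger for a namespaced `"streamlit"` logger. A minimal sketch of why that matters, using only the standard `logging` module; the handler, level, and child-logger name below are illustrative choices, not Streamlit's actual configuration:

```python
import logging

# Handlers added to logging.getLogger() (the root logger) intercept every
# library's records, which is what broke the host application's JSON logging.
# The golden diff instead targets the "streamlit" namespace:
streamlit_logger = logging.getLogger("streamlit")
streamlit_logger.setLevel(logging.INFO)
streamlit_logger.addHandler(logging.StreamHandler())
streamlit_logger.propagate = False  # keep these records out of the root handlers

# Child loggers such as "streamlit.foo" still route through the namespaced handler.
logging.getLogger("streamlit.foo").info("handled inside the streamlit namespace only")
```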
gh_patches_debug_24570 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`skimage.filters.gaussian` doesn't use `output` parameter at all
## Description
`skimage.filters.gaussian` doesn't use the value of `output` parameter. It returns only the image of float64 dtype, even if explicitly specify the output dtype.
## Way to reproduce
Simply run the code snippet:
```python
from skimage.filters import gaussian
import numpy as np
image = np.arange(25, dtype=np.uint8).reshape((5,5))
# output is not specified
filtered_0 = gaussian(image, sigma=0.25)
print(filtered_0.dtype)
# return: float64
# output is specified
filtered_output = gaussian(image, output = np.uint8, sigma=0.25)
print(filtered_output.dtype)
# return: float64
```
This function is a wrapper around `scipy.ndi.gaussian_filter`. But the scikit-image `gaussian` doesn't pass the `output` to `scipy.ndi.gaussian_filter`.
`skimage.filters.gaussian` doesn't use `output` parameter at all
## Description
`skimage.filters.gaussian` doesn't use the value of `output` parameter. It returns only the image of float64 dtype, even if explicitly specify the output dtype.
## Way to reproduce
Simply run the code snippet:
```python
from skimage.filters import gaussian
import numpy as np
image = np.arange(25, dtype=np.uint8).reshape((5,5))
# output is not specified
filtered_0 = gaussian(image, sigma=0.25)
print(filtered_0.dtype)
# return: float64
# output is specified
filtered_output = gaussian(image, output = np.uint8, sigma=0.25)
print(filtered_output.dtype)
# return: float64
```
This function is a wrapper around `scipy.ndi.gaussian_filter`. But the scikit-image `gaussian` doesn't pass the `output` to `scipy.ndi.gaussian_filter`.
</issue>
<code>
[start of skimage/filters/_gaussian.py]
1 from collections.abc import Iterable
2 import numpy as np
3 from scipy import ndimage as ndi
4
5 from ..util import img_as_float
6 from .._shared.utils import warn, convert_to_float
7
8
9 __all__ = ['gaussian']
10
11
12 def gaussian(image, sigma=1, output=None, mode='nearest', cval=0,
13 multichannel=None, preserve_range=False, truncate=4.0):
14 """Multi-dimensional Gaussian filter.
15
16 Parameters
17 ----------
18 image : array-like
19 Input image (grayscale or color) to filter.
20 sigma : scalar or sequence of scalars, optional
21 Standard deviation for Gaussian kernel. The standard
22 deviations of the Gaussian filter are given for each axis as a
23 sequence, or as a single number, in which case it is equal for
24 all axes.
25 output : array, optional
26 The ``output`` parameter passes an array in which to store the
27 filter output.
28 mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
29 The ``mode`` parameter determines how the array borders are
30 handled, where ``cval`` is the value when mode is equal to
31 'constant'. Default is 'nearest'.
32 cval : scalar, optional
33 Value to fill past edges of input if ``mode`` is 'constant'. Default
34 is 0.0
35 multichannel : bool, optional (default: None)
36 Whether the last axis of the image is to be interpreted as multiple
37 channels. If True, each channel is filtered separately (channels are
38 not mixed together). Only 3 channels are supported. If ``None``,
39 the function will attempt to guess this, and raise a warning if
40 ambiguous, when the array has shape (M, N, 3).
41 preserve_range : bool, optional
42 Whether to keep the original range of values. Otherwise, the input
43 image is converted according to the conventions of ``img_as_float``.
44 Also see
45 https://scikit-image.org/docs/dev/user_guide/data_types.html
46 truncate : float, optional
47 Truncate the filter at this many standard deviations.
48
49 Returns
50 -------
51 filtered_image : ndarray
52 the filtered array
53
54 Notes
55 -----
56 This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.
57
58 Integer arrays are converted to float.
59
60 The multi-dimensional filter is implemented as a sequence of
61 one-dimensional convolution filters. The intermediate arrays are
62 stored in the same data type as the output. Therefore, for output
63 types with a limited precision, the results may be imprecise
64 because intermediate results may be stored with insufficient
65 precision.
66
67 Examples
68 --------
69
70 >>> a = np.zeros((3, 3))
71 >>> a[1, 1] = 1
72 >>> a
73 array([[0., 0., 0.],
74 [0., 1., 0.],
75 [0., 0., 0.]])
76 >>> gaussian(a, sigma=0.4) # mild smoothing
77 array([[0.00163116, 0.03712502, 0.00163116],
78 [0.03712502, 0.84496158, 0.03712502],
79 [0.00163116, 0.03712502, 0.00163116]])
80 >>> gaussian(a, sigma=1) # more smoothing
81 array([[0.05855018, 0.09653293, 0.05855018],
82 [0.09653293, 0.15915589, 0.09653293],
83 [0.05855018, 0.09653293, 0.05855018]])
84 >>> # Several modes are possible for handling boundaries
85 >>> gaussian(a, sigma=1, mode='reflect')
86 array([[0.08767308, 0.12075024, 0.08767308],
87 [0.12075024, 0.16630671, 0.12075024],
88 [0.08767308, 0.12075024, 0.08767308]])
89 >>> # For RGB images, each is filtered separately
90 >>> from skimage.data import astronaut
91 >>> image = astronaut()
92 >>> filtered_img = gaussian(image, sigma=1, multichannel=True)
93
94 """
95
96 spatial_dims = None
97 try:
98 spatial_dims = _guess_spatial_dimensions(image)
99 except ValueError:
100 spatial_dims = image.ndim
101 if spatial_dims is None and multichannel is None:
102 msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB "
103 "by default. Use `multichannel=False` to interpret as "
104 "3D image with last dimension of length 3.")
105 warn(RuntimeWarning(msg))
106 multichannel = True
107 if np.any(np.asarray(sigma) < 0.0):
108 raise ValueError("Sigma values less than zero are not valid")
109 if multichannel:
110 # do not filter across channels
111 if not isinstance(sigma, Iterable):
112 sigma = [sigma] * (image.ndim - 1)
113 if len(sigma) != image.ndim:
114 sigma = np.concatenate((np.asarray(sigma), [0]))
115 image = convert_to_float(image, preserve_range)
116 return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval,
117 truncate=truncate)
118
119
120 def _guess_spatial_dimensions(image):
121 """Make an educated guess about whether an image has a channels dimension.
122
123 Parameters
124 ----------
125 image : ndarray
126 The input image.
127
128 Returns
129 -------
130 spatial_dims : int or None
131 The number of spatial dimensions of ``image``. If ambiguous, the value
132 is ``None``.
133
134 Raises
135 ------
136 ValueError
137 If the image array has less than two or more than four dimensions.
138 """
139 if image.ndim == 2:
140 return 2
141 if image.ndim == 3 and image.shape[-1] != 3:
142 return 3
143 if image.ndim == 3 and image.shape[-1] == 3:
144 return None
145 if image.ndim == 4 and image.shape[-1] == 3:
146 return 3
147 else:
148 raise ValueError("Expected 2D, 3D, or 4D array, got %iD." % image.ndim)
149
[end of skimage/filters/_gaussian.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/filters/_gaussian.py b/skimage/filters/_gaussian.py
--- a/skimage/filters/_gaussian.py
+++ b/skimage/filters/_gaussian.py
@@ -57,6 +57,10 @@
Integer arrays are converted to float.
+ The ``output`` should be floating point data type since gaussian converts
+ to float provided ``image``. If ``output`` is not provided, another array
+ will be allocated and returned as the result.
+
The multi-dimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
@@ -113,8 +117,13 @@
if len(sigma) != image.ndim:
sigma = np.concatenate((np.asarray(sigma), [0]))
image = convert_to_float(image, preserve_range)
- return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval,
- truncate=truncate)
+ if output is None:
+ output = np.empty_like(image)
+ elif not np.issubdtype(output.dtype, np.floating):
+ raise ValueError("Provided output data type is not float")
+ ndi.gaussian_filter(image, sigma, output=output, mode=mode, cval=cval,
+ truncate=truncate)
+ return output
def _guess_spatial_dimensions(image):
| {"golden_diff": "diff --git a/skimage/filters/_gaussian.py b/skimage/filters/_gaussian.py\n--- a/skimage/filters/_gaussian.py\n+++ b/skimage/filters/_gaussian.py\n@@ -57,6 +57,10 @@\n \n Integer arrays are converted to float.\n \n+ The ``output`` should be floating point data type since gaussian converts\n+ to float provided ``image``. If ``output`` is not provided, another array\n+ will be allocated and returned as the result.\n+\n The multi-dimensional filter is implemented as a sequence of\n one-dimensional convolution filters. The intermediate arrays are\n stored in the same data type as the output. Therefore, for output\n@@ -113,8 +117,13 @@\n if len(sigma) != image.ndim:\n sigma = np.concatenate((np.asarray(sigma), [0]))\n image = convert_to_float(image, preserve_range)\n- return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval,\n- truncate=truncate)\n+ if output is None:\n+ output = np.empty_like(image)\n+ elif not np.issubdtype(output.dtype, np.floating):\n+ raise ValueError(\"Provided output data type is not float\")\n+ ndi.gaussian_filter(image, sigma, output=output, mode=mode, cval=cval,\n+ truncate=truncate)\n+ return output\n \n \n def _guess_spatial_dimensions(image):\n", "issue": "`skimage.filters.gaussian` doesn't use `output` parameter at all\n## Description\r\n`skimage.filters.gaussian` doesn't use the value of `output` parameter. It returns only the image of float64 dtype, even if explicitly specify the output dtype. \r\n\r\n## Way to reproduce\r\nSimply run the code snippet:\r\n```python\r\nfrom skimage.filters import gaussian\r\nimport numpy as np\r\n\r\nimage = np.arange(25, dtype=np.uint8).reshape((5,5))\r\n\r\n# output is not specified\r\nfiltered_0 = gaussian(image, sigma=0.25)\r\nprint(filtered_0.dtype)\r\n# return: float64\r\n\r\n# output is specified\r\nfiltered_output = gaussian(image, output = np.uint8, sigma=0.25)\r\nprint(filtered_output.dtype)\r\n# return: float64\r\n```\r\nThis function is a wrapper around `scipy.ndi.gaussian_filter`. But the scikit-image `gaussian` doesn't pass the `output` to `scipy.ndi.gaussian_filter`.\r\n\r\n\n`skimage.filters.gaussian` doesn't use `output` parameter at all\n## Description\r\n`skimage.filters.gaussian` doesn't use the value of `output` parameter. It returns only the image of float64 dtype, even if explicitly specify the output dtype. \r\n\r\n## Way to reproduce\r\nSimply run the code snippet:\r\n```python\r\nfrom skimage.filters import gaussian\r\nimport numpy as np\r\n\r\nimage = np.arange(25, dtype=np.uint8).reshape((5,5))\r\n\r\n# output is not specified\r\nfiltered_0 = gaussian(image, sigma=0.25)\r\nprint(filtered_0.dtype)\r\n# return: float64\r\n\r\n# output is specified\r\nfiltered_output = gaussian(image, output = np.uint8, sigma=0.25)\r\nprint(filtered_output.dtype)\r\n# return: float64\r\n```\r\nThis function is a wrapper around `scipy.ndi.gaussian_filter`. 
But the scikit-image `gaussian` doesn't pass the `output` to `scipy.ndi.gaussian_filter`.\r\n\r\n\n", "before_files": [{"content": "from collections.abc import Iterable\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom ..util import img_as_float\nfrom .._shared.utils import warn, convert_to_float\n\n\n__all__ = ['gaussian']\n\n\ndef gaussian(image, sigma=1, output=None, mode='nearest', cval=0,\n multichannel=None, preserve_range=False, truncate=4.0):\n \"\"\"Multi-dimensional Gaussian filter.\n\n Parameters\n ----------\n image : array-like\n Input image (grayscale or color) to filter.\n sigma : scalar or sequence of scalars, optional\n Standard deviation for Gaussian kernel. The standard\n deviations of the Gaussian filter are given for each axis as a\n sequence, or as a single number, in which case it is equal for\n all axes.\n output : array, optional\n The ``output`` parameter passes an array in which to store the\n filter output.\n mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The ``mode`` parameter determines how the array borders are\n handled, where ``cval`` is the value when mode is equal to\n 'constant'. Default is 'nearest'.\n cval : scalar, optional\n Value to fill past edges of input if ``mode`` is 'constant'. Default\n is 0.0\n multichannel : bool, optional (default: None)\n Whether the last axis of the image is to be interpreted as multiple\n channels. If True, each channel is filtered separately (channels are\n not mixed together). Only 3 channels are supported. If ``None``,\n the function will attempt to guess this, and raise a warning if\n ambiguous, when the array has shape (M, N, 3).\n preserve_range : bool, optional\n Whether to keep the original range of values. Otherwise, the input\n image is converted according to the conventions of ``img_as_float``.\n Also see\n https://scikit-image.org/docs/dev/user_guide/data_types.html\n truncate : float, optional\n Truncate the filter at this many standard deviations.\n\n Returns\n -------\n filtered_image : ndarray\n the filtered array\n\n Notes\n -----\n This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.\n\n Integer arrays are converted to float.\n\n The multi-dimensional filter is implemented as a sequence of\n one-dimensional convolution filters. The intermediate arrays are\n stored in the same data type as the output. 
Therefore, for output\n types with a limited precision, the results may be imprecise\n because intermediate results may be stored with insufficient\n precision.\n\n Examples\n --------\n\n >>> a = np.zeros((3, 3))\n >>> a[1, 1] = 1\n >>> a\n array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 0., 0.]])\n >>> gaussian(a, sigma=0.4) # mild smoothing\n array([[0.00163116, 0.03712502, 0.00163116],\n [0.03712502, 0.84496158, 0.03712502],\n [0.00163116, 0.03712502, 0.00163116]])\n >>> gaussian(a, sigma=1) # more smoothing\n array([[0.05855018, 0.09653293, 0.05855018],\n [0.09653293, 0.15915589, 0.09653293],\n [0.05855018, 0.09653293, 0.05855018]])\n >>> # Several modes are possible for handling boundaries\n >>> gaussian(a, sigma=1, mode='reflect')\n array([[0.08767308, 0.12075024, 0.08767308],\n [0.12075024, 0.16630671, 0.12075024],\n [0.08767308, 0.12075024, 0.08767308]])\n >>> # For RGB images, each is filtered separately\n >>> from skimage.data import astronaut\n >>> image = astronaut()\n >>> filtered_img = gaussian(image, sigma=1, multichannel=True)\n\n \"\"\"\n\n spatial_dims = None\n try:\n spatial_dims = _guess_spatial_dimensions(image)\n except ValueError:\n spatial_dims = image.ndim\n if spatial_dims is None and multichannel is None:\n msg = (\"Images with dimensions (M, N, 3) are interpreted as 2D+RGB \"\n \"by default. Use `multichannel=False` to interpret as \"\n \"3D image with last dimension of length 3.\")\n warn(RuntimeWarning(msg))\n multichannel = True\n if np.any(np.asarray(sigma) < 0.0):\n raise ValueError(\"Sigma values less than zero are not valid\")\n if multichannel:\n # do not filter across channels\n if not isinstance(sigma, Iterable):\n sigma = [sigma] * (image.ndim - 1)\n if len(sigma) != image.ndim:\n sigma = np.concatenate((np.asarray(sigma), [0]))\n image = convert_to_float(image, preserve_range)\n return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval,\n truncate=truncate)\n\n\ndef _guess_spatial_dimensions(image):\n \"\"\"Make an educated guess about whether an image has a channels dimension.\n\n Parameters\n ----------\n image : ndarray\n The input image.\n\n Returns\n -------\n spatial_dims : int or None\n The number of spatial dimensions of ``image``. If ambiguous, the value\n is ``None``.\n\n Raises\n ------\n ValueError\n If the image array has less than two or more than four dimensions.\n \"\"\"\n if image.ndim == 2:\n return 2\n if image.ndim == 3 and image.shape[-1] != 3:\n return 3\n if image.ndim == 3 and image.shape[-1] == 3:\n return None\n if image.ndim == 4 and image.shape[-1] == 3:\n return 3\n else:\n raise ValueError(\"Expected 2D, 3D, or 4D array, got %iD.\" % image.ndim)\n", "path": "skimage/filters/_gaussian.py"}]} | 2,843 | 322 |
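The scikit-image record's golden diff allocates a float output array and forwards it to `scipy.ndimage.gaussian_filter` instead of ignoring the `output` argument. A short sketch of that call pattern, assuming numpy and scipy are installed; the sigma value and image contents are arbitrary:

```python
import numpy as np
from scipy import ndimage as ndi

image = np.arange(25, dtype=np.float64).reshape(5, 5)

# Pre-allocated floating-point output, as the patched wrapper requires
# (an integer dtype would be rejected, since the image is filtered as float).
out = np.empty_like(image)
ndi.gaussian_filter(image, sigma=0.25, output=out)

print(out.dtype)   # float64
print(out[2, 2])   # smoothed centre value, written in place into `out`
```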
gh_patches_debug_24425 | rasdani/github-patches | git_diff | conda__conda-5421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda-env update error in 4.3.20
```
conda env update
An unexpected error has occurred.
Please consider posting the following information to the
conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Current conda install:
platform : linux-64
conda version : 4.3.20
conda is private : False
conda-env version : 4.3.20
conda-build version : not installed
python version : 3.5.2.final.0
requests version : 2.14.2
root environment : /home/travis/miniconda (writable)
default environment : /home/travis/miniconda
envs directories : /home/travis/miniconda/envs
/home/travis/.conda/envs
package cache : /home/travis/miniconda/pkgs
/home/travis/.conda/pkgs
channel URLs : https://conda.anaconda.org/conda-canary/linux-64
https://conda.anaconda.org/conda-canary/noarch
https://repo.continuum.io/pkgs/free/linux-64
https://repo.continuum.io/pkgs/free/noarch
https://repo.continuum.io/pkgs/r/linux-64
https://repo.continuum.io/pkgs/r/noarch
https://repo.continuum.io/pkgs/pro/linux-64
https://repo.continuum.io/pkgs/pro/noarch
config file : /home/travis/.condarc
netrc file : None
offline mode : False
user-agent : conda/4.3.20 requests/2.14.2 CPython/3.5.2 Linux/4.4.0-51-generic debian/jessie/sid glibc/2.19
UID:GID : 1000:1000
`$ /home/travis/miniconda/bin/conda-env update`
Traceback (most recent call last):
File "/home/travis/miniconda/lib/python3.5/site-packages/conda/exceptions.py", line 632, in conda_exception_handler
return_value = func(*args, **kwargs)
File "/home/travis/miniconda/lib/python3.5/site-packages/conda_env/cli/main_update.py", line 82, in execute
if not (args.name or args.prefix):
AttributeError: 'Namespace' object has no attribute 'prefix'
```
</issue>
<code>
[start of conda_env/cli/main_update.py]
1 from argparse import RawDescriptionHelpFormatter
2 import os
3 import textwrap
4 import sys
5
6 from conda import config
7 from conda.cli import common
8 from conda.cli import install as cli_install
9 from conda.misc import touch_nonadmin
10 from ..installers.base import get_installer, InvalidInstaller
11 from .. import specs as install_specs
12 from .. import exceptions
13 # for conda env
14 from conda_env.cli.common import get_prefix
15 from ..exceptions import CondaEnvException
16 description = """
17 Update the current environment based on environment file
18 """
19
20 example = """
21 examples:
22 conda env update
23 conda env update -n=foo
24 conda env update -f=/path/to/environment.yml
25 conda env update --name=foo --file=environment.yml
26 conda env update vader/deathstar
27 """
28
29
30 def configure_parser(sub_parsers):
31 p = sub_parsers.add_parser(
32 'update',
33 formatter_class=RawDescriptionHelpFormatter,
34 description=description,
35 help=description,
36 epilog=example,
37 )
38 p.add_argument(
39 '-n', '--name',
40 action='store',
41 help='name of environment (in %s)' % os.pathsep.join(config.envs_dirs),
42 default=None,
43 )
44 p.add_argument(
45 '-f', '--file',
46 action='store',
47 help='environment definition (default: environment.yml)',
48 default='environment.yml',
49 )
50 p.add_argument(
51 '--prune',
52 action='store_true',
53 default=False,
54 help='remove installed packages not defined in environment.yml',
55 )
56 p.add_argument(
57 '-q', '--quiet',
58 action='store_true',
59 default=False,
60 )
61 p.add_argument(
62 'remote_definition',
63 help='remote environment definition / IPython notebook',
64 action='store',
65 default=None,
66 nargs='?'
67 )
68 common.add_parser_json(p)
69 p.set_defaults(func=execute)
70
71
72 def execute(args, parser):
73 name = args.remote_definition or args.name
74
75 try:
76 spec = install_specs.detect(name=name, filename=args.file,
77 directory=os.getcwd())
78 env = spec.environment
79 except exceptions.SpecNotFound:
80 raise
81
82 if not (args.name or args.prefix):
83 if not env.name:
84 # Note, this is a hack fofr get_prefix that assumes argparse results
85 # TODO Refactor common.get_prefix
86 name = os.environ.get('CONDA_DEFAULT_ENV', False)
87 if not name:
88 msg = "Unable to determine environment\n\n"
89 msg += textwrap.dedent("""
90 Please re-run this command with one of the following options:
91
92 * Provide an environment name via --name or -n
93 * Re-run this command inside an activated conda environment.""").lstrip()
94 # TODO Add json support
95 raise CondaEnvException(msg)
96
97 # Note: stubbing out the args object as all of the
98 # conda.cli.common code thinks that name will always
99 # be specified.
100 args.name = env.name
101
102 prefix = get_prefix(args, search=False)
103 # CAN'T Check with this function since it assumes we will create prefix.
104 # cli_install.check_prefix(prefix, json=args.json)
105
106 # TODO, add capability
107 # common.ensure_override_channels_requires_channel(args)
108 # channel_urls = args.channel or ()
109
110 for installer_type, specs in env.dependencies.items():
111 try:
112 installer = get_installer(installer_type)
113 installer.install(prefix, specs, args, env, prune=args.prune)
114 except InvalidInstaller:
115 sys.stderr.write(textwrap.dedent("""
116 Unable to install package for {0}.
117
118 Please double check and ensure you dependencies file has
119 the correct spelling. You might also try installing the
120 conda-env-{0} package to see if provides the required
121 installer.
122 """).lstrip().format(installer_type)
123 )
124 return -1
125
126 touch_nonadmin(prefix)
127 if not args.json:
128 print(cli_install.print_activate(args.name if args.name else prefix))
129
[end of conda_env/cli/main_update.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_env/cli/main_update.py b/conda_env/cli/main_update.py
--- a/conda_env/cli/main_update.py
+++ b/conda_env/cli/main_update.py
@@ -1,18 +1,16 @@
from argparse import RawDescriptionHelpFormatter
import os
-import textwrap
import sys
+import textwrap
-from conda import config
-from conda.cli import common
-from conda.cli import install as cli_install
+from conda.cli import common, install as cli_install
from conda.misc import touch_nonadmin
-from ..installers.base import get_installer, InvalidInstaller
-from .. import specs as install_specs
-from .. import exceptions
# for conda env
from conda_env.cli.common import get_prefix
+from .. import exceptions, specs as install_specs
from ..exceptions import CondaEnvException
+from ..installers.base import InvalidInstaller, get_installer
+
description = """
Update the current environment based on environment file
"""
@@ -35,12 +33,7 @@
help=description,
epilog=example,
)
- p.add_argument(
- '-n', '--name',
- action='store',
- help='name of environment (in %s)' % os.pathsep.join(config.envs_dirs),
- default=None,
- )
+ common.add_parser_prefix(p)
p.add_argument(
'-f', '--file',
action='store',
| {"golden_diff": "diff --git a/conda_env/cli/main_update.py b/conda_env/cli/main_update.py\n--- a/conda_env/cli/main_update.py\n+++ b/conda_env/cli/main_update.py\n@@ -1,18 +1,16 @@\n from argparse import RawDescriptionHelpFormatter\n import os\n-import textwrap\n import sys\n+import textwrap\n \n-from conda import config\n-from conda.cli import common\n-from conda.cli import install as cli_install\n+from conda.cli import common, install as cli_install\n from conda.misc import touch_nonadmin\n-from ..installers.base import get_installer, InvalidInstaller\n-from .. import specs as install_specs\n-from .. import exceptions\n # for conda env\n from conda_env.cli.common import get_prefix\n+from .. import exceptions, specs as install_specs\n from ..exceptions import CondaEnvException\n+from ..installers.base import InvalidInstaller, get_installer\n+\n description = \"\"\"\n Update the current environment based on environment file\n \"\"\"\n@@ -35,12 +33,7 @@\n help=description,\n epilog=example,\n )\n- p.add_argument(\n- '-n', '--name',\n- action='store',\n- help='name of environment (in %s)' % os.pathsep.join(config.envs_dirs),\n- default=None,\n- )\n+ common.add_parser_prefix(p)\n p.add_argument(\n '-f', '--file',\n action='store',\n", "issue": "conda-env update error in 4.3.20\n```\r\nconda env update\r\nAn unexpected error has occurred.\r\nPlease consider posting the following information to the\r\nconda GitHub issue tracker at:\r\n https://github.com/conda/conda/issues\r\nCurrent conda install:\r\n platform : linux-64\r\n conda version : 4.3.20\r\n conda is private : False\r\n conda-env version : 4.3.20\r\n conda-build version : not installed\r\n python version : 3.5.2.final.0\r\n requests version : 2.14.2\r\n root environment : /home/travis/miniconda (writable)\r\n default environment : /home/travis/miniconda\r\n envs directories : /home/travis/miniconda/envs\r\n /home/travis/.conda/envs\r\n package cache : /home/travis/miniconda/pkgs\r\n /home/travis/.conda/pkgs\r\n channel URLs : https://conda.anaconda.org/conda-canary/linux-64\r\n https://conda.anaconda.org/conda-canary/noarch\r\n https://repo.continuum.io/pkgs/free/linux-64\r\n https://repo.continuum.io/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/r/linux-64\r\n https://repo.continuum.io/pkgs/r/noarch\r\n https://repo.continuum.io/pkgs/pro/linux-64\r\n https://repo.continuum.io/pkgs/pro/noarch\r\n config file : /home/travis/.condarc\r\n netrc file : None\r\n offline mode : False\r\n user-agent : conda/4.3.20 requests/2.14.2 CPython/3.5.2 Linux/4.4.0-51-generic debian/jessie/sid glibc/2.19 \r\n UID:GID : 1000:1000\r\n`$ /home/travis/miniconda/bin/conda-env update`\r\n Traceback (most recent call last):\r\n File \"/home/travis/miniconda/lib/python3.5/site-packages/conda/exceptions.py\", line 632, in conda_exception_handler\r\n return_value = func(*args, **kwargs)\r\n File \"/home/travis/miniconda/lib/python3.5/site-packages/conda_env/cli/main_update.py\", line 82, in execute\r\n if not (args.name or args.prefix):\r\n AttributeError: 'Namespace' object has no attribute 'prefix'\r\n```\n", "before_files": [{"content": "from argparse import RawDescriptionHelpFormatter\nimport os\nimport textwrap\nimport sys\n\nfrom conda import config\nfrom conda.cli import common\nfrom conda.cli import install as cli_install\nfrom conda.misc import touch_nonadmin\nfrom ..installers.base import get_installer, InvalidInstaller\nfrom .. import specs as install_specs\nfrom .. 
import exceptions\n# for conda env\nfrom conda_env.cli.common import get_prefix\nfrom ..exceptions import CondaEnvException\ndescription = \"\"\"\nUpdate the current environment based on environment file\n\"\"\"\n\nexample = \"\"\"\nexamples:\n conda env update\n conda env update -n=foo\n conda env update -f=/path/to/environment.yml\n conda env update --name=foo --file=environment.yml\n conda env update vader/deathstar\n\"\"\"\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'update',\n formatter_class=RawDescriptionHelpFormatter,\n description=description,\n help=description,\n epilog=example,\n )\n p.add_argument(\n '-n', '--name',\n action='store',\n help='name of environment (in %s)' % os.pathsep.join(config.envs_dirs),\n default=None,\n )\n p.add_argument(\n '-f', '--file',\n action='store',\n help='environment definition (default: environment.yml)',\n default='environment.yml',\n )\n p.add_argument(\n '--prune',\n action='store_true',\n default=False,\n help='remove installed packages not defined in environment.yml',\n )\n p.add_argument(\n '-q', '--quiet',\n action='store_true',\n default=False,\n )\n p.add_argument(\n 'remote_definition',\n help='remote environment definition / IPython notebook',\n action='store',\n default=None,\n nargs='?'\n )\n common.add_parser_json(p)\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n name = args.remote_definition or args.name\n\n try:\n spec = install_specs.detect(name=name, filename=args.file,\n directory=os.getcwd())\n env = spec.environment\n except exceptions.SpecNotFound:\n raise\n\n if not (args.name or args.prefix):\n if not env.name:\n # Note, this is a hack fofr get_prefix that assumes argparse results\n # TODO Refactor common.get_prefix\n name = os.environ.get('CONDA_DEFAULT_ENV', False)\n if not name:\n msg = \"Unable to determine environment\\n\\n\"\n msg += textwrap.dedent(\"\"\"\n Please re-run this command with one of the following options:\n\n * Provide an environment name via --name or -n\n * Re-run this command inside an activated conda environment.\"\"\").lstrip()\n # TODO Add json support\n raise CondaEnvException(msg)\n\n # Note: stubbing out the args object as all of the\n # conda.cli.common code thinks that name will always\n # be specified.\n args.name = env.name\n\n prefix = get_prefix(args, search=False)\n # CAN'T Check with this function since it assumes we will create prefix.\n # cli_install.check_prefix(prefix, json=args.json)\n\n # TODO, add capability\n # common.ensure_override_channels_requires_channel(args)\n # channel_urls = args.channel or ()\n\n for installer_type, specs in env.dependencies.items():\n try:\n installer = get_installer(installer_type)\n installer.install(prefix, specs, args, env, prune=args.prune)\n except InvalidInstaller:\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\n\n Please double check and ensure you dependencies file has\n the correct spelling. You might also try installing the\n conda-env-{0} package to see if provides the required\n installer.\n \"\"\").lstrip().format(installer_type)\n )\n return -1\n\n touch_nonadmin(prefix)\n if not args.json:\n print(cli_install.print_activate(args.name if args.name else prefix))\n", "path": "conda_env/cli/main_update.py"}]} | 2,257 | 311 |
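The conda-env record's crash comes from `args.prefix` never being registered on the parser; the golden diff replaces the hand-rolled `--name` option with conda's `common.add_parser_prefix` helper. A plain-argparse approximation of that shape is sketched below; the mutually exclusive `-n`/`-p` pair is an assumption about the helper's behaviour, not a reproduction of conda's code:

```python
import argparse

# Pre-fix parser: only --name is registered, so args.prefix raises AttributeError.
old = argparse.ArgumentParser()
old.add_argument("-n", "--name", default=None)
old_args = old.parse_args([])
# old_args.prefix  # AttributeError: 'Namespace' object has no attribute 'prefix'

# Post-fix shape: register both environment selectors so both attributes exist.
new = argparse.ArgumentParser()
group = new.add_mutually_exclusive_group()
group.add_argument("-n", "--name", default=None)
group.add_argument("-p", "--prefix", default=None)
new_args = new.parse_args([])

print(new_args.name, new_args.prefix)  # None None -- both attributes now defined
```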
gh_patches_debug_22597 | rasdani/github-patches | git_diff | pwndbg__pwndbg-430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pwndbg failing with GDB using language different than English
### Description
After installing pwndbg with a GDB that uses Spanish language we do fail on detecting whether `osabi` is Linux.
This happens after launching GDB:
https://prnt.sc/itwf3u
And this is the return of `show osabi` for GDB with spanish language:
```
(gdb) show osabi
El actual SO ABI es «auto» (actualmente «GNU/Linux»).
El SO ABI predeterminado es «GNU/Linux».
```
This is the code responsible for failure:
https://github.com/pwndbg/pwndbg/blob/dev/pwndbg/abi.py#L127-L140
</issue>
<code>
[start of pwndbg/abi.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5 from __future__ import unicode_literals
6
7 import functools
8 import re
9
10 import gdb
11
12 import pwndbg.arch
13 import pwndbg.color.message as M
14
15
16 class ABI(object):
17 """
18 Encapsulates information about a calling convention.
19 """
20 #: List or registers which should be filled with arguments before
21 #: spilling onto the stack.
22 register_arguments = []
23
24 #: Minimum alignment of the stack.
25 #: The value used is min(context.bytes, stack_alignment)
26 #: This is necessary as Windows x64 frames must be 32-byte aligned.
27 #: "Alignment" is considered with respect to the last argument on the stack.
28 arg_alignment = 1
29
30 #: Minimum number of stack slots used by a function call
31 #: This is necessary as Windows x64 requires using 4 slots on the stack
32 stack_minimum = 0
33
34 #: Indicates that this ABI returns to the next address on the slot
35 returns = True
36
37 def __init__(self, regs, align, minimum):
38 self.register_arguments = regs
39 self.arg_alignment = align
40 self.stack_minimum = minimum
41
42 @staticmethod
43 def default():
44 return {
45 (32, 'i386', 'linux'): linux_i386,
46 (64, 'x86-64', 'linux'): linux_amd64,
47 (64, 'aarch64', 'linux'): linux_aarch64,
48 (32, 'arm', 'linux'): linux_arm,
49 (32, 'thumb', 'linux'): linux_arm,
50 (32, 'mips', 'linux'): linux_mips,
51 (32, 'powerpc', 'linux'): linux_ppc,
52 (64, 'powerpc', 'linux'): linux_ppc64,
53 }[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]
54
55 @staticmethod
56 def syscall():
57 return {
58 (32, 'i386', 'linux'): linux_i386_syscall,
59 (64, 'x86-64', 'linux'): linux_amd64_syscall,
60 (64, 'aarch64', 'linux'): linux_aarch64_syscall,
61 (32, 'arm', 'linux'): linux_arm_syscall,
62 (32, 'thumb', 'linux'): linux_arm_syscall,
63 (32, 'mips', 'linux'): linux_mips_syscall,
64 (32, 'powerpc', 'linux'): linux_ppc_syscall,
65 (64, 'powerpc', 'linux'): linux_ppc64_syscall,
66 }[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]
67
68 @staticmethod
69 def sigreturn():
70 return {
71 (32, 'i386', 'linux'): linux_i386_sigreturn,
72 (64, 'x86-64', 'linux'): linux_amd64_sigreturn,
73 (32, 'arm', 'linux'): linux_arm_sigreturn,
74 (32, 'thumb', 'linux'): linux_arm_sigreturn,
75 }[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]
76
77 class SyscallABI(ABI):
78 """
79 The syscall ABI treats the syscall number as the zeroth argument,
80 which must be loaded into the specified register.
81 """
82 def __init__(self, register_arguments, *a, **kw):
83 self.syscall_register = register_arguments.pop(0)
84 super(SyscallABI, self).__init__(register_arguments, *a, **kw)
85
86
87 class SigreturnABI(SyscallABI):
88 """
89 The sigreturn ABI is similar to the syscall ABI, except that
90 both PC and SP are loaded from the stack. Because of this, there
91 is no 'return' slot necessary on the stack.
92 """
93 returns = False
94
95
96 linux_i386 = ABI([], 4, 0)
97 linux_amd64 = ABI(['rdi','rsi','rdx','rcx','r8','r9'], 8, 0)
98 linux_arm = ABI(['r0', 'r1', 'r2', 'r3'], 8, 0)
99 linux_aarch64 = ABI(['x0', 'x1', 'x2', 'x3'], 16, 0)
100 linux_mips = ABI(['$a0','$a1','$a2','$a3'], 4, 0)
101 linux_ppc = ABI(['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'], 4, 0)
102 linux_ppc64 = ABI(['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'], 8, 0)
103
104 linux_i386_syscall = SyscallABI(['eax', 'ebx', 'ecx', 'edx', 'esi', 'edi', 'ebp'], 4, 0)
105 linux_amd64_syscall = SyscallABI(['rax','rdi', 'rsi', 'rdx', 'r10', 'r8', 'r9'], 8, 0)
106 linux_arm_syscall = SyscallABI(['r7', 'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6'], 4, 0)
107 linux_aarch64_syscall = SyscallABI(['x8', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6'], 16, 0)
108 linux_mips_syscall = SyscallABI(['$v0', '$a0','$a1','$a2','$a3'], 4, 0)
109 linux_ppc_syscall = ABI(['r0', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9'], 4, 0)
110 linux_ppc64_syscall = ABI(['r0', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9'], 8, 0)
111
112 linux_i386_sigreturn = SigreturnABI(['eax'], 4, 0)
113 linux_amd64_sigreturn = SigreturnABI(['rax'], 4, 0)
114 linux_arm_sigreturn = SigreturnABI(['r7'], 4, 0)
115
116 # Fake ABIs used by SROP
117 linux_i386_srop = ABI(['eax'], 4, 0)
118 linux_amd64_srop = ABI(['rax'], 4, 0)
119 linux_arm_srop = ABI(['r7'], 4, 0)
120
121
122 @pwndbg.events.start
123 def update():
124 global abi
125 global linux
126
127 # Detect current ABI of client side by 'show osabi'
128 osabi_string = gdb.execute('show osabi', to_string=True)
129
130 # The return string will be:
131 # The current OS ABI is "auto" (currently "GNU/Linux").
132 match = re.search('currently "([^"]+)"', osabi_string)
133 if match:
134 # 'GNU/Linux': linux
135 # 'none': bare metal
136 abi = match.group(1)
137
138 linux = 'Linux' in abi
139
140 if not linux:
141 msg = M.warn(
142 "The bare metal debugging is enabled since the gdb's osabi is '%s' which is not 'GNU/Linux'.\n"
143 "Ex. the page resolving and memory de-referencing ONLY works on known pages.\n"
144 "This option is based ib gdb client compile arguments (by default) and will be corrected if you load an ELF which has the '.note.ABI-tag' section.\n"
145 "If you are debuging a program that runs on Linux ABI, please select the correct gdb client."
146 % abi
147 )
148 print(msg)
149
150
151 def LinuxOnly(default=None):
152 """Create a decorator that the function will be called when ABI is Linux.
153 Otherwise, return `default`.
154 """
155 def decorator(func):
156 @functools.wraps(func)
157 def caller(*args, **kwargs):
158 if linux:
159 return func(*args, **kwargs)
160 else:
161 return default
162 return caller
163
164 return decorator
165
166
167 # Update when starting the gdb to show warning message for non-Linux ABI user.
168 update()
169
[end of pwndbg/abi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/abi.py b/pwndbg/abi.py
--- a/pwndbg/abi.py
+++ b/pwndbg/abi.py
@@ -125,17 +125,23 @@
global linux
# Detect current ABI of client side by 'show osabi'
- osabi_string = gdb.execute('show osabi', to_string=True)
-
- # The return string will be:
- # The current OS ABI is "auto" (currently "GNU/Linux").
- match = re.search('currently "([^"]+)"', osabi_string)
- if match:
- # 'GNU/Linux': linux
- # 'none': bare metal
- abi = match.group(1)
-
- linux = 'Linux' in abi
+ #
+ # Examples of strings returned by `show osabi`:
+ # 'The current OS ABI is "auto" (currently "GNU/Linux").\nThe default OS ABI is "GNU/Linux".\n'
+ # 'The current OS ABI is "GNU/Linux".\nThe default OS ABI is "GNU/Linux".\n'
+ # 'El actual SO ABI es «auto» (actualmente «GNU/Linux»).\nEl SO ABI predeterminado es «GNU/Linux».\n'
+ # 'The current OS ABI is "auto" (currently "none")'
+ #
+ # As you can see, there might be GDBs with different language versions
+ # and so we have to support it there too.
+ # Lets assume and hope that `current osabi` is returned in first line in all languages...
+ current_osabi = gdb.execute('show osabi', to_string=True).split('\n')[0]
+
+ # Currently we support those osabis:
+ # 'GNU/Linux': linux
+ # 'none': bare metal
+
+ linux = 'GNU/Linux' in current_osabi
if not linux:
msg = M.warn(
| {"golden_diff": "diff --git a/pwndbg/abi.py b/pwndbg/abi.py\n--- a/pwndbg/abi.py\n+++ b/pwndbg/abi.py\n@@ -125,17 +125,23 @@\n global linux\n \n # Detect current ABI of client side by 'show osabi'\n- osabi_string = gdb.execute('show osabi', to_string=True)\n-\n- # The return string will be:\n- # The current OS ABI is \"auto\" (currently \"GNU/Linux\").\n- match = re.search('currently \"([^\"]+)\"', osabi_string)\n- if match:\n- # 'GNU/Linux': linux\n- # 'none': bare metal\n- abi = match.group(1)\n-\n- linux = 'Linux' in abi\n+ #\n+ # Examples of strings returned by `show osabi`:\n+ # 'The current OS ABI is \"auto\" (currently \"GNU/Linux\").\\nThe default OS ABI is \"GNU/Linux\".\\n'\n+ # 'The current OS ABI is \"GNU/Linux\".\\nThe default OS ABI is \"GNU/Linux\".\\n'\n+ # 'El actual SO ABI es \u00abauto\u00bb (actualmente \u00abGNU/Linux\u00bb).\\nEl SO ABI predeterminado es \u00abGNU/Linux\u00bb.\\n'\n+ # 'The current OS ABI is \"auto\" (currently \"none\")'\n+ #\n+ # As you can see, there might be GDBs with different language versions\n+ # and so we have to support it there too.\n+ # Lets assume and hope that `current osabi` is returned in first line in all languages...\n+ current_osabi = gdb.execute('show osabi', to_string=True).split('\\n')[0]\n+\n+ # Currently we support those osabis:\n+ # 'GNU/Linux': linux\n+ # 'none': bare metal\n+\n+ linux = 'GNU/Linux' in current_osabi\n \n if not linux:\n msg = M.warn(\n", "issue": "Pwndbg failing with GDB using language different than English\n### Description\r\n\r\nAfter installing pwndbg with a GDB that uses Spanish language we do fail on detecting whether `osabi` is Linux.\r\n\r\nThis happens after launching GDB:\r\nhttps://prnt.sc/itwf3u\r\n\r\nAnd this is the return of `show osabi` for GDB with spanish language:\r\n```\r\n(gdb) show osabi \r\nEl actual SO ABI es \u00abauto\u00bb (actualmente \u00abGNU/Linux\u00bb).\r\nEl SO ABI predeterminado es \u00abGNU/Linux\u00bb. 
\r\n```\r\n\r\nThis is the code responsible for failure:\r\nhttps://github.com/pwndbg/pwndbg/blob/dev/pwndbg/abi.py#L127-L140\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport functools\nimport re\n\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.color.message as M\n\n\nclass ABI(object):\n \"\"\"\n Encapsulates information about a calling convention.\n \"\"\"\n #: List or registers which should be filled with arguments before\n #: spilling onto the stack.\n register_arguments = []\n\n #: Minimum alignment of the stack.\n #: The value used is min(context.bytes, stack_alignment)\n #: This is necessary as Windows x64 frames must be 32-byte aligned.\n #: \"Alignment\" is considered with respect to the last argument on the stack.\n arg_alignment = 1\n\n #: Minimum number of stack slots used by a function call\n #: This is necessary as Windows x64 requires using 4 slots on the stack\n stack_minimum = 0\n\n #: Indicates that this ABI returns to the next address on the slot\n returns = True\n\n def __init__(self, regs, align, minimum):\n self.register_arguments = regs\n self.arg_alignment = align\n self.stack_minimum = minimum\n\n @staticmethod\n def default():\n return {\n (32, 'i386', 'linux'): linux_i386,\n (64, 'x86-64', 'linux'): linux_amd64,\n (64, 'aarch64', 'linux'): linux_aarch64,\n (32, 'arm', 'linux'): linux_arm,\n (32, 'thumb', 'linux'): linux_arm,\n (32, 'mips', 'linux'): linux_mips,\n (32, 'powerpc', 'linux'): linux_ppc,\n (64, 'powerpc', 'linux'): linux_ppc64,\n }[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]\n\n @staticmethod\n def syscall():\n return {\n (32, 'i386', 'linux'): linux_i386_syscall,\n (64, 'x86-64', 'linux'): linux_amd64_syscall,\n (64, 'aarch64', 'linux'): linux_aarch64_syscall,\n (32, 'arm', 'linux'): linux_arm_syscall,\n (32, 'thumb', 'linux'): linux_arm_syscall,\n (32, 'mips', 'linux'): linux_mips_syscall,\n (32, 'powerpc', 'linux'): linux_ppc_syscall,\n (64, 'powerpc', 'linux'): linux_ppc64_syscall,\n }[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]\n\n @staticmethod\n def sigreturn():\n return {\n (32, 'i386', 'linux'): linux_i386_sigreturn,\n (64, 'x86-64', 'linux'): linux_amd64_sigreturn,\n (32, 'arm', 'linux'): linux_arm_sigreturn,\n (32, 'thumb', 'linux'): linux_arm_sigreturn,\n }[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]\n\nclass SyscallABI(ABI):\n \"\"\"\n The syscall ABI treats the syscall number as the zeroth argument,\n which must be loaded into the specified register.\n \"\"\"\n def __init__(self, register_arguments, *a, **kw):\n self.syscall_register = register_arguments.pop(0)\n super(SyscallABI, self).__init__(register_arguments, *a, **kw)\n\n\nclass SigreturnABI(SyscallABI):\n \"\"\"\n The sigreturn ABI is similar to the syscall ABI, except that\n both PC and SP are loaded from the stack. 
Because of this, there\n is no 'return' slot necessary on the stack.\n \"\"\"\n returns = False\n\n\nlinux_i386 = ABI([], 4, 0)\nlinux_amd64 = ABI(['rdi','rsi','rdx','rcx','r8','r9'], 8, 0)\nlinux_arm = ABI(['r0', 'r1', 'r2', 'r3'], 8, 0)\nlinux_aarch64 = ABI(['x0', 'x1', 'x2', 'x3'], 16, 0)\nlinux_mips = ABI(['$a0','$a1','$a2','$a3'], 4, 0)\nlinux_ppc = ABI(['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'], 4, 0)\nlinux_ppc64 = ABI(['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'], 8, 0)\n\nlinux_i386_syscall = SyscallABI(['eax', 'ebx', 'ecx', 'edx', 'esi', 'edi', 'ebp'], 4, 0)\nlinux_amd64_syscall = SyscallABI(['rax','rdi', 'rsi', 'rdx', 'r10', 'r8', 'r9'], 8, 0)\nlinux_arm_syscall = SyscallABI(['r7', 'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6'], 4, 0)\nlinux_aarch64_syscall = SyscallABI(['x8', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6'], 16, 0)\nlinux_mips_syscall = SyscallABI(['$v0', '$a0','$a1','$a2','$a3'], 4, 0)\nlinux_ppc_syscall = ABI(['r0', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9'], 4, 0)\nlinux_ppc64_syscall = ABI(['r0', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9'], 8, 0)\n\nlinux_i386_sigreturn = SigreturnABI(['eax'], 4, 0)\nlinux_amd64_sigreturn = SigreturnABI(['rax'], 4, 0)\nlinux_arm_sigreturn = SigreturnABI(['r7'], 4, 0)\n\n# Fake ABIs used by SROP\nlinux_i386_srop = ABI(['eax'], 4, 0)\nlinux_amd64_srop = ABI(['rax'], 4, 0)\nlinux_arm_srop = ABI(['r7'], 4, 0)\n\n\[email protected]\ndef update():\n global abi\n global linux\n\n # Detect current ABI of client side by 'show osabi'\n osabi_string = gdb.execute('show osabi', to_string=True)\n\n # The return string will be:\n # The current OS ABI is \"auto\" (currently \"GNU/Linux\").\n match = re.search('currently \"([^\"]+)\"', osabi_string)\n if match:\n # 'GNU/Linux': linux\n # 'none': bare metal\n abi = match.group(1)\n\n linux = 'Linux' in abi\n\n if not linux:\n msg = M.warn(\n \"The bare metal debugging is enabled since the gdb's osabi is '%s' which is not 'GNU/Linux'.\\n\"\n \"Ex. the page resolving and memory de-referencing ONLY works on known pages.\\n\"\n \"This option is based ib gdb client compile arguments (by default) and will be corrected if you load an ELF which has the '.note.ABI-tag' section.\\n\"\n \"If you are debuging a program that runs on Linux ABI, please select the correct gdb client.\"\n % abi\n )\n print(msg)\n\n\ndef LinuxOnly(default=None):\n \"\"\"Create a decorator that the function will be called when ABI is Linux.\n Otherwise, return `default`.\n \"\"\"\n def decorator(func):\n @functools.wraps(func)\n def caller(*args, **kwargs):\n if linux:\n return func(*args, **kwargs)\n else:\n return default\n return caller\n\n return decorator\n\n\n# Update when starting the gdb to show warning message for non-Linux ABI user.\nupdate()\n", "path": "pwndbg/abi.py"}]} | 3,021 | 436 |
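The pwndbg record's golden diff drops the English-only regex and instead checks for the `GNU/Linux` token on the first line of `show osabi`. A self-contained sketch of that check, using the sample strings quoted in the diff so no gdb session is required:

```python
# Only the "GNU/Linux" token is assumed to survive translation, so match on it
# rather than on the English phrase 'currently "..."'.
samples = [
    'The current OS ABI is "auto" (currently "GNU/Linux").\nThe default OS ABI is "GNU/Linux".\n',
    'El actual SO ABI es «auto» (actualmente «GNU/Linux»).\nEl SO ABI predeterminado es «GNU/Linux».\n',
    'The current OS ABI is "auto" (currently "none")\n',
]

for osabi_output in samples:
    current_osabi = osabi_output.split("\n")[0]   # first line only, any language
    linux = "GNU/Linux" in current_osabi
    print(linux)   # True, True, False
```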