Dataset columns:
problem_id: string (lengths 18-22)
source: string (1 class)
task_type: string (1 class)
in_source_id: string (lengths 13-58)
prompt: string (lengths 1.71k-18.9k)
golden_diff: string (lengths 145-5.13k)
verification_info: string (lengths 465-23.6k)
num_tokens_prompt: int64 (556-4.1k)
num_tokens_diff: int64 (47-1.02k)
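The records below follow this schema: each row pairs a GitHub issue prompt with the golden patch that resolves it, plus verification metadata and token counts. As a quick illustration, here is a minimal sketch of loading and inspecting one record with the Hugging Face `datasets` library; the dataset id is copied from the `source` column and the "train" split name is an assumption, so adjust both if this dump is hosted under a different id or split.

```python
# Minimal sketch, not a definitive loader: the dataset id is taken from the
# `source` column and the "train" split name is an assumption.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])       # e.g. "gh_patches_debug_17315"
print(row["in_source_id"])     # upstream reference, e.g. "astronomer__astro-sdk-1160"
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# `verification_info` appears to be a JSON blob carrying the golden diff,
# the issue text, and the pre-patch file contents ("before_files").
info = json.loads(row["verification_info"])
print(sorted(info.keys()))

# `golden_diff` is a unified diff meant to be applied with `git apply`.
print(row["golden_diff"].splitlines()[0])
```

Keeping the golden diff and the pre-patch files in the same record means a candidate patch can be checked by applying it to the `before_files` contents and comparing the result against `golden_diff`.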
problem_id: gh_patches_debug_17315
source: rasdani/github-patches
task_type: git_diff
in_source_id: astronomer__astro-sdk-1160
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Row count not working for snowflake **Describe the bug** Currently row count fails for snowflake **Expected behavior** Row count should work snowflake </issue> <code> [start of python-sdk/src/astro/table.py] 1 from __future__ import annotations 2 3 import random 4 import string 5 from typing import Any 6 7 from attr import define, field, fields_dict 8 from sqlalchemy import Column, MetaData, func, select 9 10 from astro.airflow.datasets import Dataset 11 from astro.databases import create_database 12 13 MAX_TABLE_NAME_LENGTH = 62 14 TEMP_PREFIX = "_tmp_" 15 16 17 @define 18 class Metadata: 19 """ 20 Contains additional information to access a SQL Table, which is very likely optional and, in some cases, may 21 be database-specific. 22 23 :param schema: A schema name 24 :param database: A database name 25 """ 26 27 # This property is used by several databases, including: Postgres, Snowflake and BigQuery ("namespace") 28 schema: str | None = None 29 database: str | None = None 30 31 def is_empty(self) -> bool: 32 """Check if all the fields are None.""" 33 return all(getattr(self, field_name) is None for field_name in fields_dict(self.__class__)) 34 35 36 @define(slots=False) 37 class BaseTable: 38 """ 39 Base class that has information necessary to access a SQL Table. It is agnostic to the database type. 40 If no name is given, it auto-generates a name for the Table and considers it temporary. 41 42 Temporary tables are prefixed with the prefix TEMP_PREFIX. 43 44 :param name: The name of the database table. If name not provided then it would create a temporary name 45 :param conn_id: The Airflow connection id. This will be used to identify the right database type at the runtime 46 :param metadata: A metadata object which will have database or schema name 47 :param columns: columns which define the database table schema. 48 :sphinx-autoapi-skip: 49 """ 50 51 template_fields = ("name",) 52 53 # TODO: discuss alternative names to this class, since it contains metadata as opposed to be the 54 # SQL table itself 55 # Some ideas: TableRef, TableMetadata, TableData, TableDataset 56 _name: str = field(default="") 57 conn_id: str = field(default="") 58 # Setting converter allows passing a dictionary to metadata arg 59 metadata: Metadata = field( 60 factory=Metadata, 61 converter=lambda val: Metadata(**val) if isinstance(val, dict) else val, 62 ) 63 columns: list[Column] = field(factory=list) 64 temp: bool = field(default=False) 65 66 # We need this method to pickle Table object, without this we cannot push/pull this object from xcom. 67 def __getstate__(self): 68 return self.__dict__ 69 70 def __attrs_post_init__(self) -> None: 71 if not self._name or self._name.startswith("_tmp"): 72 self.temp = True 73 74 def _create_unique_table_name(self, prefix: str = "") -> str: 75 """ 76 If a table is instantiated without a name, create a unique table for it. 77 This new name should be compatible with all supported databases. 
78 """ 79 schema_length = len((self.metadata and self.metadata.schema) or "") + 1 80 prefix_length = len(prefix) 81 82 unique_id = random.choice(string.ascii_lowercase) + "".join( 83 random.choice(string.ascii_lowercase + string.digits) 84 for _ in range(MAX_TABLE_NAME_LENGTH - schema_length - prefix_length) 85 ) 86 if prefix: 87 unique_id = f"{prefix}{unique_id}" 88 89 return unique_id 90 91 def create_similar_table(self) -> Table: 92 """ 93 Create a new table with a unique name but with the same metadata. 94 """ 95 return Table( # type: ignore 96 name=self._create_unique_table_name(), 97 conn_id=self.conn_id, 98 metadata=self.metadata, 99 ) 100 101 @property 102 def sqlalchemy_metadata(self) -> MetaData: 103 """Return the Sqlalchemy metadata for the given table.""" 104 if self.metadata and self.metadata.schema: 105 alchemy_metadata = MetaData(schema=self.metadata.schema) 106 else: 107 alchemy_metadata = MetaData() 108 return alchemy_metadata 109 110 @property 111 def name(self) -> str: 112 """ 113 Return either the user-defined name or auto-generate one. 114 :sphinx-autoapi-skip: 115 """ 116 if self.temp and not self._name: 117 self._name = self._create_unique_table_name(TEMP_PREFIX) 118 return self._name 119 120 @name.setter 121 def name(self, value: str) -> None: 122 """ 123 Set the table name. Once this happens, the table is no longer considered temporary. 124 """ 125 if not isinstance(value, property) and value != self._name: 126 self._name = value 127 self.temp = False 128 129 @property 130 def row_count(self) -> Any: 131 """ 132 Return the row count of table. 133 """ 134 db = create_database(self.conn_id) 135 tb = db.get_sqla_table(table=self) 136 query = select(func.count("*")).select_from(tb) 137 return db.run_sql(query).scalar() 138 139 def to_json(self): 140 return { 141 "class": "Table", 142 "name": self.name, 143 "metadata": { 144 "schema": self.metadata.schema, 145 "database": self.metadata.database, 146 }, 147 "temp": self.temp, 148 "conn_id": self.conn_id, 149 } 150 151 @classmethod 152 def from_json(cls, obj: dict): 153 return Table( 154 name=obj["name"], 155 metadata=Metadata(**obj["metadata"]), 156 temp=obj["temp"], 157 conn_id=obj["conn_id"], 158 ) 159 160 def openlineage_dataset_name(self) -> str: 161 """ 162 Returns the open lineage dataset name as per 163 https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md 164 """ 165 database = create_database(self.conn_id) 166 return database.openlineage_dataset_name(table=self) 167 168 def openlineage_dataset_namespace(self) -> str: 169 """ 170 Returns the open lineage dataset namespace as per 171 https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md 172 """ 173 database = create_database(self.conn_id) 174 return database.openlineage_dataset_namespace() 175 176 177 @define(slots=False) 178 class TempTable(BaseTable): 179 """ 180 Internal class to represent a Temporary table 181 182 :sphinx-autoapi-skip: 183 """ 184 185 temp: bool = field(default=True) 186 187 188 @define(slots=False) 189 class Table(BaseTable, Dataset): 190 """ 191 User-facing class that has information necessary to access a SQL Table. It is agnostic to the database type. 192 If no name is given, it auto-generates a name for the Table and considers it temporary. 193 194 Temporary tables are prefixed with the prefix TEMP_PREFIX. 195 196 :param name: The name of the database table. If name not provided then it would create a temporary name 197 :param conn_id: The Airflow connection id. 
This will be used to identify the right database type at the runtime 198 :param metadata: A metadata object which will have database or schema name 199 :param columns: columns which define the database table schema. 200 """ 201 202 uri: str = field(init=False) 203 extra: dict | None = field(init=False, factory=dict) 204 205 def __new__(cls, *args, **kwargs): 206 name = kwargs.get("name") or args and args[0] or "" 207 temp = kwargs.get("temp", False) 208 if temp or (not name or name.startswith("_tmp")): 209 return TempTable(*args, **kwargs) 210 return super().__new__(cls) 211 212 @uri.default 213 def _path_to_dataset_uri(self) -> str: 214 """Build a URI to be passed to Dataset obj introduced in Airflow 2.4""" 215 from urllib.parse import urlencode, urlparse 216 217 path = f"astro://{self.conn_id}@" 218 db_extra = {"table": self.name} 219 if self.metadata.schema: 220 db_extra["schema"] = self.metadata.schema 221 if self.metadata.database: 222 db_extra["database"] = self.metadata.database 223 parsed_url = urlparse(url=path) 224 new_parsed_url = parsed_url._replace(query=urlencode(db_extra)) 225 return new_parsed_url.geturl() 226 [end of python-sdk/src/astro/table.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python-sdk/src/astro/table.py b/python-sdk/src/astro/table.py --- a/python-sdk/src/astro/table.py +++ b/python-sdk/src/astro/table.py @@ -5,7 +5,7 @@ from typing import Any from attr import define, field, fields_dict -from sqlalchemy import Column, MetaData, func, select +from sqlalchemy import Column, MetaData from astro.airflow.datasets import Dataset from astro.databases import create_database @@ -132,9 +132,10 @@ Return the row count of table. """ db = create_database(self.conn_id) - tb = db.get_sqla_table(table=self) - query = select(func.count("*")).select_from(tb) - return db.run_sql(query).scalar() + result = db.run_sql( + f"select count(*) from {db.get_table_qualified_name(self)}" # skipcq: BAN-B608 + ).scalar() + return result def to_json(self): return {
{"golden_diff": "diff --git a/python-sdk/src/astro/table.py b/python-sdk/src/astro/table.py\n--- a/python-sdk/src/astro/table.py\n+++ b/python-sdk/src/astro/table.py\n@@ -5,7 +5,7 @@\n from typing import Any\n \n from attr import define, field, fields_dict\n-from sqlalchemy import Column, MetaData, func, select\n+from sqlalchemy import Column, MetaData\n \n from astro.airflow.datasets import Dataset\n from astro.databases import create_database\n@@ -132,9 +132,10 @@\n Return the row count of table.\n \"\"\"\n db = create_database(self.conn_id)\n- tb = db.get_sqla_table(table=self)\n- query = select(func.count(\"*\")).select_from(tb)\n- return db.run_sql(query).scalar()\n+ result = db.run_sql(\n+ f\"select count(*) from {db.get_table_qualified_name(self)}\" # skipcq: BAN-B608\n+ ).scalar()\n+ return result\n \n def to_json(self):\n return {\n", "issue": "Row count not working for snowflake\n**Describe the bug**\r\nCurrently row count fails for snowflake \r\n\r\n**Expected behavior**\r\nRow count should work snowflake\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport random\nimport string\nfrom typing import Any\n\nfrom attr import define, field, fields_dict\nfrom sqlalchemy import Column, MetaData, func, select\n\nfrom astro.airflow.datasets import Dataset\nfrom astro.databases import create_database\n\nMAX_TABLE_NAME_LENGTH = 62\nTEMP_PREFIX = \"_tmp_\"\n\n\n@define\nclass Metadata:\n \"\"\"\n Contains additional information to access a SQL Table, which is very likely optional and, in some cases, may\n be database-specific.\n\n :param schema: A schema name\n :param database: A database name\n \"\"\"\n\n # This property is used by several databases, including: Postgres, Snowflake and BigQuery (\"namespace\")\n schema: str | None = None\n database: str | None = None\n\n def is_empty(self) -> bool:\n \"\"\"Check if all the fields are None.\"\"\"\n return all(getattr(self, field_name) is None for field_name in fields_dict(self.__class__))\n\n\n@define(slots=False)\nclass BaseTable:\n \"\"\"\n Base class that has information necessary to access a SQL Table. It is agnostic to the database type.\n If no name is given, it auto-generates a name for the Table and considers it temporary.\n\n Temporary tables are prefixed with the prefix TEMP_PREFIX.\n\n :param name: The name of the database table. If name not provided then it would create a temporary name\n :param conn_id: The Airflow connection id. 
This will be used to identify the right database type at the runtime\n :param metadata: A metadata object which will have database or schema name\n :param columns: columns which define the database table schema.\n :sphinx-autoapi-skip:\n \"\"\"\n\n template_fields = (\"name\",)\n\n # TODO: discuss alternative names to this class, since it contains metadata as opposed to be the\n # SQL table itself\n # Some ideas: TableRef, TableMetadata, TableData, TableDataset\n _name: str = field(default=\"\")\n conn_id: str = field(default=\"\")\n # Setting converter allows passing a dictionary to metadata arg\n metadata: Metadata = field(\n factory=Metadata,\n converter=lambda val: Metadata(**val) if isinstance(val, dict) else val,\n )\n columns: list[Column] = field(factory=list)\n temp: bool = field(default=False)\n\n # We need this method to pickle Table object, without this we cannot push/pull this object from xcom.\n def __getstate__(self):\n return self.__dict__\n\n def __attrs_post_init__(self) -> None:\n if not self._name or self._name.startswith(\"_tmp\"):\n self.temp = True\n\n def _create_unique_table_name(self, prefix: str = \"\") -> str:\n \"\"\"\n If a table is instantiated without a name, create a unique table for it.\n This new name should be compatible with all supported databases.\n \"\"\"\n schema_length = len((self.metadata and self.metadata.schema) or \"\") + 1\n prefix_length = len(prefix)\n\n unique_id = random.choice(string.ascii_lowercase) + \"\".join(\n random.choice(string.ascii_lowercase + string.digits)\n for _ in range(MAX_TABLE_NAME_LENGTH - schema_length - prefix_length)\n )\n if prefix:\n unique_id = f\"{prefix}{unique_id}\"\n\n return unique_id\n\n def create_similar_table(self) -> Table:\n \"\"\"\n Create a new table with a unique name but with the same metadata.\n \"\"\"\n return Table( # type: ignore\n name=self._create_unique_table_name(),\n conn_id=self.conn_id,\n metadata=self.metadata,\n )\n\n @property\n def sqlalchemy_metadata(self) -> MetaData:\n \"\"\"Return the Sqlalchemy metadata for the given table.\"\"\"\n if self.metadata and self.metadata.schema:\n alchemy_metadata = MetaData(schema=self.metadata.schema)\n else:\n alchemy_metadata = MetaData()\n return alchemy_metadata\n\n @property\n def name(self) -> str:\n \"\"\"\n Return either the user-defined name or auto-generate one.\n :sphinx-autoapi-skip:\n \"\"\"\n if self.temp and not self._name:\n self._name = self._create_unique_table_name(TEMP_PREFIX)\n return self._name\n\n @name.setter\n def name(self, value: str) -> None:\n \"\"\"\n Set the table name. 
Once this happens, the table is no longer considered temporary.\n \"\"\"\n if not isinstance(value, property) and value != self._name:\n self._name = value\n self.temp = False\n\n @property\n def row_count(self) -> Any:\n \"\"\"\n Return the row count of table.\n \"\"\"\n db = create_database(self.conn_id)\n tb = db.get_sqla_table(table=self)\n query = select(func.count(\"*\")).select_from(tb)\n return db.run_sql(query).scalar()\n\n def to_json(self):\n return {\n \"class\": \"Table\",\n \"name\": self.name,\n \"metadata\": {\n \"schema\": self.metadata.schema,\n \"database\": self.metadata.database,\n },\n \"temp\": self.temp,\n \"conn_id\": self.conn_id,\n }\n\n @classmethod\n def from_json(cls, obj: dict):\n return Table(\n name=obj[\"name\"],\n metadata=Metadata(**obj[\"metadata\"]),\n temp=obj[\"temp\"],\n conn_id=obj[\"conn_id\"],\n )\n\n def openlineage_dataset_name(self) -> str:\n \"\"\"\n Returns the open lineage dataset name as per\n https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md\n \"\"\"\n database = create_database(self.conn_id)\n return database.openlineage_dataset_name(table=self)\n\n def openlineage_dataset_namespace(self) -> str:\n \"\"\"\n Returns the open lineage dataset namespace as per\n https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md\n \"\"\"\n database = create_database(self.conn_id)\n return database.openlineage_dataset_namespace()\n\n\n@define(slots=False)\nclass TempTable(BaseTable):\n \"\"\"\n Internal class to represent a Temporary table\n\n :sphinx-autoapi-skip:\n \"\"\"\n\n temp: bool = field(default=True)\n\n\n@define(slots=False)\nclass Table(BaseTable, Dataset):\n \"\"\"\n User-facing class that has information necessary to access a SQL Table. It is agnostic to the database type.\n If no name is given, it auto-generates a name for the Table and considers it temporary.\n\n Temporary tables are prefixed with the prefix TEMP_PREFIX.\n\n :param name: The name of the database table. If name not provided then it would create a temporary name\n :param conn_id: The Airflow connection id. This will be used to identify the right database type at the runtime\n :param metadata: A metadata object which will have database or schema name\n :param columns: columns which define the database table schema.\n \"\"\"\n\n uri: str = field(init=False)\n extra: dict | None = field(init=False, factory=dict)\n\n def __new__(cls, *args, **kwargs):\n name = kwargs.get(\"name\") or args and args[0] or \"\"\n temp = kwargs.get(\"temp\", False)\n if temp or (not name or name.startswith(\"_tmp\")):\n return TempTable(*args, **kwargs)\n return super().__new__(cls)\n\n @uri.default\n def _path_to_dataset_uri(self) -> str:\n \"\"\"Build a URI to be passed to Dataset obj introduced in Airflow 2.4\"\"\"\n from urllib.parse import urlencode, urlparse\n\n path = f\"astro://{self.conn_id}@\"\n db_extra = {\"table\": self.name}\n if self.metadata.schema:\n db_extra[\"schema\"] = self.metadata.schema\n if self.metadata.database:\n db_extra[\"database\"] = self.metadata.database\n parsed_url = urlparse(url=path)\n new_parsed_url = parsed_url._replace(query=urlencode(db_extra))\n return new_parsed_url.geturl()\n", "path": "python-sdk/src/astro/table.py"}]}
num_tokens_prompt: 2,886
num_tokens_diff: 228
problem_id: gh_patches_debug_3801
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-telemetry__opentelemetry-python-167
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Host docs, build on release Decide where to host generated docs, configure CI to generate (and possibly upload) new docs on each release. See #6 for CI bootstrapping. </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # http://www.sphinx-doc.org/en/master/config 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 13 import os 14 import sys 15 16 sys.path.insert(0, os.path.abspath("../opentelemetry-api/src/")) 17 18 19 # -- Project information ----------------------------------------------------- 20 21 project = "OpenTelemetry" 22 copyright = "2019, OpenTelemetry Authors" 23 author = "OpenTelemetry Authors" 24 25 26 # -- General configuration --------------------------------------------------- 27 28 # Easy automatic cross-references for `code in backticks` 29 default_role = "any" 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = [ 35 # API doc generation 36 "sphinx.ext.autodoc", 37 # Support for google-style docstrings 38 "sphinx.ext.napoleon", 39 # Infer types from hints instead of docstrings 40 "sphinx_autodoc_typehints", 41 # Add links to source from generated docs 42 "sphinx.ext.viewcode", 43 # Link to other sphinx docs 44 "sphinx.ext.intersphinx", 45 ] 46 47 intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)} 48 49 # http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky 50 # Sphinx will warn about all references where the target cannot be found. 51 nitpicky = True 52 nitpick_ignore = [] 53 54 # Add any paths that contain templates here, relative to this directory. 55 templates_path = ["_templates"] 56 57 # List of patterns, relative to source directory, that match files and 58 # directories to ignore when looking for source files. 59 # This pattern also affects html_static_path and html_extra_path. 60 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 61 62 autodoc_default_options = { 63 "members": True, 64 "undoc-members": True, 65 "show-inheritance": True, 66 "member-order": "bysource", 67 } 68 69 # -- Options for HTML output ------------------------------------------------- 70 71 # The theme to use for HTML and HTML Help pages. See the documentation for 72 # a list of builtin themes. 73 # 74 html_theme = "sphinx_rtd_theme" 75 76 # Add any paths that contain custom static files (such as style sheets) here, 77 # relative to this directory. They are copied after the builtin static files, 78 # so a file named "default.css" will overwrite the builtin "default.css". 79 html_static_path = [] 80 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -42,6 +42,9 @@ "sphinx.ext.viewcode", # Link to other sphinx docs "sphinx.ext.intersphinx", + # Add a .nojekyll file to the generated HTML docs + # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing + "sphinx.ext.githubpages", ] intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -42,6 +42,9 @@\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n+ # Add a .nojekyll file to the generated HTML docs\n+ # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n+ \"sphinx.ext.githubpages\",\n ]\n \n intersphinx_mapping = {\"python\": (\"https://docs.python.org/3/\", None)}\n", "issue": "Host docs, build on release\nDecide where to host generated docs, configure CI to generate (and possibly upload) new docs on each release.\r\n\r\nSee #6 for CI bootstrapping.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../opentelemetry-api/src/\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry\"\ncopyright = \"2019, OpenTelemetry Authors\"\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3/\", None)}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\nnitpick_ignore = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n", "path": "docs/conf.py"}]}
num_tokens_prompt: 1,331
num_tokens_diff: 129
problem_id: gh_patches_debug_35773
source: rasdani/github-patches
task_type: git_diff
in_source_id: tiangolo__fastapi-756
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> asyncpg uuid implementation not recognized by fastapi because of too strict typechecking ### Describe the bug An alternative implementation of the `UUID` type is not serialized by fastapi but throws an error instead. This is because it is of type `asyncpg.pgproto.pgproto.UUID` instead of type `uuid.UUID`. However it is implemented in such a way that it does the exact same things as the regular UUID but just faster. Also, ```python isinstance(asyncpg.pgproto.pgproto.UUID(), uuid.UUID) == True ``` this should make fastapi able to recognize it as a uuid. ### To Reproduce 1. Create a file called uuid_error.py with: ```Python import uuid from fastapi import FastAPI from asyncpg.pgproto import pgproto app = FastAPI() @app.get("/normal_uuid") def return_normal_uuid(): regular_uuid = uuid.UUID("a10ff360-3b1e-4984-a26f-d3ab460bdb51") assert isinstance(regular_uuid, uuid.UUID) return {"normal_uuid": regular_uuid} @app.get("/fast_uuid") def return_fast_uuid(): asyncpg_uuid = pgproto.UUID("a10ff360-3b1e-4984-a26f-d3ab460bdb51") assert isinstance(asyncpg_uuid, uuid.UUID) return {"fast_uuid": asyncpg_uuid} ``` 2. Run it with uvicorn uuid_error:app 3. Open the browser and call the endpoint `/fast_uuid`. 4. It returns 'internal server error' . 5. But I expected it to return `{"fast_uuid":"a10ff360-3b1e-4984-a26f-d3ab460bdb51"}`. ### Expected behavior I expected fastapi to jsonify the uuid and send, but instead it returns 'internal server error'. The precise error being thrown in the server is: ``` ValueError: [KeyError(<class 'asyncpg.pgproto.pgproto.UUID'>), TypeError("'asyncpg.pgproto.pgproto.UUID' object is not iterable"), TypeError('vars() argument must have __dict__ attribute')] ``` ### Environment - OS: Linux Mint 19 Tara - FastAPI Version: 0.44.0 - Python version: 3.8.0 ### Additional context I initially made an issue in the asyncpg repo asking them if they could fix it: https://github.com/MagicStack/asyncpg/issues/512 I think it's usefull as additional context for this issue. The problem can be solved here in the fastapi code by improving the code a little. I'm working on a PR, this line seems to be the issue: https://github.com/tiangolo/fastapi/blob/c200bc2240d48a4a27e96fba536351038eafc44f/fastapi/encoders.py#L113 I think it shouldn't use `type()` but it should instead use `isinstance()` this will make for slightly looser typechecking but I think that might be a good thing in this case? </issue> <code> [start of fastapi/encoders.py] 1 from enum import Enum 2 from types import GeneratorType 3 from typing import Any, Dict, List, Set, Union 4 5 from fastapi.logger import logger 6 from fastapi.utils import PYDANTIC_1 7 from pydantic import BaseModel 8 from pydantic.json import ENCODERS_BY_TYPE 9 10 SetIntStr = Set[Union[int, str]] 11 DictIntStrAny = Dict[Union[int, str], Any] 12 13 14 def jsonable_encoder( 15 obj: Any, 16 include: Union[SetIntStr, DictIntStrAny] = None, 17 exclude: Union[SetIntStr, DictIntStrAny] = set(), 18 by_alias: bool = True, 19 skip_defaults: bool = None, 20 exclude_unset: bool = False, 21 include_none: bool = True, 22 custom_encoder: dict = {}, 23 sqlalchemy_safe: bool = True, 24 ) -> Any: 25 if skip_defaults is not None: 26 logger.warning( # pragma: nocover 27 "skip_defaults in jsonable_encoder has been deprecated in favor of " 28 "exclude_unset to keep in line with Pydantic v1, support for it will be " 29 "removed soon." 
30 ) 31 if include is not None and not isinstance(include, set): 32 include = set(include) 33 if exclude is not None and not isinstance(exclude, set): 34 exclude = set(exclude) 35 if isinstance(obj, BaseModel): 36 encoder = getattr(obj.Config, "json_encoders", {}) 37 if custom_encoder: 38 encoder.update(custom_encoder) 39 if PYDANTIC_1: 40 obj_dict = obj.dict( 41 include=include, 42 exclude=exclude, 43 by_alias=by_alias, 44 exclude_unset=bool(exclude_unset or skip_defaults), 45 ) 46 else: # pragma: nocover 47 obj_dict = obj.dict( 48 include=include, 49 exclude=exclude, 50 by_alias=by_alias, 51 skip_defaults=bool(exclude_unset or skip_defaults), 52 ) 53 return jsonable_encoder( 54 obj_dict, 55 include_none=include_none, 56 custom_encoder=encoder, 57 sqlalchemy_safe=sqlalchemy_safe, 58 ) 59 if isinstance(obj, Enum): 60 return obj.value 61 if isinstance(obj, (str, int, float, type(None))): 62 return obj 63 if isinstance(obj, dict): 64 encoded_dict = {} 65 for key, value in obj.items(): 66 if ( 67 ( 68 not sqlalchemy_safe 69 or (not isinstance(key, str)) 70 or (not key.startswith("_sa")) 71 ) 72 and (value is not None or include_none) 73 and ((include and key in include) or key not in exclude) 74 ): 75 encoded_key = jsonable_encoder( 76 key, 77 by_alias=by_alias, 78 exclude_unset=exclude_unset, 79 include_none=include_none, 80 custom_encoder=custom_encoder, 81 sqlalchemy_safe=sqlalchemy_safe, 82 ) 83 encoded_value = jsonable_encoder( 84 value, 85 by_alias=by_alias, 86 exclude_unset=exclude_unset, 87 include_none=include_none, 88 custom_encoder=custom_encoder, 89 sqlalchemy_safe=sqlalchemy_safe, 90 ) 91 encoded_dict[encoded_key] = encoded_value 92 return encoded_dict 93 if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)): 94 encoded_list = [] 95 for item in obj: 96 encoded_list.append( 97 jsonable_encoder( 98 item, 99 include=include, 100 exclude=exclude, 101 by_alias=by_alias, 102 exclude_unset=exclude_unset, 103 include_none=include_none, 104 custom_encoder=custom_encoder, 105 sqlalchemy_safe=sqlalchemy_safe, 106 ) 107 ) 108 return encoded_list 109 errors: List[Exception] = [] 110 try: 111 if custom_encoder and type(obj) in custom_encoder: 112 encoder = custom_encoder[type(obj)] 113 else: 114 encoder = ENCODERS_BY_TYPE[type(obj)] 115 return encoder(obj) 116 except KeyError as e: 117 errors.append(e) 118 try: 119 data = dict(obj) 120 except Exception as e: 121 errors.append(e) 122 try: 123 data = vars(obj) 124 except Exception as e: 125 errors.append(e) 126 raise ValueError(errors) 127 return jsonable_encoder( 128 data, 129 by_alias=by_alias, 130 exclude_unset=exclude_unset, 131 include_none=include_none, 132 custom_encoder=custom_encoder, 133 sqlalchemy_safe=sqlalchemy_safe, 134 ) 135 [end of fastapi/encoders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fastapi/encoders.py b/fastapi/encoders.py --- a/fastapi/encoders.py +++ b/fastapi/encoders.py @@ -1,6 +1,6 @@ from enum import Enum from types import GeneratorType -from typing import Any, Dict, List, Set, Union +from typing import Any, Callable, Dict, List, Set, Tuple, Union from fastapi.utils import PYDANTIC_1, logger from pydantic import BaseModel @@ -10,6 +10,21 @@ DictIntStrAny = Dict[Union[int, str], Any] +def generate_encoders_by_class_tuples( + type_encoder_map: Dict[Any, Callable] +) -> Dict[Callable, Tuple]: + encoders_by_classes: Dict[Callable, List] = {} + for type_, encoder in type_encoder_map.items(): + encoders_by_classes.setdefault(encoder, []).append(type_) + encoders_by_class_tuples: Dict[Callable, Tuple] = {} + for encoder, classes in encoders_by_classes.items(): + encoders_by_class_tuples[encoder] = tuple(classes) + return encoders_by_class_tuples + + +encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE) + + def jsonable_encoder( obj: Any, include: Union[SetIntStr, DictIntStrAny] = None, @@ -105,24 +120,31 @@ ) ) return encoded_list + + if custom_encoder: + if type(obj) in custom_encoder: + return custom_encoder[type(obj)](obj) + else: + for encoder_type, encoder in custom_encoder.items(): + if isinstance(obj, encoder_type): + return encoder(obj) + + if type(obj) in ENCODERS_BY_TYPE: + return ENCODERS_BY_TYPE[type(obj)](obj) + for encoder, classes_tuple in encoders_by_class_tuples.items(): + if isinstance(obj, classes_tuple): + return encoder(obj) + errors: List[Exception] = [] try: - if custom_encoder and type(obj) in custom_encoder: - encoder = custom_encoder[type(obj)] - else: - encoder = ENCODERS_BY_TYPE[type(obj)] - return encoder(obj) - except KeyError as e: + data = dict(obj) + except Exception as e: errors.append(e) try: - data = dict(obj) + data = vars(obj) except Exception as e: errors.append(e) - try: - data = vars(obj) - except Exception as e: - errors.append(e) - raise ValueError(errors) + raise ValueError(errors) return jsonable_encoder( data, by_alias=by_alias,
{"golden_diff": "diff --git a/fastapi/encoders.py b/fastapi/encoders.py\n--- a/fastapi/encoders.py\n+++ b/fastapi/encoders.py\n@@ -1,6 +1,6 @@\n from enum import Enum\n from types import GeneratorType\n-from typing import Any, Dict, List, Set, Union\n+from typing import Any, Callable, Dict, List, Set, Tuple, Union\n \n from fastapi.utils import PYDANTIC_1, logger\n from pydantic import BaseModel\n@@ -10,6 +10,21 @@\n DictIntStrAny = Dict[Union[int, str], Any]\n \n \n+def generate_encoders_by_class_tuples(\n+ type_encoder_map: Dict[Any, Callable]\n+) -> Dict[Callable, Tuple]:\n+ encoders_by_classes: Dict[Callable, List] = {}\n+ for type_, encoder in type_encoder_map.items():\n+ encoders_by_classes.setdefault(encoder, []).append(type_)\n+ encoders_by_class_tuples: Dict[Callable, Tuple] = {}\n+ for encoder, classes in encoders_by_classes.items():\n+ encoders_by_class_tuples[encoder] = tuple(classes)\n+ return encoders_by_class_tuples\n+\n+\n+encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)\n+\n+\n def jsonable_encoder(\n obj: Any,\n include: Union[SetIntStr, DictIntStrAny] = None,\n@@ -105,24 +120,31 @@\n )\n )\n return encoded_list\n+\n+ if custom_encoder:\n+ if type(obj) in custom_encoder:\n+ return custom_encoder[type(obj)](obj)\n+ else:\n+ for encoder_type, encoder in custom_encoder.items():\n+ if isinstance(obj, encoder_type):\n+ return encoder(obj)\n+\n+ if type(obj) in ENCODERS_BY_TYPE:\n+ return ENCODERS_BY_TYPE[type(obj)](obj)\n+ for encoder, classes_tuple in encoders_by_class_tuples.items():\n+ if isinstance(obj, classes_tuple):\n+ return encoder(obj)\n+\n errors: List[Exception] = []\n try:\n- if custom_encoder and type(obj) in custom_encoder:\n- encoder = custom_encoder[type(obj)]\n- else:\n- encoder = ENCODERS_BY_TYPE[type(obj)]\n- return encoder(obj)\n- except KeyError as e:\n+ data = dict(obj)\n+ except Exception as e:\n errors.append(e)\n try:\n- data = dict(obj)\n+ data = vars(obj)\n except Exception as e:\n errors.append(e)\n- try:\n- data = vars(obj)\n- except Exception as e:\n- errors.append(e)\n- raise ValueError(errors)\n+ raise ValueError(errors)\n return jsonable_encoder(\n data,\n by_alias=by_alias,\n", "issue": "asyncpg uuid implementation not recognized by fastapi because of too strict typechecking\n### Describe the bug\r\n\r\nAn alternative implementation of the `UUID` type is not serialized by fastapi but throws an error instead. This is because it is of type `asyncpg.pgproto.pgproto.UUID` instead of type `uuid.UUID`. However it is implemented in such a way that it does the exact same things as the regular UUID but just faster. Also,\r\n```python\r\nisinstance(asyncpg.pgproto.pgproto.UUID(), uuid.UUID) == True\r\n```\r\nthis should make fastapi able to recognize it as a uuid.\r\n\r\n### To Reproduce\r\n1. Create a file called uuid_error.py with:\r\n\r\n```Python\r\nimport uuid\r\n\r\nfrom fastapi import FastAPI\r\nfrom asyncpg.pgproto import pgproto\r\n\r\napp = FastAPI()\r\n\r\n\r\[email protected](\"/normal_uuid\")\r\ndef return_normal_uuid():\r\n regular_uuid = uuid.UUID(\"a10ff360-3b1e-4984-a26f-d3ab460bdb51\")\r\n assert isinstance(regular_uuid, uuid.UUID)\r\n return {\"normal_uuid\": regular_uuid}\r\n\r\n\r\[email protected](\"/fast_uuid\")\r\ndef return_fast_uuid():\r\n asyncpg_uuid = pgproto.UUID(\"a10ff360-3b1e-4984-a26f-d3ab460bdb51\")\r\n assert isinstance(asyncpg_uuid, uuid.UUID)\r\n return {\"fast_uuid\": asyncpg_uuid}\r\n```\r\n2. Run it with uvicorn uuid_error:app\r\n3. 
Open the browser and call the endpoint `/fast_uuid`.\r\n4. It returns 'internal server error' .\r\n5. But I expected it to return `{\"fast_uuid\":\"a10ff360-3b1e-4984-a26f-d3ab460bdb51\"}`.\r\n\r\n### Expected behavior\r\n\r\nI expected fastapi to jsonify the uuid and send, but instead it returns 'internal server error'. The precise error being thrown in the server is:\r\n```\r\nValueError: [KeyError(<class 'asyncpg.pgproto.pgproto.UUID'>), TypeError(\"'asyncpg.pgproto.pgproto.UUID' object is not iterable\"), TypeError('vars() argument must have __dict__ attribute')]\r\n```\r\n\r\n### Environment\r\n\r\n- OS: Linux Mint 19 Tara\r\n- FastAPI Version: 0.44.0\r\n- Python version: 3.8.0\r\n\r\n### Additional context\r\n\r\nI initially made an issue in the asyncpg repo asking them if they could fix it: https://github.com/MagicStack/asyncpg/issues/512 I think it's usefull as additional context for this issue.\r\nThe problem can be solved here in the fastapi code by improving the code a little. I'm working on a PR, this line seems to be the issue:\r\nhttps://github.com/tiangolo/fastapi/blob/c200bc2240d48a4a27e96fba536351038eafc44f/fastapi/encoders.py#L113\r\nI think it shouldn't use `type()` but it should instead use `isinstance()` this will make for slightly looser typechecking but I think that might be a good thing in this case?\r\n\n", "before_files": [{"content": "from enum import Enum\nfrom types import GeneratorType\nfrom typing import Any, Dict, List, Set, Union\n\nfrom fastapi.logger import logger\nfrom fastapi.utils import PYDANTIC_1\nfrom pydantic import BaseModel\nfrom pydantic.json import ENCODERS_BY_TYPE\n\nSetIntStr = Set[Union[int, str]]\nDictIntStrAny = Dict[Union[int, str], Any]\n\n\ndef jsonable_encoder(\n obj: Any,\n include: Union[SetIntStr, DictIntStrAny] = None,\n exclude: Union[SetIntStr, DictIntStrAny] = set(),\n by_alias: bool = True,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n include_none: bool = True,\n custom_encoder: dict = {},\n sqlalchemy_safe: bool = True,\n) -> Any:\n if skip_defaults is not None:\n logger.warning( # pragma: nocover\n \"skip_defaults in jsonable_encoder has been deprecated in favor of \"\n \"exclude_unset to keep in line with Pydantic v1, support for it will be \"\n \"removed soon.\"\n )\n if include is not None and not isinstance(include, set):\n include = set(include)\n if exclude is not None and not isinstance(exclude, set):\n exclude = set(exclude)\n if isinstance(obj, BaseModel):\n encoder = getattr(obj.Config, \"json_encoders\", {})\n if custom_encoder:\n encoder.update(custom_encoder)\n if PYDANTIC_1:\n obj_dict = obj.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=bool(exclude_unset or skip_defaults),\n )\n else: # pragma: nocover\n obj_dict = obj.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n skip_defaults=bool(exclude_unset or skip_defaults),\n )\n return jsonable_encoder(\n obj_dict,\n include_none=include_none,\n custom_encoder=encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n if isinstance(obj, Enum):\n return obj.value\n if isinstance(obj, (str, int, float, type(None))):\n return obj\n if isinstance(obj, dict):\n encoded_dict = {}\n for key, value in obj.items():\n if (\n (\n not sqlalchemy_safe\n or (not isinstance(key, str))\n or (not key.startswith(\"_sa\"))\n )\n and (value is not None or include_none)\n and ((include and key in include) or key not in exclude)\n ):\n encoded_key = jsonable_encoder(\n key,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n 
include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_value = jsonable_encoder(\n value,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_dict[encoded_key] = encoded_value\n return encoded_dict\n if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):\n encoded_list = []\n for item in obj:\n encoded_list.append(\n jsonable_encoder(\n item,\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n )\n return encoded_list\n errors: List[Exception] = []\n try:\n if custom_encoder and type(obj) in custom_encoder:\n encoder = custom_encoder[type(obj)]\n else:\n encoder = ENCODERS_BY_TYPE[type(obj)]\n return encoder(obj)\n except KeyError as e:\n errors.append(e)\n try:\n data = dict(obj)\n except Exception as e:\n errors.append(e)\n try:\n data = vars(obj)\n except Exception as e:\n errors.append(e)\n raise ValueError(errors)\n return jsonable_encoder(\n data,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n", "path": "fastapi/encoders.py"}]}
num_tokens_prompt: 2,449
num_tokens_diff: 613
problem_id: gh_patches_debug_16499
source: rasdani/github-patches
task_type: git_diff
in_source_id: bentoml__BentoML-979
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Failed to package local pip installed dependency in 0.8.4 An issue first reported in #bentoml-users slack channel by @fernandocamargoti In some python environment, we see the following error when doing a `BenoService#save`: ``` TypeError: expected str, bytes or os.PathLike object, not NoneType ``` The reason is some packages ended up with a `module_path == None` when accessing form `pkg_resources.working_set`: ```python import pkg_resources [dist for dist in pkg_resources.working_set if not dist.module_path] Out[4]: [terminado 0.8.3 (/home/fernandocamargo/anaconda3/envs/logit/lib/python3.6/site-packages)] ``` Here is a conda environment yaml file that can reproduce this issue: ``` name: logit channels: #- anaconda - conda-forge - anaconda - pytorch - https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda - powerai dependencies: - python=3.6 - pandas=1.0 - pyarrow=0.15 - matplotlib=3.1 - plotly=4.6 - scipy=1.3 - numpy=1.16 - seaborn=0.10 - scikit-learn=0.22 - pytorch=1.2 - statsmodels=0.11 - holidays=0.9 - convertdate=2.2 - python-dateutil=2.8 - fbprophet=0.6 - luigi=2.8 - tqdm=4.44 - jupyterlab=1.2 - ipywidgets=7.5 - pyspark=2.4.5 - psutil=5.5 - pydantic=1.6 - sqlalchemy=1.3 - psycopg2=2.8 # GPU: - cudatoolkit=10.1 - pip - pip: # CPU: # - mxnet==1.4.1 # GPU: - mxnet-cu101mkl==1.4.1 # Power-9: # - /root/packages/mxnet/mxnet-1.4.1-cp36-cp36m-linux_ppc64le.whl - gluonts==0.5 - pmdarima==1.4 - torchbearer==0.5 - imbalanced-learn==0.6 - tensorboardx==2.0 - diskcache==3.0 - bentoml==0.8.3 ``` Interestingly, installing terminado alone does not cause this problem. I'm not sure if this is caused by other packages in this list. But will submit a fix to ensure BentoML handles this scenario properly. We will release a new version including this fix very soon. </issue> <code> [start of bentoml/saved_bundle/pip_pkg.py] 1 # Copyright 2019 Atalaya Tech, Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import os 16 import sys 17 import logging 18 import pkgutil 19 import ast 20 import zipimport 21 22 23 EPP_NO_ERROR = 0 24 EPP_PKG_NOT_EXIST = 1 25 EPP_PKG_VERSION_MISMATCH = 2 26 27 __mm = None 28 29 30 logger = logging.getLogger(__name__) 31 32 33 def parse_requirement_string(rs): 34 name, _, version = rs.partition("==") 35 return name, version 36 37 38 def verify_pkg(pkg_name, pkg_version): 39 global __mm # pylint: disable=global-statement 40 if __mm is None: 41 __mm = ModuleManager() 42 return __mm.verify_pkg(pkg_name, pkg_version) 43 44 45 def seek_pip_dependencies(target_py_file_path): 46 global __mm # pylint: disable=global-statement 47 if __mm is None: 48 __mm = ModuleManager() 49 return __mm.seek_pip_dependencies(target_py_file_path) 50 51 52 def get_all_pip_installed_modules(): 53 global __mm # pylint: disable=global-statement 54 if __mm is None: 55 __mm = ModuleManager() 56 57 installed_modules = list( 58 # local modules are the ones imported from current directory, either from a 59 # module.py file or a module directory that contains a `__init__.py` file 60 filter(lambda m: not m.is_local, __mm.searched_modules.values()) 61 ) 62 return list(map(lambda m: m.name, installed_modules)) 63 64 65 class ModuleInfo(object): 66 def __init__(self, name, path, is_local, is_pkg): 67 super(ModuleInfo, self).__init__() 68 self.name = name 69 self.path = path 70 self.is_local = is_local 71 self.is_pkg = is_pkg 72 73 74 class ModuleManager(object): 75 def __init__(self): 76 super(ModuleManager, self).__init__() 77 self.pip_pkg_map = {} 78 self.pip_module_map = {} 79 self.setuptools_module_set = set() 80 self.nonlocal_package_path = set() 81 82 import pkg_resources 83 84 for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable 85 if os.path.realpath(dist.module_path) != os.getcwd(): 86 # add to nonlocal_package path only if it's not current directory 87 self.nonlocal_package_path.add(dist.module_path) 88 89 self.pip_pkg_map[dist._key] = dist._version 90 for mn in dist._get_metadata("top_level.txt"): 91 if dist._key != "setuptools": 92 self.pip_module_map.setdefault(mn, []).append( 93 (dist._key, dist._version) 94 ) 95 else: 96 self.setuptools_module_set.add(mn) 97 98 self.searched_modules = {} 99 for m in pkgutil.iter_modules(): 100 if isinstance(m.module_finder, zipimport.zipimporter): 101 logger.warning(f"Skipped unsupported zipimporter {m.module_finder}") 102 continue 103 if m.name not in self.searched_modules: 104 path = m.module_finder.path 105 is_local = self.is_local_path(path) 106 self.searched_modules[m.name] = ModuleInfo( 107 m.name, path, is_local, m.ispkg 108 ) 109 110 def verify_pkg(self, pkg_name, pkg_version): 111 if pkg_name not in self.pip_pkg_map: 112 # package does not exist in the current python session 113 return EPP_PKG_NOT_EXIST 114 if pkg_version and pkg_version != self.pip_pkg_map[pkg_name]: 115 # package version is different from the version being used 116 # in the current python session 117 return EPP_PKG_VERSION_MISMATCH 118 return EPP_NO_ERROR 119 120 def seek_pip_dependencies(self, target_py_file_path): 121 work = DepSeekWork(self, target_py_file_path) 122 work.do() 123 requirements = {} 124 for _, pkg_info_list in work.dependencies.items(): 125 for pkg_name, pkg_version in pkg_info_list: 126 requirements[pkg_name] = pkg_version 127 128 return requirements, work.unknown_module_set 129 130 def is_local_path(self, path): 131 if path in self.nonlocal_package_path: 132 return False 133 134 dir_name = os.path.split(path)[1] 135 136 if ( 137 
"site-packages" in path 138 or "anaconda" in path 139 or path.endswith("packages") 140 or dir_name == "bin" 141 or dir_name.startswith("lib") 142 or dir_name.startswith("python") 143 or dir_name.startswith("plat") 144 ): 145 self.nonlocal_package_path.add(path) 146 return False 147 148 return True 149 150 151 class DepSeekWork(object): 152 def __init__(self, module_manager, target_py_file_path): 153 super(DepSeekWork, self).__init__() 154 self.module_manager = module_manager 155 self.target_py_file_path = target_py_file_path 156 157 self.dependencies = {} 158 self.unknown_module_set = set() 159 self.parsed_module_set = set() 160 161 def do(self): 162 self.seek_in_file(self.target_py_file_path) 163 164 def seek_in_file(self, file_path): 165 # Extract all dependency modules by searching through the trees of the Python 166 # abstract syntax grammar with Python's built-in ast module 167 try: 168 with open(file_path) as f: 169 content = f.read() 170 except UnicodeDecodeError: 171 with open(file_path, encoding='utf-8') as f: 172 content = f.read() 173 174 tree = ast.parse(content) 175 import_set = set() 176 for node in ast.walk(tree): 177 if isinstance(node, ast.Import): 178 for name in node.names: 179 import_set.add(name.name.partition(".")[0]) 180 elif isinstance(node, ast.ImportFrom): 181 if node.module is not None and node.level == 0: 182 import_set.add(node.module.partition(".")[0]) 183 for module_name in import_set: 184 # Avoid parsing BentoML when BentoML is imported from local source code repo 185 if module_name == 'bentoml': 186 continue 187 if module_name in self.parsed_module_set: 188 continue 189 self.parsed_module_set.add(module_name) 190 191 if module_name in self.module_manager.searched_modules: 192 m = self.module_manager.searched_modules[module_name] 193 if m.is_local: 194 # Recursively search dependencies in sub-modules 195 if m.is_pkg: 196 self.seek_in_dir(os.path.join(m.path, m.name)) 197 else: 198 self.seek_in_file(os.path.join(m.path, "{}.py".format(m.name))) 199 else: 200 # check if the package has already been added to the list 201 if ( 202 module_name in self.module_manager.pip_module_map 203 and module_name not in self.dependencies 204 and module_name not in self.module_manager.setuptools_module_set 205 ): 206 self.dependencies[ 207 module_name 208 ] = self.module_manager.pip_module_map[module_name] 209 else: 210 if module_name in self.module_manager.pip_module_map: 211 if module_name not in self.dependencies: 212 # In some special cases, the pip-installed module can not 213 # be located in the searched_modules 214 self.dependencies[ 215 module_name 216 ] = self.module_manager.pip_module_map[module_name] 217 else: 218 if module_name not in sys.builtin_module_names: 219 self.unknown_module_set.add(module_name) 220 221 def seek_in_dir(self, dir_path): 222 for path, dir_list, file_list in os.walk(dir_path): 223 for file_name in file_list: 224 if not file_name.endswith(".py"): 225 continue 226 self.seek_in_file(os.path.join(path, file_name)) 227 for dir_name in dir_list: 228 if dir_name == '__pycache__': 229 continue 230 self.seek_in_dir(os.path.join(path, dir_name)) 231 [end of bentoml/saved_bundle/pip_pkg.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bentoml/saved_bundle/pip_pkg.py b/bentoml/saved_bundle/pip_pkg.py --- a/bentoml/saved_bundle/pip_pkg.py +++ b/bentoml/saved_bundle/pip_pkg.py @@ -82,9 +82,14 @@ import pkg_resources for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable - if os.path.realpath(dist.module_path) != os.getcwd(): + module_path = dist.module_path or dist.location + if not module_path: + # Skip if no module path was found for pkg distribution + continue + + if os.path.realpath(module_path) != os.getcwd(): # add to nonlocal_package path only if it's not current directory - self.nonlocal_package_path.add(dist.module_path) + self.nonlocal_package_path.add(module_path) self.pip_pkg_map[dist._key] = dist._version for mn in dist._get_metadata("top_level.txt"):
{"golden_diff": "diff --git a/bentoml/saved_bundle/pip_pkg.py b/bentoml/saved_bundle/pip_pkg.py\n--- a/bentoml/saved_bundle/pip_pkg.py\n+++ b/bentoml/saved_bundle/pip_pkg.py\n@@ -82,9 +82,14 @@\n import pkg_resources\n \n for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable\n- if os.path.realpath(dist.module_path) != os.getcwd():\n+ module_path = dist.module_path or dist.location\n+ if not module_path:\n+ # Skip if no module path was found for pkg distribution\n+ continue\n+\n+ if os.path.realpath(module_path) != os.getcwd():\n # add to nonlocal_package path only if it's not current directory\n- self.nonlocal_package_path.add(dist.module_path)\n+ self.nonlocal_package_path.add(module_path)\n \n self.pip_pkg_map[dist._key] = dist._version\n for mn in dist._get_metadata(\"top_level.txt\"):\n", "issue": "Failed to package local pip installed dependency in 0.8.4\nAn issue first reported in #bentoml-users slack channel by @fernandocamargoti\r\n\r\nIn some python environment, we see the following error when doing a `BenoService#save`:\r\n```\r\nTypeError: expected str, bytes or os.PathLike object, not NoneType\r\n```\r\n\r\nThe reason is some packages ended up with a `module_path == None` when accessing form `pkg_resources.working_set`:\r\n```python\r\nimport pkg_resources\r\n[dist for dist in pkg_resources.working_set if not dist.module_path]\r\nOut[4]: [terminado 0.8.3 (/home/fernandocamargo/anaconda3/envs/logit/lib/python3.6/site-packages)]\r\n```\r\n\r\nHere is a conda environment yaml file that can reproduce this issue:\r\n```\r\nname: logit\r\nchannels:\r\n#- anaconda\r\n- conda-forge\r\n- anaconda\r\n- pytorch\r\n- https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda\r\n- powerai\r\ndependencies:\r\n- python=3.6\r\n- pandas=1.0\r\n- pyarrow=0.15\r\n- matplotlib=3.1\r\n- plotly=4.6\r\n- scipy=1.3\r\n- numpy=1.16\r\n- seaborn=0.10\r\n- scikit-learn=0.22\r\n- pytorch=1.2\r\n- statsmodels=0.11\r\n- holidays=0.9\r\n- convertdate=2.2\r\n- python-dateutil=2.8\r\n- fbprophet=0.6\r\n- luigi=2.8\r\n- tqdm=4.44\r\n- jupyterlab=1.2\r\n- ipywidgets=7.5\r\n- pyspark=2.4.5\r\n- psutil=5.5\r\n- pydantic=1.6\r\n- sqlalchemy=1.3\r\n- psycopg2=2.8\r\n# GPU:\r\n- cudatoolkit=10.1\r\n- pip\r\n- pip:\r\n# CPU:\r\n# - mxnet==1.4.1\r\n# GPU:\r\n - mxnet-cu101mkl==1.4.1\r\n# Power-9:\r\n# - /root/packages/mxnet/mxnet-1.4.1-cp36-cp36m-linux_ppc64le.whl\r\n - gluonts==0.5\r\n - pmdarima==1.4\r\n - torchbearer==0.5\r\n - imbalanced-learn==0.6\r\n - tensorboardx==2.0\r\n - diskcache==3.0\r\n - bentoml==0.8.3\r\n```\r\n\r\nInterestingly, installing terminado alone does not cause this problem. I'm not sure if this is caused by other packages in this list. 
But will submit a fix to ensure BentoML handles this scenario properly.\r\n\r\nWe will release a new version including this fix very soon.\r\n\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport logging\nimport pkgutil\nimport ast\nimport zipimport\n\n\nEPP_NO_ERROR = 0\nEPP_PKG_NOT_EXIST = 1\nEPP_PKG_VERSION_MISMATCH = 2\n\n__mm = None\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_requirement_string(rs):\n name, _, version = rs.partition(\"==\")\n return name, version\n\n\ndef verify_pkg(pkg_name, pkg_version):\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n return __mm.verify_pkg(pkg_name, pkg_version)\n\n\ndef seek_pip_dependencies(target_py_file_path):\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n return __mm.seek_pip_dependencies(target_py_file_path)\n\n\ndef get_all_pip_installed_modules():\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n\n installed_modules = list(\n # local modules are the ones imported from current directory, either from a\n # module.py file or a module directory that contains a `__init__.py` file\n filter(lambda m: not m.is_local, __mm.searched_modules.values())\n )\n return list(map(lambda m: m.name, installed_modules))\n\n\nclass ModuleInfo(object):\n def __init__(self, name, path, is_local, is_pkg):\n super(ModuleInfo, self).__init__()\n self.name = name\n self.path = path\n self.is_local = is_local\n self.is_pkg = is_pkg\n\n\nclass ModuleManager(object):\n def __init__(self):\n super(ModuleManager, self).__init__()\n self.pip_pkg_map = {}\n self.pip_module_map = {}\n self.setuptools_module_set = set()\n self.nonlocal_package_path = set()\n\n import pkg_resources\n\n for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable\n if os.path.realpath(dist.module_path) != os.getcwd():\n # add to nonlocal_package path only if it's not current directory\n self.nonlocal_package_path.add(dist.module_path)\n\n self.pip_pkg_map[dist._key] = dist._version\n for mn in dist._get_metadata(\"top_level.txt\"):\n if dist._key != \"setuptools\":\n self.pip_module_map.setdefault(mn, []).append(\n (dist._key, dist._version)\n )\n else:\n self.setuptools_module_set.add(mn)\n\n self.searched_modules = {}\n for m in pkgutil.iter_modules():\n if isinstance(m.module_finder, zipimport.zipimporter):\n logger.warning(f\"Skipped unsupported zipimporter {m.module_finder}\")\n continue\n if m.name not in self.searched_modules:\n path = m.module_finder.path\n is_local = self.is_local_path(path)\n self.searched_modules[m.name] = ModuleInfo(\n m.name, path, is_local, m.ispkg\n )\n\n def verify_pkg(self, pkg_name, pkg_version):\n if pkg_name not in self.pip_pkg_map:\n # package does not exist in the current python session\n return EPP_PKG_NOT_EXIST\n if pkg_version and pkg_version != self.pip_pkg_map[pkg_name]:\n # package version is 
different from the version being used\n # in the current python session\n return EPP_PKG_VERSION_MISMATCH\n return EPP_NO_ERROR\n\n def seek_pip_dependencies(self, target_py_file_path):\n work = DepSeekWork(self, target_py_file_path)\n work.do()\n requirements = {}\n for _, pkg_info_list in work.dependencies.items():\n for pkg_name, pkg_version in pkg_info_list:\n requirements[pkg_name] = pkg_version\n\n return requirements, work.unknown_module_set\n\n def is_local_path(self, path):\n if path in self.nonlocal_package_path:\n return False\n\n dir_name = os.path.split(path)[1]\n\n if (\n \"site-packages\" in path\n or \"anaconda\" in path\n or path.endswith(\"packages\")\n or dir_name == \"bin\"\n or dir_name.startswith(\"lib\")\n or dir_name.startswith(\"python\")\n or dir_name.startswith(\"plat\")\n ):\n self.nonlocal_package_path.add(path)\n return False\n\n return True\n\n\nclass DepSeekWork(object):\n def __init__(self, module_manager, target_py_file_path):\n super(DepSeekWork, self).__init__()\n self.module_manager = module_manager\n self.target_py_file_path = target_py_file_path\n\n self.dependencies = {}\n self.unknown_module_set = set()\n self.parsed_module_set = set()\n\n def do(self):\n self.seek_in_file(self.target_py_file_path)\n\n def seek_in_file(self, file_path):\n # Extract all dependency modules by searching through the trees of the Python\n # abstract syntax grammar with Python's built-in ast module\n try:\n with open(file_path) as f:\n content = f.read()\n except UnicodeDecodeError:\n with open(file_path, encoding='utf-8') as f:\n content = f.read()\n\n tree = ast.parse(content)\n import_set = set()\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for name in node.names:\n import_set.add(name.name.partition(\".\")[0])\n elif isinstance(node, ast.ImportFrom):\n if node.module is not None and node.level == 0:\n import_set.add(node.module.partition(\".\")[0])\n for module_name in import_set:\n # Avoid parsing BentoML when BentoML is imported from local source code repo\n if module_name == 'bentoml':\n continue\n if module_name in self.parsed_module_set:\n continue\n self.parsed_module_set.add(module_name)\n\n if module_name in self.module_manager.searched_modules:\n m = self.module_manager.searched_modules[module_name]\n if m.is_local:\n # Recursively search dependencies in sub-modules\n if m.is_pkg:\n self.seek_in_dir(os.path.join(m.path, m.name))\n else:\n self.seek_in_file(os.path.join(m.path, \"{}.py\".format(m.name)))\n else:\n # check if the package has already been added to the list\n if (\n module_name in self.module_manager.pip_module_map\n and module_name not in self.dependencies\n and module_name not in self.module_manager.setuptools_module_set\n ):\n self.dependencies[\n module_name\n ] = self.module_manager.pip_module_map[module_name]\n else:\n if module_name in self.module_manager.pip_module_map:\n if module_name not in self.dependencies:\n # In some special cases, the pip-installed module can not\n # be located in the searched_modules\n self.dependencies[\n module_name\n ] = self.module_manager.pip_module_map[module_name]\n else:\n if module_name not in sys.builtin_module_names:\n self.unknown_module_set.add(module_name)\n\n def seek_in_dir(self, dir_path):\n for path, dir_list, file_list in os.walk(dir_path):\n for file_name in file_list:\n if not file_name.endswith(\".py\"):\n continue\n self.seek_in_file(os.path.join(path, file_name))\n for dir_name in dir_list:\n if dir_name == '__pycache__':\n continue\n 
self.seek_in_dir(os.path.join(path, dir_name))\n", "path": "bentoml/saved_bundle/pip_pkg.py"}]}
3,573
230
gh_patches_debug_23109
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-1054
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test for unintended permissions via API Have we left any security holes via the API? - A non-sysadmin should not be able to add/edit/delete a group (country) - A non-sysadmin should not be able to add/edit/delete an org - A non-sysadmin should not be able to see user emails (other than their own) (note: Luis is pulling a list of user emails via the API, presumably with his sysadmin account) - A NEAO should not be able to add a dataset for the org - A NEAO should not be able to become a member of an org - No one should be able to become a member of a group (country) </issue> <code> [start of ckanext-hdx_theme/ckanext/hdx_theme/auth.py] 1 import ckan.new_authz as new_authz 2 3 from ckan.lib.base import _ 4 5 def hdx_basic_user_info(context, data_dict): 6 logged_in = new_authz.auth_is_loggedin_user() 7 if logged_in: 8 return {'success': True} 9 else: 10 return {'success': False, 11 'msg': _("You must be logged in to access basic organization member info.")} [end of ckanext-hdx_theme/ckanext/hdx_theme/auth.py] [start of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py] 1 import ckanext.hdx_theme.licenses as hdx_licenses 2 3 import ckan.plugins as plugins 4 import ckan.plugins.toolkit as toolkit 5 import ckan.model.package as package 6 import ckan.model.license as license 7 import pylons.config as config 8 import version 9 10 import ckanext.hdx_theme.caching as caching 11 import ckanext.hdx_theme.auth as auth 12 13 14 def run_on_startup(): 15 cache_on_startup = config.get('hdx.cache.onstartup', 'true') 16 if 'true' == cache_on_startup: 17 _generate_license_list() 18 caching.cached_get_group_package_stuff() 19 20 21 def _generate_license_list(): 22 package.Package._license_register = license.LicenseRegister() 23 package.Package._license_register.licenses = [ 24 license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()), 25 license.License(license.LicenseCreativeCommonsAttribution()), 26 license.License(license.LicenseCreativeCommonsAttributionShareAlike()), 27 license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()), 28 license.License(hdx_licenses.LicenseHdxMultiple()), 29 license.License(hdx_licenses.LicenseHdxOther()) 30 ] 31 32 class HDXThemePlugin(plugins.SingletonPlugin): 33 plugins.implements(plugins.IConfigurer) 34 plugins.implements(plugins.IRoutes, inherit=True) 35 plugins.implements(plugins.ITemplateHelpers) 36 plugins.implements(plugins.IActions) 37 plugins.implements(plugins.IAuthFunctions) 38 plugins.implements(plugins.IGroupController, inherit=True) 39 plugins.implements(plugins.IMiddleware, inherit=True) 40 41 def update_config(self, config): 42 toolkit.add_template_directory(config, 'templates') 43 toolkit.add_public_directory(config, 'public') 44 toolkit.add_resource('fanstatic', 'hdx_theme') 45 46 47 def before_map(self, map): 48 map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index') 49 map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset') 50 map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country') 51 map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source') 52 map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in') 53 map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', 
action='contribute') 54 55 map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test') 56 57 map.connect('request_membership', '/organization/{org_id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_membership') 58 map.connect('request_editing_rights', '/organization/{org_id}/request_editing_rights', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_editor_for_org') 59 map.connect('/organization/request_new', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_new_organization') 60 map.connect('/organization/members/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='members') 61 map.connect('dataset_preselect','/dataset/preselect', controller='ckanext.hdx_theme.preselect_dsform_controller:HDXPreselectOrgController', action='preselect') 62 map.connect('/organization/member_new/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='member_new') 63 64 map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about') 65 66 map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit') 67 68 return map 69 70 def create(self, entity): 71 caching.invalidate_group_caches() 72 73 def edit(self, entity): 74 caching.invalidate_group_caches() 75 76 def get_helpers(self): 77 from ckanext.hdx_theme import helpers as hdx_helpers 78 return { 79 'is_downloadable': hdx_helpers.is_downloadable, 80 'get_facet_items_dict':hdx_helpers.get_facet_items_dict, 81 'get_last_modifier_user': hdx_helpers.get_last_modifier_user, 82 'get_filtered_params_list':hdx_helpers.get_filtered_params_list, 83 'get_last_revision_package':hdx_helpers.get_last_revision_package, 84 'get_last_modifier_user':hdx_helpers.get_last_modifier_user, 85 'get_last_revision_group':hdx_helpers.get_last_revision_group, 86 'get_group_followers':hdx_helpers.get_group_followers, 87 'get_group_members':hdx_helpers.get_group_members, 88 'markdown_extract_strip':hdx_helpers.markdown_extract_strip, 89 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str, 90 'hdx_version':hdx_helpers.hdx_version, 91 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message, 92 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items, 93 'hdx_get_extras_element':hdx_helpers.hdx_get_extras_element, 94 'hdx_get_user_info':hdx_helpers.hdx_get_user_info, 95 'hdx_linked_user':hdx_helpers.hdx_linked_user, 96 'hdx_show_singular_plural':hdx_helpers.hdx_show_singular_plural, 97 'hdx_member_roles_list':hdx_helpers.hdx_member_roles_list, 98 'hdx_organizations_available_with_roles':hdx_helpers.hdx_organizations_available_with_roles 99 100 } 101 102 def get_actions(self): 103 from ckanext.hdx_theme import actions as hdx_actions 104 return { 105 'organization_list_for_user':hdx_actions.organization_list_for_user, 106 'cached_group_list': hdx_actions.cached_group_list, 107 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info, 108 'member_list': hdx_actions.member_list, 109 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins 110 111 } 112 def get_auth_functions(self): 113 return { 114 'hdx_basic_user_info': auth.hdx_basic_user_info 115 } 116 117 def make_middleware(self, app, config): 118 run_on_startup() 119 return app 120 121 122 123 124 [end 
of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py --- a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py @@ -1,11 +1,18 @@ import ckan.new_authz as new_authz +import ckan.logic as logic from ckan.lib.base import _ + def hdx_basic_user_info(context, data_dict): logged_in = new_authz.auth_is_loggedin_user() if logged_in: return {'success': True} else: return {'success': False, - 'msg': _("You must be logged in to access basic organization member info.")} \ No newline at end of file + 'msg': _("You must be logged in to access basic organization member info.")} + + [email protected]_sysadmins_check +def group_member_create(context, data_dict): + return {'success': False, 'msg': _('Nobody can add a member to a country in HDX')} \ No newline at end of file diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py --- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py @@ -111,7 +111,8 @@ } def get_auth_functions(self): return { - 'hdx_basic_user_info': auth.hdx_basic_user_info + 'hdx_basic_user_info': auth.hdx_basic_user_info, + 'group_member_create': auth.group_member_create } def make_middleware(self, app, config):
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py\n@@ -1,11 +1,18 @@\n import ckan.new_authz as new_authz\n+import ckan.logic as logic\n \n from ckan.lib.base import _\n \n+\n def hdx_basic_user_info(context, data_dict):\n logged_in = new_authz.auth_is_loggedin_user()\n if logged_in:\n return {'success': True}\n else:\n return {'success': False,\n- 'msg': _(\"You must be logged in to access basic organization member info.\")}\n\\ No newline at end of file\n+ 'msg': _(\"You must be logged in to access basic organization member info.\")}\n+\n+\[email protected]_sysadmins_check\n+def group_member_create(context, data_dict):\n+ return {'success': False, 'msg': _('Nobody can add a member to a country in HDX')}\n\\ No newline at end of file\ndiff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n@@ -111,7 +111,8 @@\n }\n def get_auth_functions(self):\n return {\n- 'hdx_basic_user_info': auth.hdx_basic_user_info\n+ 'hdx_basic_user_info': auth.hdx_basic_user_info,\n+ 'group_member_create': auth.group_member_create\n }\n \n def make_middleware(self, app, config):\n", "issue": "Test for unintended permissions via API\nHave we left any security holes via the API?\n- A non-sysadmin should not be able to add/edit/delete a group (country)\n- A non-sysadmin should not be able to add/edit/delete an org\n- A non-sysadmin should not be able to see user emails (other than their own) (note: Luis is pulling a list of user emails via the API, presumably with his sysadmin account)\n- A NEAO should not be able to add a dataset for the org\n- A NEAO should not be able to become a member of an org\n- No one should be able to become a member of a group (country)\n\n", "before_files": [{"content": "import ckan.new_authz as new_authz\n\nfrom ckan.lib.base import _\n\ndef hdx_basic_user_info(context, data_dict):\n logged_in = new_authz.auth_is_loggedin_user()\n if logged_in:\n return {'success': True}\n else:\n return {'success': False,\n 'msg': _(\"You must be logged in to access basic organization member info.\")}", "path": "ckanext-hdx_theme/ckanext/hdx_theme/auth.py"}, {"content": "import ckanext.hdx_theme.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport pylons.config as config\nimport version\n\nimport ckanext.hdx_theme.caching as caching\nimport ckanext.hdx_theme.auth as auth\n\n\ndef run_on_startup():\n cache_on_startup = config.get('hdx.cache.onstartup', 'true')\n if 'true' == cache_on_startup:\n _generate_license_list()\n caching.cached_get_group_package_stuff()\n\n\ndef _generate_license_list():\n package.Package._license_register = license.LicenseRegister() \n package.Package._license_register.licenses = [\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n 
plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IAuthFunctions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n map.connect('request_membership', '/organization/{org_id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_membership')\n map.connect('request_editing_rights', '/organization/{org_id}/request_editing_rights', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_editor_for_org')\n map.connect('/organization/request_new', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_new_organization')\n map.connect('/organization/members/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='members')\n map.connect('dataset_preselect','/dataset/preselect', controller='ckanext.hdx_theme.preselect_dsform_controller:HDXPreselectOrgController', action='preselect')\n map.connect('/organization/member_new/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='member_new')\n\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n\n map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n\n return map\n \n def create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package':hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group':hdx_helpers.get_last_revision_group,\n 'get_group_followers':hdx_helpers.get_group_followers,\n 'get_group_members':hdx_helpers.get_group_members,\n 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,\n 'hdx_version':hdx_helpers.hdx_version,\n 
'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items,\n 'hdx_get_extras_element':hdx_helpers.hdx_get_extras_element,\n 'hdx_get_user_info':hdx_helpers.hdx_get_user_info,\n 'hdx_linked_user':hdx_helpers.hdx_linked_user,\n 'hdx_show_singular_plural':hdx_helpers.hdx_show_singular_plural,\n 'hdx_member_roles_list':hdx_helpers.hdx_member_roles_list,\n 'hdx_organizations_available_with_roles':hdx_helpers.hdx_organizations_available_with_roles\n \n }\n \n def get_actions(self):\n from ckanext.hdx_theme import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list,\n 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,\n 'member_list': hdx_actions.member_list,\n 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins\n \n }\n def get_auth_functions(self):\n return {\n 'hdx_basic_user_info': auth.hdx_basic_user_info\n }\n \n def make_middleware(self, app, config):\n run_on_startup()\n return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}]}
2,505
420
gh_patches_debug_641
rasdani/github-patches
git_diff
pex-tool__pex-2219
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.143 On the docket: + [x] pex fails to build pycryptodome due to filename too long #2087 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.142" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.142" +__version__ = "2.1.143"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.142\"\n+__version__ = \"2.1.143\"\n", "issue": "Release 2.1.143\nOn the docket:\r\n+ [x] pex fails to build pycryptodome due to filename too long #2087 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.142\"\n", "path": "pex/version.py"}]}
623
98
gh_patches_debug_21265
rasdani/github-patches
git_diff
scikit-hep__pyhf-436
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test Deploy feature is broken # Description See https://github.com/FaradayRF/faradayio/issues/35 for a related issue - I think. Failing job on master here: https://travis-ci.org/diana-hep/pyhf/builds/517678508?utm_source=github_status&utm_medium=notification # Expected Behavior Expect it to pass. # Actual Behavior Observe it failing. # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 from os import path 5 import sys 6 7 this_directory = path.abspath(path.dirname(__file__)) 8 if sys.version_info.major < 3: 9 from io import open 10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md: 11 long_description = readme_md.read() 12 13 extras_require = { 14 'tensorflow': [ 15 'tensorflow~=1.13', 16 'tensorflow-probability~=0.5', 17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 18 'setuptools<=39.1.0', 19 ], 20 'torch': ['torch~=1.0'], 21 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'], 22 # 'dask': [ 23 # 'dask[array]' 24 # ], 25 'xmlio': ['uproot'], 26 'minuit': ['iminuit'], 27 'develop': [ 28 'pyflakes', 29 'pytest~=3.5', 30 'pytest-cov>=2.5.1', 31 'pytest-mock', 32 'pytest-benchmark[histogram]', 33 'pytest-console-scripts', 34 'python-coveralls', 35 'coverage>=4.0', # coveralls 36 'matplotlib', 37 'jupyter', 38 'nbdime', 39 'uproot~=3.3', 40 'papermill~=0.16', 41 'graphviz', 42 'bumpversion', 43 'sphinx', 44 'sphinxcontrib-bibtex', 45 'sphinxcontrib-napoleon', 46 'sphinx_rtd_theme', 47 'nbsphinx', 48 'sphinx-issues', 49 'm2r', 50 'jsonpatch', 51 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 52 'pre-commit', 53 'black;python_version>="3.6"', # Black is Python3 only 54 'twine', 55 ], 56 } 57 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 58 59 setup( 60 name='pyhf', 61 version='0.0.16', 62 description='(partial) pure python histfactory implementation', 63 long_description=long_description, 64 long_description_content_type='text/markdown', 65 url='https://github.com/diana-hep/pyhf', 66 author='Lukas Heinrich', 67 author_email='[email protected]', 68 license='Apache', 69 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask', 70 classifiers=[ 71 "Programming Language :: Python :: 2", 72 "Programming Language :: Python :: 2.7", 73 "Programming Language :: Python :: 3", 74 "Programming Language :: Python :: 3.6", 75 "Programming Language :: Python :: 3.7", 76 ], 77 packages=find_packages(), 78 include_package_data=True, 79 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", 80 install_requires=[ 81 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet 82 'click>=6.0', # for console scripts, 83 'tqdm', # for readxml 84 'six', # for modifiers 85 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 86 'jsonpatch', 87 ], 88 extras_require=extras_require, 89 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, 90 dependency_links=[], 91 ) 92 [end of 
setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -56,6 +56,29 @@ } extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) + +def _is_test_pypi(): + """ + Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and + set to true (c.f. .travis.yml) + + The use_scm_version kwarg accepts a callable for the local_scheme + configuration parameter with argument "version". This can be replaced + with a lambda as the desired version structure is {next_version}.dev{distance} + c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy + + As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version + controlled through bumpversion is used. + """ + from os import getenv + + return ( + {'local_scheme': lambda version: ''} + if getenv('TESTPYPI_UPLOAD') == 'true' + else False + ) + + setup( name='pyhf', version='0.0.16', @@ -88,4 +111,5 @@ extras_require=extras_require, entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, dependency_links=[], + use_scm_version=_is_test_pypi(), )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,6 +56,29 @@\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n \n+\n+def _is_test_pypi():\n+ \"\"\"\n+ Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n+ set to true (c.f. .travis.yml)\n+\n+ The use_scm_version kwarg accepts a callable for the local_scheme\n+ configuration parameter with argument \"version\". This can be replaced\n+ with a lambda as the desired version structure is {next_version}.dev{distance}\n+ c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n+\n+ As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n+ controlled through bumpversion is used.\n+ \"\"\"\n+ from os import getenv\n+\n+ return (\n+ {'local_scheme': lambda version: ''}\n+ if getenv('TESTPYPI_UPLOAD') == 'true'\n+ else False\n+ )\n+\n+\n setup(\n name='pyhf',\n version='0.0.16',\n@@ -88,4 +111,5 @@\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n+ use_scm_version=_is_test_pypi(),\n )\n", "issue": "Test Deploy feature is broken\n# Description\r\n\r\nSee https://github.com/FaradayRF/faradayio/issues/35 for a related issue - I think. Failing job on master here: https://travis-ci.org/diana-hep/pyhf/builds/517678508?utm_source=github_status&utm_medium=notification\r\n\r\n# Expected Behavior\r\n\r\nExpect it to pass.\r\n\r\n# Actual Behavior\r\n\r\nObserve it failing.\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.16',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n 
long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]}
1,725
326
gh_patches_debug_29031
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-1495
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Forbid setting user and chat data attributes Inevitably users will attempt to do `context.user_data = {'my': 'dict'}` or `context.chat_data = whatever`. This will lead to confusing behaviour. I propose making those fields properties without a setter, which will more helpfully error. Depending on how far we want to go, we can also suggest the user to use `update(other_dict)` if they attempt to set the field. See https://t.me/pythontelegrambotgroup/222138 onwards. </issue> <code> [start of telegram/ext/callbackcontext.py] 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2018 5 # Leandro Toledo de Souza <[email protected]> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 19 """This module contains the CallbackContext class.""" 20 21 from telegram import Update 22 23 24 class CallbackContext(object): 25 """ 26 This is a context object passed to the callback called by :class:`telegram.ext.Handler` 27 or by the :class:`telegram.ext.Dispatcher` in an error handler added by 28 :attr:`telegram.ext.Dispatcher.add_error_handler` or to the callback of a 29 :class:`telegram.ext.Job`. 30 31 Note: 32 :class:`telegram.ext.Dispatcher` will create a single context for an entire update. This 33 means that if you got 2 handlers in different groups and they both get called, they will 34 get passed the same `CallbackContext` object (of course with proper attributes like 35 `.matches` differing). This allows you to add custom attributes in a lower handler group 36 callback, and then subsequently access those attributes in a higher handler group callback. 37 Note that the attributes on `CallbackContext` might change in the future, so make sure to 38 use a fairly unique name for the attributes. 39 40 Warning: 41 Do not combine custom attributes and @run_async. Due to how @run_async works, it will 42 almost certainly execute the callbacks for an update out of order, and the attributes 43 that you think you added will not be present. 44 45 Attributes: 46 chat_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each 47 update from the same chat it will be the same ``dict``. 48 user_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each 49 update from the same user it will be the same ``dict``. 50 matches (List[:obj:`re match object`], optional): If the associated update originated from 51 a regex-supported handler or had a :class:`Filters.regex`, this will contain a list of 52 match objects for every pattern where ``re.search(pattern, string)`` returned a match. 53 Note that filters short circuit, so combined regex filters will not always 54 be evaluated. 
55 args (List[:obj:`str`], optional): Arguments passed to a command if the associated update 56 is handled by :class:`telegram.ext.CommandHandler`, :class:`telegram.ext.PrefixHandler` 57 or :class:`telegram.ext.StringCommandHandler`. It contains a list of the words in the 58 text after the command, using any whitespace string as a delimiter. 59 error (:class:`telegram.TelegramError`, optional): The Telegram error that was raised. 60 Only present when passed to a error handler registered with 61 :attr:`telegram.ext.Dispatcher.add_error_handler`. 62 job (:class:`telegram.ext.Job`): The job that that originated this callback. 63 Only present when passed to the callback of :class:`telegram.ext.Job`. 64 65 """ 66 67 def __init__(self, dispatcher): 68 """ 69 Args: 70 dispatcher (:class:`telegram.ext.Dispatcher`): 71 """ 72 if not dispatcher.use_context: 73 raise ValueError('CallbackContext should not be used with a non context aware ' 74 'dispatcher!') 75 self._dispatcher = dispatcher 76 self.chat_data = None 77 self.user_data = None 78 self.args = None 79 self.matches = None 80 self.error = None 81 self.job = None 82 83 @classmethod 84 def from_error(cls, update, error, dispatcher): 85 self = cls.from_update(update, dispatcher) 86 self.error = error 87 return self 88 89 @classmethod 90 def from_update(cls, update, dispatcher): 91 self = cls(dispatcher) 92 if update is not None and isinstance(update, Update): 93 chat = update.effective_chat 94 user = update.effective_user 95 96 if chat: 97 self.chat_data = dispatcher.chat_data[chat.id] 98 if user: 99 self.user_data = dispatcher.user_data[user.id] 100 return self 101 102 @classmethod 103 def from_job(cls, job, dispatcher): 104 self = cls(dispatcher) 105 self.job = job 106 return self 107 108 def update(self, data): 109 self.__dict__.update(data) 110 111 @property 112 def bot(self): 113 """:class:`telegram.Bot`: The bot associated with this context.""" 114 return self._dispatcher.bot 115 116 @property 117 def job_queue(self): 118 """ 119 :class:`telegram.ext.JobQueue`: The ``JobQueue`` used by the 120 :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater` 121 associated with this context. 122 123 """ 124 return self._dispatcher.job_queue 125 126 @property 127 def update_queue(self): 128 """ 129 :class:`queue.Queue`: The ``Queue`` instance used by the 130 :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater` 131 associated with this context. 132 133 """ 134 return self._dispatcher.update_queue 135 136 @property 137 def match(self): 138 """ 139 `Regex match type`: The first match from :attr:`matches`. 140 Useful if you are only filtering using a single regex filter. 141 Returns `None` if :attr:`matches` is empty. 142 """ 143 try: 144 return self.matches[0] # pylint: disable=unsubscriptable-object 145 except (IndexError, TypeError): 146 return None 147 [end of telegram/ext/callbackcontext.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/ext/callbackcontext.py b/telegram/ext/callbackcontext.py --- a/telegram/ext/callbackcontext.py +++ b/telegram/ext/callbackcontext.py @@ -73,13 +73,31 @@ raise ValueError('CallbackContext should not be used with a non context aware ' 'dispatcher!') self._dispatcher = dispatcher - self.chat_data = None - self.user_data = None + self._chat_data = None + self._user_data = None self.args = None self.matches = None self.error = None self.job = None + @property + def chat_data(self): + return self._chat_data + + @chat_data.setter + def chat_data(self, value): + raise AttributeError("You can not assign a new value to chat_data, see " + "https://git.io/fjxKe") + + @property + def user_data(self): + return self._user_data + + @user_data.setter + def user_data(self, value): + raise AttributeError("You can not assign a new value to user_data, see " + "https://git.io/fjxKe") + @classmethod def from_error(cls, update, error, dispatcher): self = cls.from_update(update, dispatcher) @@ -94,9 +112,9 @@ user = update.effective_user if chat: - self.chat_data = dispatcher.chat_data[chat.id] + self._chat_data = dispatcher.chat_data[chat.id] if user: - self.user_data = dispatcher.user_data[user.id] + self._user_data = dispatcher.user_data[user.id] return self @classmethod
{"golden_diff": "diff --git a/telegram/ext/callbackcontext.py b/telegram/ext/callbackcontext.py\n--- a/telegram/ext/callbackcontext.py\n+++ b/telegram/ext/callbackcontext.py\n@@ -73,13 +73,31 @@\n raise ValueError('CallbackContext should not be used with a non context aware '\n 'dispatcher!')\n self._dispatcher = dispatcher\n- self.chat_data = None\n- self.user_data = None\n+ self._chat_data = None\n+ self._user_data = None\n self.args = None\n self.matches = None\n self.error = None\n self.job = None\n \n+ @property\n+ def chat_data(self):\n+ return self._chat_data\n+\n+ @chat_data.setter\n+ def chat_data(self, value):\n+ raise AttributeError(\"You can not assign a new value to chat_data, see \"\n+ \"https://git.io/fjxKe\")\n+\n+ @property\n+ def user_data(self):\n+ return self._user_data\n+\n+ @user_data.setter\n+ def user_data(self, value):\n+ raise AttributeError(\"You can not assign a new value to user_data, see \"\n+ \"https://git.io/fjxKe\")\n+\n @classmethod\n def from_error(cls, update, error, dispatcher):\n self = cls.from_update(update, dispatcher)\n@@ -94,9 +112,9 @@\n user = update.effective_user\n \n if chat:\n- self.chat_data = dispatcher.chat_data[chat.id]\n+ self._chat_data = dispatcher.chat_data[chat.id]\n if user:\n- self.user_data = dispatcher.user_data[user.id]\n+ self._user_data = dispatcher.user_data[user.id]\n return self\n \n @classmethod\n", "issue": "Forbid setting user and chat data attributes\nInevitably users will attempt to do `context.user_data = {'my': 'dict'}` or `context.chat_data = whatever`. This will lead to confusing behaviour.\r\n\r\nI propose making those fields properties without a setter, which will more helpfully error. Depending on how far we want to go, we can also suggest the user to use `update(other_dict)` if they attempt to set the field.\r\n\r\nSee https://t.me/pythontelegrambotgroup/222138 onwards.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CallbackContext class.\"\"\"\n\nfrom telegram import Update\n\n\nclass CallbackContext(object):\n \"\"\"\n This is a context object passed to the callback called by :class:`telegram.ext.Handler`\n or by the :class:`telegram.ext.Dispatcher` in an error handler added by\n :attr:`telegram.ext.Dispatcher.add_error_handler` or to the callback of a\n :class:`telegram.ext.Job`.\n\n Note:\n :class:`telegram.ext.Dispatcher` will create a single context for an entire update. This\n means that if you got 2 handlers in different groups and they both get called, they will\n get passed the same `CallbackContext` object (of course with proper attributes like\n `.matches` differing). 
This allows you to add custom attributes in a lower handler group\n callback, and then subsequently access those attributes in a higher handler group callback.\n Note that the attributes on `CallbackContext` might change in the future, so make sure to\n use a fairly unique name for the attributes.\n\n Warning:\n Do not combine custom attributes and @run_async. Due to how @run_async works, it will\n almost certainly execute the callbacks for an update out of order, and the attributes\n that you think you added will not be present.\n\n Attributes:\n chat_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each\n update from the same chat it will be the same ``dict``.\n user_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each\n update from the same user it will be the same ``dict``.\n matches (List[:obj:`re match object`], optional): If the associated update originated from\n a regex-supported handler or had a :class:`Filters.regex`, this will contain a list of\n match objects for every pattern where ``re.search(pattern, string)`` returned a match.\n Note that filters short circuit, so combined regex filters will not always\n be evaluated.\n args (List[:obj:`str`], optional): Arguments passed to a command if the associated update\n is handled by :class:`telegram.ext.CommandHandler`, :class:`telegram.ext.PrefixHandler`\n or :class:`telegram.ext.StringCommandHandler`. It contains a list of the words in the\n text after the command, using any whitespace string as a delimiter.\n error (:class:`telegram.TelegramError`, optional): The Telegram error that was raised.\n Only present when passed to a error handler registered with\n :attr:`telegram.ext.Dispatcher.add_error_handler`.\n job (:class:`telegram.ext.Job`): The job that that originated this callback.\n Only present when passed to the callback of :class:`telegram.ext.Job`.\n\n \"\"\"\n\n def __init__(self, dispatcher):\n \"\"\"\n Args:\n dispatcher (:class:`telegram.ext.Dispatcher`):\n \"\"\"\n if not dispatcher.use_context:\n raise ValueError('CallbackContext should not be used with a non context aware '\n 'dispatcher!')\n self._dispatcher = dispatcher\n self.chat_data = None\n self.user_data = None\n self.args = None\n self.matches = None\n self.error = None\n self.job = None\n\n @classmethod\n def from_error(cls, update, error, dispatcher):\n self = cls.from_update(update, dispatcher)\n self.error = error\n return self\n\n @classmethod\n def from_update(cls, update, dispatcher):\n self = cls(dispatcher)\n if update is not None and isinstance(update, Update):\n chat = update.effective_chat\n user = update.effective_user\n\n if chat:\n self.chat_data = dispatcher.chat_data[chat.id]\n if user:\n self.user_data = dispatcher.user_data[user.id]\n return self\n\n @classmethod\n def from_job(cls, job, dispatcher):\n self = cls(dispatcher)\n self.job = job\n return self\n\n def update(self, data):\n self.__dict__.update(data)\n\n @property\n def bot(self):\n \"\"\":class:`telegram.Bot`: The bot associated with this context.\"\"\"\n return self._dispatcher.bot\n\n @property\n def job_queue(self):\n \"\"\"\n :class:`telegram.ext.JobQueue`: The ``JobQueue`` used by the\n :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`\n associated with this context.\n\n \"\"\"\n return self._dispatcher.job_queue\n\n @property\n def update_queue(self):\n \"\"\"\n :class:`queue.Queue`: The ``Queue`` instance used by the\n :class:`telegram.ext.Dispatcher` and (usually) the 
:class:`telegram.ext.Updater`\n associated with this context.\n\n \"\"\"\n return self._dispatcher.update_queue\n\n @property\n def match(self):\n \"\"\"\n `Regex match type`: The first match from :attr:`matches`.\n Useful if you are only filtering using a single regex filter.\n Returns `None` if :attr:`matches` is empty.\n \"\"\"\n try:\n return self.matches[0] # pylint: disable=unsubscriptable-object\n except (IndexError, TypeError):\n return None\n", "path": "telegram/ext/callbackcontext.py"}]}
2,296
386
gh_patches_debug_30825
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2484
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ordering by a non-orderable column (implicitly or explicitly) causes a backend crash ## Bug Brent [writes](https://github.com/centerofci/mathesar/pull/2253#pullrequestreview-1251718380): > At one point, you note in a comment that there's a bug whenever we have internal columns of JSON type, and no primary key in a table. This bug is still present for _any_ non-orderable column type if there is no primary key in the table (e.g., if the user attaches to a preexisting DB). It's easy to figure out whether or not a type is orderable in PostgreSQL. See the [docs page for ORDER BY](https://www.postgresql.org/docs/current/queries-order.html#id-1.5.6.9.5.10) and the [docs page for Operator Classes](https://www.postgresql.org/docs/9.5/indexes-opclass.html). Brent is referring to [this comment](https://github.com/centerofci/mathesar/blob/8c215b2efdd740da0d530b624763cf260e12ca90/db/records/operations/sort.py#L47). The error message received upon such a crash is: `could not identify an ordering operator for type json` (`json` is the type of the unorderable column in this case. ## Solution The solution is, as explained by Brent, to make sure that every column we order by (implicitly or explicitly) is orderable. ## Severity Because of this, a JSON column (it's non-orderable), can make a query/table view unrenderable, but only when the table/query does not have a primary key, which cannot happen in a Mathesar-created table. It can only happen when Mathesar is used to access a pre-existing database. </issue> <code> [start of db/records/operations/sort.py] 1 from collections import namedtuple 2 from sqlalchemy import select 3 from db.columns import utils as col_utils 4 from db.records.exceptions import BadSortFormat, SortFieldNotFound 5 6 7 def make_order_by_deterministic(relation, order_by=None): 8 """ 9 Makes an order_by deterministic (totally ordering). 10 11 Given a relation, and a `order_by` spec, that defines the ordering to be applied to the 12 relation, returns a new order_by that is the totally ordered (deterministic) version of the 13 input order_by. 14 15 Appending primary key sort guarantees determinism, but if that fails, we revert to ordering by 16 all columns. 17 """ 18 if order_by is None: 19 order_by = [] 20 order_by = _append_primary_key_sort(relation, order_by) 21 if not order_by: 22 order_by = _build_order_by_all_columns_clause(relation) 23 return order_by 24 25 26 def _append_primary_key_sort(relation, order_by): 27 """ 28 Given an order by clause, we can guarantee a deterministic ordering 29 overall by appending a final ordering by primary key if one exists. 30 """ 31 pk_cols = col_utils.get_primary_key_column_collection_from_relation(relation) 32 order_by = list(order_by) 33 if pk_cols is not None: 34 order_by += [ 35 {'field': col, 'direction': 'asc'} 36 for col 37 in set(pk_cols).intersection(relation.columns) 38 ] 39 return order_by 40 41 42 def _build_order_by_all_columns_clause(relation): 43 """ 44 To be used when we have failed to find any other ordering criteria, 45 since ordering by all columns is inherently inefficient. 46 47 Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error 48 like "could not identify an ordering operator for type json", because we were trying to 49 sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which 50 requires special handling to be sorted. 
The problem is bypassed by not attempting to sort on 51 internal columns. 52 """ 53 return [ 54 {'field': col, 'direction': 'asc'} 55 for col 56 in relation.columns 57 if not _is_internal_column(col) 58 ] 59 60 61 def _is_internal_column(col): 62 """ 63 Might not be exhaustive, take care. 64 """ 65 return col.name == '__mathesar_group_metadata' 66 67 68 def apply_relation_sorting(relation, sort_spec): 69 order_by_list = [ 70 _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec 71 ] 72 return select(relation).order_by(*order_by_list) 73 74 75 def _get_sorted_column_obj_from_spec(relation, spec): 76 try: 77 sort_spec = _deserialize_sort_spec(spec) 78 except (KeyError, TypeError, AssertionError): 79 raise BadSortFormat 80 81 try: 82 column = col_utils.get_column_obj_from_relation(relation, sort_spec.field) 83 except KeyError as e: 84 raise SortFieldNotFound(e) 85 except AttributeError: 86 raise BadSortFormat 87 88 try: 89 directed_col = _build_directed_column_expr(column, sort_spec) 90 except AttributeError: 91 raise BadSortFormat 92 93 return directed_col 94 95 96 def _deserialize_sort_spec(spec): 97 sort_spec = namedtuple( 98 '_sort_spec', 99 ['field', 'direction', 'nullsfirst', 'nullslast'] 100 )( 101 field=spec['field'], 102 direction=spec['direction'], 103 nullsfirst=spec.get('nullsfirst', False), 104 nullslast=spec.get('nullslast', False) 105 ) 106 # Since it's not valid to have both nullsfirst and nullslast. 107 assert not sort_spec.nullsfirst or not sort_spec.nullslast 108 return sort_spec 109 110 111 def _build_directed_column_expr(column, sort_spec): 112 directed_col = getattr(column, sort_spec.direction)() 113 if sort_spec.nullsfirst: 114 directed_col = directed_col.nulls_first() 115 elif sort_spec.nullslast: 116 directed_col = directed_col.nulls_last() 117 return directed_col 118 [end of db/records/operations/sort.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/db/records/operations/sort.py b/db/records/operations/sort.py --- a/db/records/operations/sort.py +++ b/db/records/operations/sort.py @@ -44,27 +44,37 @@ To be used when we have failed to find any other ordering criteria, since ordering by all columns is inherently inefficient. - Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error - like "could not identify an ordering operator for type json", because we were trying to - sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which - requires special handling to be sorted. The problem is bypassed by not attempting to sort on - internal columns. + Note the filtering out some columns, namely internal columns and non-orderable columns. See + their docstrings for details. """ return [ {'field': col, 'direction': 'asc'} for col in relation.columns - if not _is_internal_column(col) + if _is_col_orderable(col) and not _is_internal_column(col) ] def _is_internal_column(col): """ + Columns that Mathesar adds for its own devices and does not expose to the user. We don't want + to sort by these. + Might not be exhaustive, take care. """ return col.name == '__mathesar_group_metadata' +def _is_col_orderable(col): + """ + Some columns are not orderable (or at least don't have a non-ambiguous way to define order + without additional logic). We only want to order by orderably columns. + """ + data_type = col.type + non_orderable_type = ['Binary', 'LargeBinary', 'PickleType', 'ARRAY', 'JSON', 'JSONB'] + return str(data_type) not in non_orderable_type + + def apply_relation_sorting(relation, sort_spec): order_by_list = [ _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec
{"golden_diff": "diff --git a/db/records/operations/sort.py b/db/records/operations/sort.py\n--- a/db/records/operations/sort.py\n+++ b/db/records/operations/sort.py\n@@ -44,27 +44,37 @@\n To be used when we have failed to find any other ordering criteria,\n since ordering by all columns is inherently inefficient.\n \n- Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error\n- like \"could not identify an ordering operator for type json\", because we were trying to\n- sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which\n- requires special handling to be sorted. The problem is bypassed by not attempting to sort on\n- internal columns.\n+ Note the filtering out some columns, namely internal columns and non-orderable columns. See\n+ their docstrings for details.\n \"\"\"\n return [\n {'field': col, 'direction': 'asc'}\n for col\n in relation.columns\n- if not _is_internal_column(col)\n+ if _is_col_orderable(col) and not _is_internal_column(col)\n ]\n \n \n def _is_internal_column(col):\n \"\"\"\n+ Columns that Mathesar adds for its own devices and does not expose to the user. We don't want\n+ to sort by these.\n+\n Might not be exhaustive, take care.\n \"\"\"\n return col.name == '__mathesar_group_metadata'\n \n \n+def _is_col_orderable(col):\n+ \"\"\"\n+ Some columns are not orderable (or at least don't have a non-ambiguous way to define order\n+ without additional logic). We only want to order by orderably columns.\n+ \"\"\"\n+ data_type = col.type\n+ non_orderable_type = ['Binary', 'LargeBinary', 'PickleType', 'ARRAY', 'JSON', 'JSONB']\n+ return str(data_type) not in non_orderable_type\n+\n+\n def apply_relation_sorting(relation, sort_spec):\n order_by_list = [\n _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec\n", "issue": "Ordering by a non-orderable column (implicitly or explicitly) causes a backend crash\n## Bug\r\n\r\nBrent [writes](https://github.com/centerofci/mathesar/pull/2253#pullrequestreview-1251718380):\r\n\r\n> At one point, you note in a comment that there's a bug whenever we have internal columns of JSON type, and no primary key in a table. This bug is still present for _any_ non-orderable column type if there is no primary key in the table (e.g., if the user attaches to a preexisting DB). It's easy to figure out whether or not a type is orderable in PostgreSQL. See the [docs page for ORDER BY](https://www.postgresql.org/docs/current/queries-order.html#id-1.5.6.9.5.10) and the [docs page for Operator Classes](https://www.postgresql.org/docs/9.5/indexes-opclass.html).\r\n\r\nBrent is referring to [this comment](https://github.com/centerofci/mathesar/blob/8c215b2efdd740da0d530b624763cf260e12ca90/db/records/operations/sort.py#L47).\r\n\r\nThe error message received upon such a crash is: `could not identify an ordering operator for type json` (`json` is the type of the unorderable column in this case.\r\n\r\n## Solution\r\n\r\nThe solution is, as explained by Brent, to make sure that every column we order by (implicitly or explicitly) is orderable.\r\n\r\n## Severity\r\n\r\nBecause of this, a JSON column (it's non-orderable), can make a query/table view unrenderable, but only when the table/query does not have a primary key, which cannot happen in a Mathesar-created table. 
It can only happen when Mathesar is used to access a pre-existing database.\n", "before_files": [{"content": "from collections import namedtuple\nfrom sqlalchemy import select\nfrom db.columns import utils as col_utils\nfrom db.records.exceptions import BadSortFormat, SortFieldNotFound\n\n\ndef make_order_by_deterministic(relation, order_by=None):\n \"\"\"\n Makes an order_by deterministic (totally ordering).\n\n Given a relation, and a `order_by` spec, that defines the ordering to be applied to the\n relation, returns a new order_by that is the totally ordered (deterministic) version of the\n input order_by.\n\n Appending primary key sort guarantees determinism, but if that fails, we revert to ordering by\n all columns.\n \"\"\"\n if order_by is None:\n order_by = []\n order_by = _append_primary_key_sort(relation, order_by)\n if not order_by:\n order_by = _build_order_by_all_columns_clause(relation)\n return order_by\n\n\ndef _append_primary_key_sort(relation, order_by):\n \"\"\"\n Given an order by clause, we can guarantee a deterministic ordering\n overall by appending a final ordering by primary key if one exists.\n \"\"\"\n pk_cols = col_utils.get_primary_key_column_collection_from_relation(relation)\n order_by = list(order_by)\n if pk_cols is not None:\n order_by += [\n {'field': col, 'direction': 'asc'}\n for col\n in set(pk_cols).intersection(relation.columns)\n ]\n return order_by\n\n\ndef _build_order_by_all_columns_clause(relation):\n \"\"\"\n To be used when we have failed to find any other ordering criteria,\n since ordering by all columns is inherently inefficient.\n\n Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error\n like \"could not identify an ordering operator for type json\", because we were trying to\n sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which\n requires special handling to be sorted. 
The problem is bypassed by not attempting to sort on\n internal columns.\n \"\"\"\n return [\n {'field': col, 'direction': 'asc'}\n for col\n in relation.columns\n if not _is_internal_column(col)\n ]\n\n\ndef _is_internal_column(col):\n \"\"\"\n Might not be exhaustive, take care.\n \"\"\"\n return col.name == '__mathesar_group_metadata'\n\n\ndef apply_relation_sorting(relation, sort_spec):\n order_by_list = [\n _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec\n ]\n return select(relation).order_by(*order_by_list)\n\n\ndef _get_sorted_column_obj_from_spec(relation, spec):\n try:\n sort_spec = _deserialize_sort_spec(spec)\n except (KeyError, TypeError, AssertionError):\n raise BadSortFormat\n\n try:\n column = col_utils.get_column_obj_from_relation(relation, sort_spec.field)\n except KeyError as e:\n raise SortFieldNotFound(e)\n except AttributeError:\n raise BadSortFormat\n\n try:\n directed_col = _build_directed_column_expr(column, sort_spec)\n except AttributeError:\n raise BadSortFormat\n\n return directed_col\n\n\ndef _deserialize_sort_spec(spec):\n sort_spec = namedtuple(\n '_sort_spec',\n ['field', 'direction', 'nullsfirst', 'nullslast']\n )(\n field=spec['field'],\n direction=spec['direction'],\n nullsfirst=spec.get('nullsfirst', False),\n nullslast=spec.get('nullslast', False)\n )\n # Since it's not valid to have both nullsfirst and nullslast.\n assert not sort_spec.nullsfirst or not sort_spec.nullslast\n return sort_spec\n\n\ndef _build_directed_column_expr(column, sort_spec):\n directed_col = getattr(column, sort_spec.direction)()\n if sort_spec.nullsfirst:\n directed_col = directed_col.nulls_first()\n elif sort_spec.nullslast:\n directed_col = directed_col.nulls_last()\n return directed_col\n", "path": "db/records/operations/sort.py"}]}
2,070
467
gh_patches_debug_25806
rasdani/github-patches
git_diff
huggingface__dataset-viewer-2058
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Better handling of k8s SIGTERM when stopping workers We've observed recently that worker pods can take more than 1h to get killed by kubernetes (see internal [slack thread](https://huggingface.slack.com/archives/C04L6P8KNQ5/p1697641144036899) for example). According to the k8s docs, at one point it sends a TERM and then after a grace period a SIGKILL on all the running processes. If this keeps happening we need to improve the way we handle those to kill the pods faster. One idea is to catch SIGTERM in the worker executor (main python process of workers) that would stop the worker loop (its subprocess where the actual code runs). </issue> <code> [start of services/worker/src/worker/executor.py] 1 # SPDX-License-Identifier: Apache-2.0 2 # Copyright 2022 The HuggingFace Authors. 3 import asyncio 4 import logging 5 import os 6 import sys 7 from collections.abc import Callable 8 from datetime import datetime, timedelta 9 from random import random 10 from typing import Any, Optional, Union 11 12 import orjson 13 from filelock import FileLock 14 from libcommon.processing_graph import ProcessingGraph 15 from libcommon.queue import Queue 16 from libcommon.utils import get_datetime 17 from mirakuru import OutputExecutor, ProcessExitedWithError 18 19 from worker import start_worker_loop 20 from worker.config import AppConfig 21 from worker.job_manager import JobManager 22 from worker.job_runner_factory import JobRunnerFactory 23 from worker.loop import WorkerState 24 25 START_WORKER_LOOP_PATH = start_worker_loop.__file__ 26 27 28 async def every( 29 func: Callable[..., Optional[Any]], 30 *args: Any, 31 seconds: Union[float, tuple[float, float]], 32 stop_on: Optional[Any] = None, 33 **kwargs: Any, 34 ) -> None: 35 while True: 36 out = func(*args, **kwargs) 37 if stop_on is not None and out == stop_on: 38 break 39 delay = ( 40 seconds[0] + (seconds[1] - seconds[0]) * random() if isinstance(seconds, tuple) else seconds # nosec B311 41 ) 42 await asyncio.sleep(delay) 43 44 45 class BadWorkerState(RuntimeError): 46 """Raised when the worker state from the worker read by the executor is not valid.""" 47 48 pass 49 50 51 class WorkerExecutor: 52 def __init__(self, app_config: AppConfig, job_runner_factory: JobRunnerFactory, state_file_path: str) -> None: 53 self.app_config = app_config 54 self.job_runner_factory = job_runner_factory 55 self.state_file_path = state_file_path 56 self.processing_graph = ProcessingGraph(self.app_config.processing_graph) 57 58 max_missing_heartbeats = self.app_config.worker.max_missing_heartbeats 59 heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds 60 self.max_seconds_without_heartbeat_for_zombies = heartbeat_interval_seconds * max_missing_heartbeats 61 62 self.heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds 63 self.max_job_duration_seconds = self.app_config.worker.max_job_duration_seconds 64 self.kill_zombies_interval_seconds = self.app_config.worker.kill_zombies_interval_seconds 65 self.kill_long_job_interval_seconds = self.app_config.worker.kill_long_job_interval_seconds 66 67 def _create_worker_loop_executor(self) -> OutputExecutor: 68 banner = self.state_file_path 69 start_worker_loop_command = [ 70 sys.executable, 71 START_WORKER_LOOP_PATH, 72 "--print-worker-state-path", 73 ] 74 return OutputExecutor(start_worker_loop_command, banner, timeout=10) 75 76 def start(self) -> None: 77 exceptions = [] 78 
worker_loop_executor = self._create_worker_loop_executor() 79 worker_loop_executor.start() # blocking until the banner is printed 80 81 def custom_exception_handler(loop: asyncio.AbstractEventLoop, context: dict[str, Any]) -> None: 82 nonlocal exceptions 83 # first, handle with default handler 84 loop.default_exception_handler(context) 85 86 exception = context.get("exception") 87 if exception: 88 exceptions.append(repr(exception)) 89 loop.stop() 90 91 loop = asyncio.get_event_loop() 92 loop.set_exception_handler(custom_exception_handler) 93 logging.info("Starting heartbeat.") 94 loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds)) 95 loop.create_task( 96 every( 97 self.kill_zombies, 98 seconds=( 99 self.kill_zombies_interval_seconds * 0.5, 100 self.kill_zombies_interval_seconds * 1.5, 101 ), 102 ) 103 ) 104 loop.create_task( 105 every( 106 self.kill_long_job, 107 worker_loop_executor=worker_loop_executor, 108 seconds=( 109 self.kill_long_job_interval_seconds * 0.5, 110 self.kill_long_job_interval_seconds * 1.5, 111 ), 112 ) 113 ) 114 loop.run_until_complete( 115 every(self.is_worker_alive, worker_loop_executor=worker_loop_executor, seconds=1.0, stop_on=False) 116 ) 117 if exceptions: 118 raise RuntimeError(f"Some async tasks failed: {exceptions}") 119 120 def get_state(self) -> Optional[WorkerState]: 121 worker_state_file_path = self.state_file_path 122 if not os.path.exists(worker_state_file_path): 123 return None 124 with FileLock(f"{worker_state_file_path}.lock"): 125 try: 126 with open(worker_state_file_path, "rb") as worker_state_f: 127 worker_state = orjson.loads(worker_state_f.read()) 128 return WorkerState( 129 current_job_info=worker_state.get("current_job_info"), 130 last_updated=datetime.fromisoformat(worker_state["last_updated"]), 131 ) 132 except (orjson.JSONDecodeError, KeyError) as err: 133 raise BadWorkerState(f"Failed to read worker state at {worker_state_file_path}") from err 134 135 def heartbeat(self) -> None: 136 worker_state = self.get_state() 137 if worker_state and worker_state["current_job_info"]: 138 Queue().heartbeat(job_id=worker_state["current_job_info"]["job_id"]) 139 140 def kill_zombies(self) -> None: 141 queue = Queue() 142 zombies = queue.get_zombies(max_seconds_without_heartbeat=self.max_seconds_without_heartbeat_for_zombies) 143 message = "Job manager crashed while running this job (missing heartbeats)." 144 for zombie in zombies: 145 job_runner = self.job_runner_factory.create_job_runner(zombie) 146 job_manager = JobManager( 147 job_info=zombie, 148 app_config=self.app_config, 149 job_runner=job_runner, 150 processing_graph=self.processing_graph, 151 ) 152 job_manager.set_crashed(message=message) 153 logging.info(f"Killing zombie. Job info = {zombie}") 154 155 def kill_long_job(self, worker_loop_executor: OutputExecutor) -> None: 156 worker_state = self.get_state() 157 if worker_state and worker_state["current_job_info"]: 158 long_job = worker_state["current_job_info"] 159 last_updated = worker_state["last_updated"] 160 coefficient = 10 if long_job["params"]["dataset"] == "cerebras/SlimPajama-627B" else 1 161 if last_updated + timedelta(seconds=coefficient * self.max_job_duration_seconds) <= get_datetime(): 162 _duration_seconds = int((get_datetime() - last_updated).total_seconds()) 163 logging.warning( 164 f"Job {long_job} exceeded maximum duration of" 165 f" {self.max_job_duration_seconds} seconds ({_duration_seconds} seconds)." 
166 ) 167 try: 168 worker_loop_executor.stop() # raises an error if the worker returned exit code 1 169 finally: 170 logging.info(f"Killing a long job. Job info = {long_job}") 171 job_runner = self.job_runner_factory.create_job_runner(long_job) 172 job_manager = JobManager( 173 job_info=long_job, 174 app_config=self.app_config, 175 job_runner=job_runner, 176 processing_graph=self.processing_graph, 177 ) 178 message = "Job manager was killed while running this job (job exceeded maximum duration)." 179 job_manager.set_exceeded_maximum_duration(message=message) 180 181 def is_worker_alive(self, worker_loop_executor: OutputExecutor) -> bool: 182 if worker_loop_executor.running(): 183 return True 184 try: 185 worker_loop_executor.stop() # raises an error if the worker returned unexpected exit code 186 except ProcessExitedWithError as err: 187 explanation = f"exit code f{err.exit_code}" 188 if err.exit_code == -9: 189 explanation += " SIGKILL - surely an OOM" 190 error_msg = f"Worker crashed ({explanation})" 191 state = self.get_state() 192 if state and state["current_job_info"]: 193 error_msg += f"when running job_id={state['current_job_info']['job_id']}" 194 logging.error(error_msg) 195 raise 196 return False 197 [end of services/worker/src/worker/executor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py --- a/services/worker/src/worker/executor.py +++ b/services/worker/src/worker/executor.py @@ -3,6 +3,7 @@ import asyncio import logging import os +import signal import sys from collections.abc import Callable from datetime import datetime, timedelta @@ -89,6 +90,7 @@ loop.stop() loop = asyncio.get_event_loop() + loop.add_signal_handler(signal.SIGTERM, self.stop, worker_loop_executor) loop.set_exception_handler(custom_exception_handler) logging.info("Starting heartbeat.") loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds)) @@ -117,6 +119,9 @@ if exceptions: raise RuntimeError(f"Some async tasks failed: {exceptions}") + def stop(self, worker_loop_executor: OutputExecutor) -> None: + worker_loop_executor.stop() + def get_state(self) -> Optional[WorkerState]: worker_state_file_path = self.state_file_path if not os.path.exists(worker_state_file_path):
{"golden_diff": "diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py\n--- a/services/worker/src/worker/executor.py\n+++ b/services/worker/src/worker/executor.py\n@@ -3,6 +3,7 @@\n import asyncio\n import logging\n import os\n+import signal\n import sys\n from collections.abc import Callable\n from datetime import datetime, timedelta\n@@ -89,6 +90,7 @@\n loop.stop()\n \n loop = asyncio.get_event_loop()\n+ loop.add_signal_handler(signal.SIGTERM, self.stop, worker_loop_executor)\n loop.set_exception_handler(custom_exception_handler)\n logging.info(\"Starting heartbeat.\")\n loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))\n@@ -117,6 +119,9 @@\n if exceptions:\n raise RuntimeError(f\"Some async tasks failed: {exceptions}\")\n \n+ def stop(self, worker_loop_executor: OutputExecutor) -> None:\n+ worker_loop_executor.stop()\n+\n def get_state(self) -> Optional[WorkerState]:\n worker_state_file_path = self.state_file_path\n if not os.path.exists(worker_state_file_path):\n", "issue": "Better handling of k8s SIGTERM when stopping workers\nWe've observed recently that worker pods can take more than 1h to get killed by kubernetes (see internal [slack thread](https://huggingface.slack.com/archives/C04L6P8KNQ5/p1697641144036899) for example).\r\nAccording to the k8s docs, at one point it sends a TERM and then after a grace period a SIGKILL on all the running processes.\r\nIf this keeps happening we need to improve the way we handle those to kill the pods faster.\r\n\r\nOne idea is to catch SIGTERM in the worker executor (main python process of workers) that would stop the worker loop (its subprocess where the actual code runs).\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\nimport asyncio\nimport logging\nimport os\nimport sys\nfrom collections.abc import Callable\nfrom datetime import datetime, timedelta\nfrom random import random\nfrom typing import Any, Optional, Union\n\nimport orjson\nfrom filelock import FileLock\nfrom libcommon.processing_graph import ProcessingGraph\nfrom libcommon.queue import Queue\nfrom libcommon.utils import get_datetime\nfrom mirakuru import OutputExecutor, ProcessExitedWithError\n\nfrom worker import start_worker_loop\nfrom worker.config import AppConfig\nfrom worker.job_manager import JobManager\nfrom worker.job_runner_factory import JobRunnerFactory\nfrom worker.loop import WorkerState\n\nSTART_WORKER_LOOP_PATH = start_worker_loop.__file__\n\n\nasync def every(\n func: Callable[..., Optional[Any]],\n *args: Any,\n seconds: Union[float, tuple[float, float]],\n stop_on: Optional[Any] = None,\n **kwargs: Any,\n) -> None:\n while True:\n out = func(*args, **kwargs)\n if stop_on is not None and out == stop_on:\n break\n delay = (\n seconds[0] + (seconds[1] - seconds[0]) * random() if isinstance(seconds, tuple) else seconds # nosec B311\n )\n await asyncio.sleep(delay)\n\n\nclass BadWorkerState(RuntimeError):\n \"\"\"Raised when the worker state from the worker read by the executor is not valid.\"\"\"\n\n pass\n\n\nclass WorkerExecutor:\n def __init__(self, app_config: AppConfig, job_runner_factory: JobRunnerFactory, state_file_path: str) -> None:\n self.app_config = app_config\n self.job_runner_factory = job_runner_factory\n self.state_file_path = state_file_path\n self.processing_graph = ProcessingGraph(self.app_config.processing_graph)\n\n max_missing_heartbeats = self.app_config.worker.max_missing_heartbeats\n 
heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds\n self.max_seconds_without_heartbeat_for_zombies = heartbeat_interval_seconds * max_missing_heartbeats\n\n self.heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds\n self.max_job_duration_seconds = self.app_config.worker.max_job_duration_seconds\n self.kill_zombies_interval_seconds = self.app_config.worker.kill_zombies_interval_seconds\n self.kill_long_job_interval_seconds = self.app_config.worker.kill_long_job_interval_seconds\n\n def _create_worker_loop_executor(self) -> OutputExecutor:\n banner = self.state_file_path\n start_worker_loop_command = [\n sys.executable,\n START_WORKER_LOOP_PATH,\n \"--print-worker-state-path\",\n ]\n return OutputExecutor(start_worker_loop_command, banner, timeout=10)\n\n def start(self) -> None:\n exceptions = []\n worker_loop_executor = self._create_worker_loop_executor()\n worker_loop_executor.start() # blocking until the banner is printed\n\n def custom_exception_handler(loop: asyncio.AbstractEventLoop, context: dict[str, Any]) -> None:\n nonlocal exceptions\n # first, handle with default handler\n loop.default_exception_handler(context)\n\n exception = context.get(\"exception\")\n if exception:\n exceptions.append(repr(exception))\n loop.stop()\n\n loop = asyncio.get_event_loop()\n loop.set_exception_handler(custom_exception_handler)\n logging.info(\"Starting heartbeat.\")\n loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))\n loop.create_task(\n every(\n self.kill_zombies,\n seconds=(\n self.kill_zombies_interval_seconds * 0.5,\n self.kill_zombies_interval_seconds * 1.5,\n ),\n )\n )\n loop.create_task(\n every(\n self.kill_long_job,\n worker_loop_executor=worker_loop_executor,\n seconds=(\n self.kill_long_job_interval_seconds * 0.5,\n self.kill_long_job_interval_seconds * 1.5,\n ),\n )\n )\n loop.run_until_complete(\n every(self.is_worker_alive, worker_loop_executor=worker_loop_executor, seconds=1.0, stop_on=False)\n )\n if exceptions:\n raise RuntimeError(f\"Some async tasks failed: {exceptions}\")\n\n def get_state(self) -> Optional[WorkerState]:\n worker_state_file_path = self.state_file_path\n if not os.path.exists(worker_state_file_path):\n return None\n with FileLock(f\"{worker_state_file_path}.lock\"):\n try:\n with open(worker_state_file_path, \"rb\") as worker_state_f:\n worker_state = orjson.loads(worker_state_f.read())\n return WorkerState(\n current_job_info=worker_state.get(\"current_job_info\"),\n last_updated=datetime.fromisoformat(worker_state[\"last_updated\"]),\n )\n except (orjson.JSONDecodeError, KeyError) as err:\n raise BadWorkerState(f\"Failed to read worker state at {worker_state_file_path}\") from err\n\n def heartbeat(self) -> None:\n worker_state = self.get_state()\n if worker_state and worker_state[\"current_job_info\"]:\n Queue().heartbeat(job_id=worker_state[\"current_job_info\"][\"job_id\"])\n\n def kill_zombies(self) -> None:\n queue = Queue()\n zombies = queue.get_zombies(max_seconds_without_heartbeat=self.max_seconds_without_heartbeat_for_zombies)\n message = \"Job manager crashed while running this job (missing heartbeats).\"\n for zombie in zombies:\n job_runner = self.job_runner_factory.create_job_runner(zombie)\n job_manager = JobManager(\n job_info=zombie,\n app_config=self.app_config,\n job_runner=job_runner,\n processing_graph=self.processing_graph,\n )\n job_manager.set_crashed(message=message)\n logging.info(f\"Killing zombie. 
Job info = {zombie}\")\n\n def kill_long_job(self, worker_loop_executor: OutputExecutor) -> None:\n worker_state = self.get_state()\n if worker_state and worker_state[\"current_job_info\"]:\n long_job = worker_state[\"current_job_info\"]\n last_updated = worker_state[\"last_updated\"]\n coefficient = 10 if long_job[\"params\"][\"dataset\"] == \"cerebras/SlimPajama-627B\" else 1\n if last_updated + timedelta(seconds=coefficient * self.max_job_duration_seconds) <= get_datetime():\n _duration_seconds = int((get_datetime() - last_updated).total_seconds())\n logging.warning(\n f\"Job {long_job} exceeded maximum duration of\"\n f\" {self.max_job_duration_seconds} seconds ({_duration_seconds} seconds).\"\n )\n try:\n worker_loop_executor.stop() # raises an error if the worker returned exit code 1\n finally:\n logging.info(f\"Killing a long job. Job info = {long_job}\")\n job_runner = self.job_runner_factory.create_job_runner(long_job)\n job_manager = JobManager(\n job_info=long_job,\n app_config=self.app_config,\n job_runner=job_runner,\n processing_graph=self.processing_graph,\n )\n message = \"Job manager was killed while running this job (job exceeded maximum duration).\"\n job_manager.set_exceeded_maximum_duration(message=message)\n\n def is_worker_alive(self, worker_loop_executor: OutputExecutor) -> bool:\n if worker_loop_executor.running():\n return True\n try:\n worker_loop_executor.stop() # raises an error if the worker returned unexpected exit code\n except ProcessExitedWithError as err:\n explanation = f\"exit code f{err.exit_code}\"\n if err.exit_code == -9:\n explanation += \" SIGKILL - surely an OOM\"\n error_msg = f\"Worker crashed ({explanation})\"\n state = self.get_state()\n if state and state[\"current_job_info\"]:\n error_msg += f\"when running job_id={state['current_job_info']['job_id']}\"\n logging.error(error_msg)\n raise\n return False\n", "path": "services/worker/src/worker/executor.py"}]}
2,877
252
gh_patches_debug_3357
rasdani/github-patches
git_diff
nipy__nipype-1574
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Build documentation fails if dipy is not installed </issue> <code> [start of nipype/interfaces/dipy/base.py] 1 # -*- coding: utf-8 -*- 2 """ Base interfaces for dipy """ 3 import os.path as op 4 import numpy as np 5 from nipype.interfaces.base import (traits, File, isdefined, 6 BaseInterface, BaseInterfaceInputSpec) 7 from ... import logging 8 9 IFLOGGER = logging.getLogger('interface') 10 11 HAVE_DIPY = True 12 try: 13 import dipy 14 except ImportError: 15 HAVE_DIPY = False 16 17 18 def no_dipy(): 19 """ Check if dipy is available """ 20 global HAVE_DIPY 21 return not HAVE_DIPY 22 23 24 def dipy_version(): 25 """ Check dipy version """ 26 if no_dipy(): 27 return None 28 29 return dipy.__version__ 30 31 32 class DipyBaseInterface(BaseInterface): 33 34 """ 35 A base interface for py:mod:`dipy` computations 36 """ 37 def __init__(self, **inputs): 38 if no_dipy(): 39 IFLOGGER.error('dipy was not found') 40 # raise ImportError('dipy was not found') 41 super(DipyBaseInterface, self).__init__(**inputs) 42 43 44 class DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec): 45 in_file = File(exists=True, mandatory=True, desc=('input diffusion data')) 46 in_bval = File(exists=True, mandatory=True, desc=('input b-values table')) 47 in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table')) 48 b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold')) 49 out_prefix = traits.Str(desc=('output prefix for file names')) 50 51 52 class DipyDiffusionInterface(DipyBaseInterface): 53 54 """ 55 A base interface for py:mod:`dipy` computations 56 """ 57 input_spec = DipyBaseInterfaceInputSpec 58 59 def _get_gradient_table(self): 60 bval = np.loadtxt(self.inputs.in_bval) 61 bvec = np.loadtxt(self.inputs.in_bvec).T 62 from dipy.core.gradients import gradient_table 63 gtab = gradient_table(bval, bvec) 64 65 gtab.b0_threshold = self.inputs.b0_thres 66 return gtab 67 68 def _gen_filename(self, name, ext=None): 69 fname, fext = op.splitext(op.basename(self.inputs.in_file)) 70 if fext == '.gz': 71 fname, fext2 = op.splitext(fname) 72 fext = fext2 + fext 73 74 if not isdefined(self.inputs.out_prefix): 75 out_prefix = op.abspath(fname) 76 else: 77 out_prefix = self.inputs.out_prefix 78 79 if ext is None: 80 ext = fext 81 82 return out_prefix + '_' + name + ext 83 [end of nipype/interfaces/dipy/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -36,7 +36,7 @@ """ def __init__(self, **inputs): if no_dipy(): - IFLOGGER.error('dipy was not found') + IFLOGGER.warn('dipy was not found') # raise ImportError('dipy was not found') super(DipyBaseInterface, self).__init__(**inputs)
{"golden_diff": "diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py\n--- a/nipype/interfaces/dipy/base.py\n+++ b/nipype/interfaces/dipy/base.py\n@@ -36,7 +36,7 @@\n \"\"\"\n def __init__(self, **inputs):\n if no_dipy():\n- IFLOGGER.error('dipy was not found')\n+ IFLOGGER.warn('dipy was not found')\n # raise ImportError('dipy was not found')\n super(DipyBaseInterface, self).__init__(**inputs)\n", "issue": "[BUG] Build documentation fails if dipy is not installed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\" Base interfaces for dipy \"\"\"\nimport os.path as op\nimport numpy as np\nfrom nipype.interfaces.base import (traits, File, isdefined,\n BaseInterface, BaseInterfaceInputSpec)\nfrom ... import logging\n\nIFLOGGER = logging.getLogger('interface')\n\nHAVE_DIPY = True\ntry:\n import dipy\nexcept ImportError:\n HAVE_DIPY = False\n\n\ndef no_dipy():\n \"\"\" Check if dipy is available \"\"\"\n global HAVE_DIPY\n return not HAVE_DIPY\n\n\ndef dipy_version():\n \"\"\" Check dipy version \"\"\"\n if no_dipy():\n return None\n\n return dipy.__version__\n\n\nclass DipyBaseInterface(BaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n def __init__(self, **inputs):\n if no_dipy():\n IFLOGGER.error('dipy was not found')\n # raise ImportError('dipy was not found')\n super(DipyBaseInterface, self).__init__(**inputs)\n\n\nclass DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=('input diffusion data'))\n in_bval = File(exists=True, mandatory=True, desc=('input b-values table'))\n in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table'))\n b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold'))\n out_prefix = traits.Str(desc=('output prefix for file names'))\n\n\nclass DipyDiffusionInterface(DipyBaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n input_spec = DipyBaseInterfaceInputSpec\n\n def _get_gradient_table(self):\n bval = np.loadtxt(self.inputs.in_bval)\n bvec = np.loadtxt(self.inputs.in_bvec).T\n from dipy.core.gradients import gradient_table\n gtab = gradient_table(bval, bvec)\n\n gtab.b0_threshold = self.inputs.b0_thres\n return gtab\n\n def _gen_filename(self, name, ext=None):\n fname, fext = op.splitext(op.basename(self.inputs.in_file))\n if fext == '.gz':\n fname, fext2 = op.splitext(fname)\n fext = fext2 + fext\n\n if not isdefined(self.inputs.out_prefix):\n out_prefix = op.abspath(fname)\n else:\n out_prefix = self.inputs.out_prefix\n\n if ext is None:\n ext = fext\n\n return out_prefix + '_' + name + ext\n", "path": "nipype/interfaces/dipy/base.py"}]}
1,283
123
gh_patches_debug_60523
rasdani/github-patches
git_diff
streamlit__streamlit-2811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sliders should show current value [regression] [Baseweb] # Summary (via @tvst: ) Our sliders right now require you to hover in order to see the selected value. This makes it really hard to understand what the user selected. I reported this before, but I just spent some time debugging my app thinking it was broken because I was reading the wrong slider value. Frustrating. I understand this is the new behavior of sliders in Base Web, but we have alternatives: **1. Roll back Base Web to a previous version** This is the preferable solution in order to get this fix out ASAP. Even if we decide it's only a temporary solution. 2. Try to find a solution using the latest Base Web 3. Copy/paste the old Baseweb slider into our own repo and modify it there. Their slider is based on another library, btw (I forget which), so maybe we should just use that library directly instead? ## Is this a regression? yes # Debug info - Streamlit version: 0.75-special Allow hiding tracebacks Currently, when a Streamlit app throws an exception, we print the traceback to the browser. This isn't necessarily the right thing to do for all apps; we should allow this to be configurable. Maybe a `[client] showTracebacks = false` option? And presumably, if tracebacks are disabled, we should filter them at the server level, so that the client never even receives the string, in case the user is worried about leaking internal app details. (Related discussion here: https://discuss.streamlit.io/t/dont-show-users-tracebacks/1746) </issue> <code> [start of e2e/scripts/st_columns.py] 1 # Copyright 2018-2021 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import streamlit as st 16 17 CAT_IMAGE = "https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80" 18 19 if st.button("Layout should not shift when this is pressed"): 20 st.write("Pressed!") 21 22 # Same-width columns 23 c1, c2, c3 = st.beta_columns(3) 24 c1.image(CAT_IMAGE, use_column_width=True) 25 c2.image(CAT_IMAGE, use_column_width=True) 26 c3.image(CAT_IMAGE, use_column_width=True) 27 28 29 # Variable-width columns 30 for c in st.beta_columns((1, 2, 4, 8)): 31 c.image(CAT_IMAGE, use_column_width=True) 32 [end of e2e/scripts/st_columns.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/e2e/scripts/st_columns.py b/e2e/scripts/st_columns.py --- a/e2e/scripts/st_columns.py +++ b/e2e/scripts/st_columns.py @@ -27,5 +27,5 @@ # Variable-width columns -for c in st.beta_columns((1, 2, 4, 8)): +for c in st.beta_columns((1, 2, 3, 4)): c.image(CAT_IMAGE, use_column_width=True)
{"golden_diff": "diff --git a/e2e/scripts/st_columns.py b/e2e/scripts/st_columns.py\n--- a/e2e/scripts/st_columns.py\n+++ b/e2e/scripts/st_columns.py\n@@ -27,5 +27,5 @@\n \n \n # Variable-width columns\n-for c in st.beta_columns((1, 2, 4, 8)):\n+for c in st.beta_columns((1, 2, 3, 4)):\n c.image(CAT_IMAGE, use_column_width=True)\n", "issue": "Sliders should show current value [regression] [Baseweb]\n# Summary\r\n\r\n(via @tvst: )\r\n\r\nOur sliders right now require you to hover in order to see the selected value. This makes it really hard to understand what the user selected. I reported this before, but I just spent some time debugging my app thinking it was broken because I was reading the wrong slider value. Frustrating.\r\n\r\nI understand this is the new behavior of sliders in Base Web, but we have alternatives:\r\n\r\n**1. Roll back Base Web to a previous version**\r\n\r\n This is the preferable solution in order to get this fix out ASAP. Even if we decide it's only a temporary solution.\r\n\r\n2. Try to find a solution using the latest Base Web\r\n\r\n3. Copy/paste the old Baseweb slider into our own repo and modify it there. Their slider is based on another library, btw (I forget which), so maybe we should just use that library directly instead?\r\n\r\n\r\n## Is this a regression?\r\n\r\nyes \r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.75-special\nAllow hiding tracebacks\nCurrently, when a Streamlit app throws an exception, we print the traceback to the browser. This isn't necessarily the right thing to do for all apps; we should allow this to be configurable.\r\n\r\nMaybe a `[client] showTracebacks = false` option? And presumably, if tracebacks are disabled, we should filter them at the server level, so that the client never even receives the string, in case the user is worried about leaking internal app details.\r\n\r\n(Related discussion here: https://discuss.streamlit.io/t/dont-show-users-tracebacks/1746)\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nCAT_IMAGE = \"https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80\"\n\nif st.button(\"Layout should not shift when this is pressed\"):\n st.write(\"Pressed!\")\n\n# Same-width columns\nc1, c2, c3 = st.beta_columns(3)\nc1.image(CAT_IMAGE, use_column_width=True)\nc2.image(CAT_IMAGE, use_column_width=True)\nc3.image(CAT_IMAGE, use_column_width=True)\n\n\n# Variable-width columns\nfor c in st.beta_columns((1, 2, 4, 8)):\n c.image(CAT_IMAGE, use_column_width=True)\n", "path": "e2e/scripts/st_columns.py"}]}
1,283
107
gh_patches_debug_18992
rasdani/github-patches
git_diff
pytorch__vision-6397
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Inconsistency between RoIPool's forward and roi_pool ### 🚀 The feature Currently the functional `roi_pool` method supports both `Tensor` and `List[Tensor]` for boxes, but the module forward function of `RoIPool` expects boxes(rois) to be `Tensor` only. Can we change the function signature to support both? The inconsistency can cause issue during torchscripting, for example: ``` from typing import List import torch from torch import Tensor from torchvision.ops import RoIPool class Model(torch.nn.Module): def __init__(self): super().__init__() self.pool = RoIPool(output_size=[3,3], spatial_scale=1.0) def forward(self, imgs, boxes: List[Tensor]): return self.pool(imgs, boxes) model = Model() torch.jit.script(model) ``` Raise the following error: > forward(__torch__.torchvision.ops.roi_pool.RoIPool self, Tensor input, Tensor rois) -> Tensor: > Expected a value of type 'Tensor' for argument 'rois' but instead found type 'List[Tensor]'. > Empty lists default to List[Tensor]. Add a variable annotation to the assignment to create an empty list of another type (torch.jit.annotate(List[T, []]) where T is the type of elements in the list for Python 2) > : > File "<ipython-input-8-9536dd2f0cc1>", line 12 > def forward(self, imgs, boxes: List[Tensor]): > return self.pool(imgs, boxes) > ~~~~~~~~~ <--- HERE ### Motivation, pitch Make sure the model can be scriptable when using list of boxes as the input. ### Alternatives Only support Tensor as the input, user are required to convert bbox of different images into one tensor. Potentially make `_utils.convert_boxes_to_roi_format` public so that users don't need to write the conversion function. ### Additional context _No response_ </issue> <code> [start of torchvision/ops/roi_align.py] 1 from typing import List, Union 2 3 import torch 4 from torch import nn, Tensor 5 from torch.jit.annotations import BroadcastingList2 6 from torch.nn.modules.utils import _pair 7 from torchvision.extension import _assert_has_ops 8 9 from ..utils import _log_api_usage_once 10 from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format 11 12 13 def roi_align( 14 input: Tensor, 15 boxes: Union[Tensor, List[Tensor]], 16 output_size: BroadcastingList2[int], 17 spatial_scale: float = 1.0, 18 sampling_ratio: int = -1, 19 aligned: bool = False, 20 ) -> Tensor: 21 """ 22 Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN. 23 24 Args: 25 input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element 26 contains ``C`` feature maps of dimensions ``H x W``. 27 If the tensor is quantized, we expect a batch size of ``N == 1``. 28 boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) 29 format where the regions will be taken from. 30 The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 31 If a single Tensor is passed, then the first column should 32 contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``. 33 If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i 34 in the batch. 35 output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling 36 is performed, as (height, width). 37 spatial_scale (float): a scaling factor that maps the box coordinates to 38 the input coordinates. 
For example, if your boxes are defined on the scale 39 of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of 40 the original image), you'll want to set this to 0.5. Default: 1.0 41 sampling_ratio (int): number of sampling points in the interpolation grid 42 used to compute the output value of each pooled output bin. If > 0, 43 then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If 44 <= 0, then an adaptive number of grid points are used (computed as 45 ``ceil(roi_width / output_width)``, and likewise for height). Default: -1 46 aligned (bool): If False, use the legacy implementation. 47 If True, pixel shift the box coordinates it by -0.5 for a better alignment with the two 48 neighboring pixel indices. This version is used in Detectron2 49 50 Returns: 51 Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs. 52 """ 53 if not torch.jit.is_scripting() and not torch.jit.is_tracing(): 54 _log_api_usage_once(roi_align) 55 _assert_has_ops() 56 check_roi_boxes_shape(boxes) 57 rois = boxes 58 output_size = _pair(output_size) 59 if not isinstance(rois, torch.Tensor): 60 rois = convert_boxes_to_roi_format(rois) 61 return torch.ops.torchvision.roi_align( 62 input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned 63 ) 64 65 66 class RoIAlign(nn.Module): 67 """ 68 See :func:`roi_align`. 69 """ 70 71 def __init__( 72 self, 73 output_size: BroadcastingList2[int], 74 spatial_scale: float, 75 sampling_ratio: int, 76 aligned: bool = False, 77 ): 78 super().__init__() 79 _log_api_usage_once(self) 80 self.output_size = output_size 81 self.spatial_scale = spatial_scale 82 self.sampling_ratio = sampling_ratio 83 self.aligned = aligned 84 85 def forward(self, input: Tensor, rois: Tensor) -> Tensor: 86 return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) 87 88 def __repr__(self) -> str: 89 s = ( 90 f"{self.__class__.__name__}(" 91 f"output_size={self.output_size}" 92 f", spatial_scale={self.spatial_scale}" 93 f", sampling_ratio={self.sampling_ratio}" 94 f", aligned={self.aligned}" 95 f")" 96 ) 97 return s 98 [end of torchvision/ops/roi_align.py] [start of torchvision/ops/roi_pool.py] 1 from typing import List, Union 2 3 import torch 4 from torch import nn, Tensor 5 from torch.jit.annotations import BroadcastingList2 6 from torch.nn.modules.utils import _pair 7 from torchvision.extension import _assert_has_ops 8 9 from ..utils import _log_api_usage_once 10 from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format 11 12 13 def roi_pool( 14 input: Tensor, 15 boxes: Union[Tensor, List[Tensor]], 16 output_size: BroadcastingList2[int], 17 spatial_scale: float = 1.0, 18 ) -> Tensor: 19 """ 20 Performs Region of Interest (RoI) Pool operator described in Fast R-CNN 21 22 Args: 23 input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element 24 contains ``C`` feature maps of dimensions ``H x W``. 25 boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) 26 format where the regions will be taken from. 27 The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 28 If a single Tensor is passed, then the first column should 29 contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``. 30 If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i 31 in the batch. 
32 output_size (int or Tuple[int, int]): the size of the output after the cropping 33 is performed, as (height, width) 34 spatial_scale (float): a scaling factor that maps the box coordinates to 35 the input coordinates. For example, if your boxes are defined on the scale 36 of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of 37 the original image), you'll want to set this to 0.5. Default: 1.0 38 39 Returns: 40 Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs. 41 """ 42 if not torch.jit.is_scripting() and not torch.jit.is_tracing(): 43 _log_api_usage_once(roi_pool) 44 _assert_has_ops() 45 check_roi_boxes_shape(boxes) 46 rois = boxes 47 output_size = _pair(output_size) 48 if not isinstance(rois, torch.Tensor): 49 rois = convert_boxes_to_roi_format(rois) 50 output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, output_size[0], output_size[1]) 51 return output 52 53 54 class RoIPool(nn.Module): 55 """ 56 See :func:`roi_pool`. 57 """ 58 59 def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float): 60 super().__init__() 61 _log_api_usage_once(self) 62 self.output_size = output_size 63 self.spatial_scale = spatial_scale 64 65 def forward(self, input: Tensor, rois: Tensor) -> Tensor: 66 return roi_pool(input, rois, self.output_size, self.spatial_scale) 67 68 def __repr__(self) -> str: 69 s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})" 70 return s 71 [end of torchvision/ops/roi_pool.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py --- a/torchvision/ops/roi_align.py +++ b/torchvision/ops/roi_align.py @@ -82,7 +82,7 @@ self.sampling_ratio = sampling_ratio self.aligned = aligned - def forward(self, input: Tensor, rois: Tensor) -> Tensor: + def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor: return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) def __repr__(self) -> str: diff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py --- a/torchvision/ops/roi_pool.py +++ b/torchvision/ops/roi_pool.py @@ -62,7 +62,7 @@ self.output_size = output_size self.spatial_scale = spatial_scale - def forward(self, input: Tensor, rois: Tensor) -> Tensor: + def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor: return roi_pool(input, rois, self.output_size, self.spatial_scale) def __repr__(self) -> str:
{"golden_diff": "diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py\n--- a/torchvision/ops/roi_align.py\n+++ b/torchvision/ops/roi_align.py\n@@ -82,7 +82,7 @@\n self.sampling_ratio = sampling_ratio\n self.aligned = aligned\n \n- def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n+ def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:\n return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)\n \n def __repr__(self) -> str:\ndiff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py\n--- a/torchvision/ops/roi_pool.py\n+++ b/torchvision/ops/roi_pool.py\n@@ -62,7 +62,7 @@\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n \n- def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n+ def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:\n return roi_pool(input, rois, self.output_size, self.spatial_scale)\n \n def __repr__(self) -> str:\n", "issue": "Inconsistency between RoIPool's forward and roi_pool\n### \ud83d\ude80 The feature\n\nCurrently the functional `roi_pool` method supports both `Tensor` and `List[Tensor]` for boxes, but the module forward function of `RoIPool` expects boxes(rois) to be `Tensor` only. Can we change the function signature to support both? \r\n\r\nThe inconsistency can cause issue during torchscripting, for example:\r\n```\r\nfrom typing import List\r\nimport torch\r\nfrom torch import Tensor\r\nfrom torchvision.ops import RoIPool\r\n\r\nclass Model(torch.nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.pool = RoIPool(output_size=[3,3], spatial_scale=1.0)\r\n\r\n def forward(self, imgs, boxes: List[Tensor]):\r\n return self.pool(imgs, boxes)\r\n\r\nmodel = Model()\r\ntorch.jit.script(model)\r\n```\r\nRaise the following error:\r\n\r\n> forward(__torch__.torchvision.ops.roi_pool.RoIPool self, Tensor input, Tensor rois) -> Tensor:\r\n> Expected a value of type 'Tensor' for argument 'rois' but instead found type 'List[Tensor]'.\r\n> Empty lists default to List[Tensor]. Add a variable annotation to the assignment to create an empty list of another type (torch.jit.annotate(List[T, []]) where T is the type of elements in the list for Python 2)\r\n> :\r\n> File \"<ipython-input-8-9536dd2f0cc1>\", line 12\r\n> def forward(self, imgs, boxes: List[Tensor]):\r\n> return self.pool(imgs, boxes)\r\n> ~~~~~~~~~ <--- HERE\n\n### Motivation, pitch\n\nMake sure the model can be scriptable when using list of boxes as the input.\n\n### Alternatives\n\nOnly support Tensor as the input, user are required to convert bbox of different images into one tensor. 
Potentially make `_utils.convert_boxes_to_roi_format` public so that users don't need to write the conversion function.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from typing import List, Union\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.jit.annotations import BroadcastingList2\nfrom torch.nn.modules.utils import _pair\nfrom torchvision.extension import _assert_has_ops\n\nfrom ..utils import _log_api_usage_once\nfrom ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format\n\n\ndef roi_align(\n input: Tensor,\n boxes: Union[Tensor, List[Tensor]],\n output_size: BroadcastingList2[int],\n spatial_scale: float = 1.0,\n sampling_ratio: int = -1,\n aligned: bool = False,\n) -> Tensor:\n \"\"\"\n Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN.\n\n Args:\n input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element\n contains ``C`` feature maps of dimensions ``H x W``.\n If the tensor is quantized, we expect a batch size of ``N == 1``.\n boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from.\n The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n If a single Tensor is passed, then the first column should\n contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.\n If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i\n in the batch.\n output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling\n is performed, as (height, width).\n spatial_scale (float): a scaling factor that maps the box coordinates to\n the input coordinates. For example, if your boxes are defined on the scale\n of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of\n the original image), you'll want to set this to 0.5. Default: 1.0\n sampling_ratio (int): number of sampling points in the interpolation grid\n used to compute the output value of each pooled output bin. If > 0,\n then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If\n <= 0, then an adaptive number of grid points are used (computed as\n ``ceil(roi_width / output_width)``, and likewise for height). Default: -1\n aligned (bool): If False, use the legacy implementation.\n If True, pixel shift the box coordinates it by -0.5 for a better alignment with the two\n neighboring pixel indices. 
This version is used in Detectron2\n\n Returns:\n Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(roi_align)\n _assert_has_ops()\n check_roi_boxes_shape(boxes)\n rois = boxes\n output_size = _pair(output_size)\n if not isinstance(rois, torch.Tensor):\n rois = convert_boxes_to_roi_format(rois)\n return torch.ops.torchvision.roi_align(\n input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned\n )\n\n\nclass RoIAlign(nn.Module):\n \"\"\"\n See :func:`roi_align`.\n \"\"\"\n\n def __init__(\n self,\n output_size: BroadcastingList2[int],\n spatial_scale: float,\n sampling_ratio: int,\n aligned: bool = False,\n ):\n super().__init__()\n _log_api_usage_once(self)\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n self.sampling_ratio = sampling_ratio\n self.aligned = aligned\n\n def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)\n\n def __repr__(self) -> str:\n s = (\n f\"{self.__class__.__name__}(\"\n f\"output_size={self.output_size}\"\n f\", spatial_scale={self.spatial_scale}\"\n f\", sampling_ratio={self.sampling_ratio}\"\n f\", aligned={self.aligned}\"\n f\")\"\n )\n return s\n", "path": "torchvision/ops/roi_align.py"}, {"content": "from typing import List, Union\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.jit.annotations import BroadcastingList2\nfrom torch.nn.modules.utils import _pair\nfrom torchvision.extension import _assert_has_ops\n\nfrom ..utils import _log_api_usage_once\nfrom ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format\n\n\ndef roi_pool(\n input: Tensor,\n boxes: Union[Tensor, List[Tensor]],\n output_size: BroadcastingList2[int],\n spatial_scale: float = 1.0,\n) -> Tensor:\n \"\"\"\n Performs Region of Interest (RoI) Pool operator described in Fast R-CNN\n\n Args:\n input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element\n contains ``C`` feature maps of dimensions ``H x W``.\n boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from.\n The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n If a single Tensor is passed, then the first column should\n contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.\n If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i\n in the batch.\n output_size (int or Tuple[int, int]): the size of the output after the cropping\n is performed, as (height, width)\n spatial_scale (float): a scaling factor that maps the box coordinates to\n the input coordinates. For example, if your boxes are defined on the scale\n of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of\n the original image), you'll want to set this to 0.5. 
Default: 1.0\n\n Returns:\n Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(roi_pool)\n _assert_has_ops()\n check_roi_boxes_shape(boxes)\n rois = boxes\n output_size = _pair(output_size)\n if not isinstance(rois, torch.Tensor):\n rois = convert_boxes_to_roi_format(rois)\n output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])\n return output\n\n\nclass RoIPool(nn.Module):\n \"\"\"\n See :func:`roi_pool`.\n \"\"\"\n\n def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float):\n super().__init__()\n _log_api_usage_once(self)\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n\n def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n return roi_pool(input, rois, self.output_size, self.spatial_scale)\n\n def __repr__(self) -> str:\n s = f\"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})\"\n return s\n", "path": "torchvision/ops/roi_pool.py"}]}
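Both files route list inputs through `convert_boxes_to_roi_format` before calling the C++ kernels. Roughly, that helper prepends the image index to each box and concatenates everything into one `[K, 5]` tensor; the following is a hand-written illustration of that idea, not the torchvision source:

```python
from typing import List

import torch
from torch import Tensor


def boxes_to_rois(boxes: List[Tensor]) -> Tensor:
    # Prepend the image index to each [L, 4] box tensor and stack everything
    # into the single [K, 5] tensor expected by the roi kernels.
    rois = []
    for image_idx, b in enumerate(boxes):
        ids = torch.full((b.shape[0], 1), image_idx, dtype=b.dtype)
        rois.append(torch.cat([ids, b], dim=1))
    return torch.cat(rois, dim=0)


boxes = [
    torch.tensor([[0.0, 0.0, 4.0, 4.0]]),
    torch.tensor([[1.0, 1.0, 3.0, 3.0], [2.0, 2.0, 5.0, 5.0]]),
]
print(boxes_to_rois(boxes))
# tensor([[0., 0., 0., 4., 4.],
#         [1., 1., 1., 3., 3.],
#         [1., 2., 2., 5., 5.]])
```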
3,072
301
gh_patches_debug_23561
rasdani/github-patches
git_diff
rasterio__rasterio-2110
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cast nodata value before filling in rio calc At line 192, `res = res.filled(kwargs['nodata'])`, we need to convert that string to a numpy scalar or array of `res` dtype. </issue> <code> [start of rasterio/rio/calc.py] 1 """$ rio calc""" 2 3 from __future__ import division 4 5 from collections import OrderedDict 6 from distutils.version import LooseVersion 7 import math 8 9 import click 10 import snuggs 11 12 import rasterio 13 from rasterio.features import sieve 14 from rasterio.fill import fillnodata 15 from rasterio.windows import Window 16 from rasterio.rio import options 17 from rasterio.rio.helpers import resolve_inout 18 19 20 def _get_bands(inputs, sources, d, i=None): 21 """Get a rasterio.Band object from calc's inputs""" 22 idx = d if d in dict(inputs) else int(d) - 1 23 src = sources[idx] 24 return (rasterio.band(src, i) if i else 25 [rasterio.band(src, j) for j in src.indexes]) 26 27 28 def _read_array(ix, subix=None, dtype=None): 29 """Change the type of a read array""" 30 arr = snuggs._ctx.lookup(ix, subix) 31 if dtype: 32 arr = arr.astype(dtype) 33 return arr 34 35 36 def _chunk_output(width, height, count, itemsize, mem_limit=1): 37 """Divide the calculation output into chunks 38 39 This function determines the chunk size such that an array of shape 40 (chunk_size, chunk_size, count) with itemsize bytes per element 41 requires no more than mem_limit megabytes of memory. 42 43 Output chunks are described by rasterio Windows. 44 45 Parameters 46 ---------- 47 width : int 48 Output width 49 height : int 50 Output height 51 count : int 52 Number of output bands 53 itemsize : int 54 Number of bytes per pixel 55 mem_limit : int, default 56 The maximum size in memory of a chunk array 57 58 Returns 59 ------- 60 sequence of Windows 61 """ 62 max_pixels = mem_limit * 1.0e+6 / itemsize * count 63 chunk_size = int(math.floor(math.sqrt(max_pixels))) 64 ncols = int(math.ceil(width / chunk_size)) 65 nrows = int(math.ceil(height / chunk_size)) 66 chunk_windows = [] 67 68 for col in range(ncols): 69 col_offset = col * chunk_size 70 w = min(chunk_size, width - col_offset) 71 for row in range(nrows): 72 row_offset = row * chunk_size 73 h = min(chunk_size, height - row_offset) 74 chunk_windows.append(((row, col), Window(col_offset, row_offset, w, h))) 75 76 return chunk_windows 77 78 79 @click.command(short_help="Raster data calculator.") 80 @click.argument('command') 81 @options.files_inout_arg 82 @options.output_opt 83 @options.format_opt 84 @click.option('--name', multiple=True, 85 help='Specify an input file with a unique short (alphas only) ' 86 'name for use in commands like ' 87 '"a=tests/data/RGB.byte.tif".') 88 @options.dtype_opt 89 @options.masked_opt 90 @options.overwrite_opt 91 @click.option("--mem-limit", type=int, default=64, help="Limit on memory used to perform calculations, in MB.") 92 @options.creation_options 93 @click.pass_context 94 def calc(ctx, command, files, output, driver, name, dtype, masked, overwrite, mem_limit, creation_options): 95 """A raster data calculator 96 97 Evaluates an expression using input datasets and writes the result 98 to a new dataset. 99 100 Command syntax is lisp-like. An expression consists of an operator 101 or function name and one or more strings, numbers, or expressions 102 enclosed in parentheses. Functions include ``read`` (gets a raster 103 array) and ``asarray`` (makes a 3-D array from 2-D arrays). 
104 105 \b 106 * (read i) evaluates to the i-th input dataset (a 3-D array). 107 * (read i j) evaluates to the j-th band of the i-th dataset (a 108 2-D array). 109 * (take foo j) evaluates to the j-th band of a dataset named foo 110 (see help on the --name option above). 111 * Standard numpy array operators (+, -, *, /) are available. 112 * When the final result is a list of arrays, a multiple band 113 output file is written. 114 * When the final result is a single array, a single band output 115 file is written. 116 117 Example: 118 119 \b 120 $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\ 121 > /tmp/out.tif 122 123 The command above produces a 3-band GeoTIFF with all values scaled 124 by 0.95 and incremented by 2. 125 126 \b 127 $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\ 128 > tests/data/shade.tif /tmp/out.tif 129 130 The command above produces a 3-band RGB GeoTIFF, with red levels 131 incremented by 125, from the single-band input. 132 133 The maximum amount of memory used to perform caculations defaults to 134 64 MB. This number can be increased to improve speed of calculation. 135 136 """ 137 import numpy as np 138 139 dst = None 140 sources = [] 141 142 try: 143 with ctx.obj['env']: 144 output, files = resolve_inout(files=files, output=output, 145 overwrite=overwrite) 146 inputs = ([tuple(n.split('=')) for n in name] + 147 [(None, n) for n in files]) 148 sources = [rasterio.open(path) for name, path in inputs] 149 150 first = sources[0] 151 kwargs = first.profile 152 kwargs.update(**creation_options) 153 dtype = dtype or first.meta['dtype'] 154 kwargs['dtype'] = dtype 155 kwargs.pop("driver", None) 156 if driver: 157 kwargs['driver'] = driver 158 159 # Extend snuggs. 160 snuggs.func_map['read'] = _read_array 161 snuggs.func_map['band'] = lambda d, i: _get_bands(inputs, sources, d, i) 162 snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d) 163 snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args) 164 snuggs.func_map['sieve'] = lambda *args: sieve(*args) 165 166 # The windows iterator is initialized with a single sample. 167 # The actual work windows will be added in the second 168 # iteration of the loop. 169 work_windows = [(None, Window(0, 0, 16, 16))] 170 171 for ij, window in work_windows: 172 173 ctxkwds = OrderedDict() 174 175 for i, ((name, path), src) in enumerate(zip(inputs, sources)): 176 177 # Using the class method instead of instance 178 # method. Latter raises 179 # 180 # TypeError: astype() got an unexpected keyword 181 # argument 'copy' 182 # 183 # possibly something to do with the instance being 184 # a masked array. 185 ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window) 186 187 res = snuggs.eval(command, **ctxkwds) 188 189 if (isinstance(res, np.ma.core.MaskedArray) and ( 190 tuple(LooseVersion(np.__version__).version) < (1, 9) or 191 tuple(LooseVersion(np.__version__).version) > (1, 10))): 192 res = res.filled(kwargs['nodata']) 193 194 if len(res.shape) == 3: 195 results = np.ndarray.astype(res, dtype, copy=False) 196 else: 197 results = np.asanyarray( 198 [np.ndarray.astype(res, dtype, copy=False)]) 199 200 # The first iteration is only to get sample results and from them 201 # compute some properties of the output dataset. 
202 if dst is None: 203 kwargs['count'] = results.shape[0] 204 dst = rasterio.open(output, 'w', **kwargs) 205 work_windows.extend(_chunk_output(dst.width, dst.height, dst.count, np.dtype(dst.dtypes[0]).itemsize, mem_limit=mem_limit)) 206 207 # In subsequent iterations we write results. 208 else: 209 dst.write(results, window=window) 210 211 except snuggs.ExpressionError as err: 212 click.echo("Expression Error:") 213 click.echo(" {}".format(err.text)) 214 click.echo(" {}^".format(" " * err.offset)) 215 click.echo(err) 216 raise click.Abort() 217 218 finally: 219 if dst: 220 dst.close() 221 for src in sources: 222 src.close() 223 [end of rasterio/rio/calc.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
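The command language evaluated by `snuggs.eval` above is a small s-expression dialect in which keyword arguments become names visible inside the expression; that is how `calc()` exposes each input as `_i1`, `_i2`, … or a `--name` alias. A minimal sketch of that behaviour, based on my reading of the snuggs usage here (treat the exact API as an assumption):

```python
import numpy as np
import snuggs

a = np.array([[1.0, 2.0], [3.0, 4.0]])

# Keyword arguments passed to eval() are resolvable as names in the expression.
print(snuggs.eval("(+ 2 (* 0.95 a))", a=a))
# [[2.95 3.9 ]
#  [4.85 5.8 ]]
```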
diff --git a/rasterio/rio/calc.py b/rasterio/rio/calc.py --- a/rasterio/rio/calc.py +++ b/rasterio/rio/calc.py @@ -185,17 +185,14 @@ ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window) res = snuggs.eval(command, **ctxkwds) - - if (isinstance(res, np.ma.core.MaskedArray) and ( - tuple(LooseVersion(np.__version__).version) < (1, 9) or - tuple(LooseVersion(np.__version__).version) > (1, 10))): - res = res.filled(kwargs['nodata']) - - if len(res.shape) == 3: - results = np.ndarray.astype(res, dtype, copy=False) - else: - results = np.asanyarray( - [np.ndarray.astype(res, dtype, copy=False)]) + results = res.astype(dtype, copy=False) + + if isinstance(results, np.ma.core.MaskedArray): + results = results.filled(float(kwargs['nodata'])) + if len(results.shape) == 2: + results = np.ma.asanyarray([results]) + elif len(results.shape) == 2: + results = np.asanyarray([results]) # The first iteration is only to get sample results and from them # compute some properties of the output dataset.
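The heart of the fix is casting the nodata value before filling the masked result. With hypothetical values standing in for `res` and `kwargs['nodata']`, the difference in isolation looks like this:

```python
import numpy as np

res = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])
nodata = "-9999"   # creation options arrive from the CLI as strings

# Filling with the raw string either raises or silently falls back to an
# object array, depending on the numpy version; casting first is reliable.
filled = res.filled(float(nodata))
print(filled)      # the masked slot becomes -9999.0: [1.0, -9999.0, 3.0]
```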
{"golden_diff": "diff --git a/rasterio/rio/calc.py b/rasterio/rio/calc.py\n--- a/rasterio/rio/calc.py\n+++ b/rasterio/rio/calc.py\n@@ -185,17 +185,14 @@\n ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)\n \n res = snuggs.eval(command, **ctxkwds)\n-\n- if (isinstance(res, np.ma.core.MaskedArray) and (\n- tuple(LooseVersion(np.__version__).version) < (1, 9) or\n- tuple(LooseVersion(np.__version__).version) > (1, 10))):\n- res = res.filled(kwargs['nodata'])\n-\n- if len(res.shape) == 3:\n- results = np.ndarray.astype(res, dtype, copy=False)\n- else:\n- results = np.asanyarray(\n- [np.ndarray.astype(res, dtype, copy=False)])\n+ results = res.astype(dtype, copy=False)\n+\n+ if isinstance(results, np.ma.core.MaskedArray):\n+ results = results.filled(float(kwargs['nodata']))\n+ if len(results.shape) == 2:\n+ results = np.ma.asanyarray([results])\n+ elif len(results.shape) == 2:\n+ results = np.asanyarray([results])\n \n # The first iteration is only to get sample results and from them\n # compute some properties of the output dataset.\n", "issue": "Cast nodata value before filling in rio calc\nAt line 192, `res = res.filled(kwargs['nodata'])`, we need to convert that string to a numpy scalar or array of `res` dtype.\n", "before_files": [{"content": "\"\"\"$ rio calc\"\"\"\n\nfrom __future__ import division\n\nfrom collections import OrderedDict\nfrom distutils.version import LooseVersion\nimport math\n\nimport click\nimport snuggs\n\nimport rasterio\nfrom rasterio.features import sieve\nfrom rasterio.fill import fillnodata\nfrom rasterio.windows import Window\nfrom rasterio.rio import options\nfrom rasterio.rio.helpers import resolve_inout\n\n\ndef _get_bands(inputs, sources, d, i=None):\n \"\"\"Get a rasterio.Band object from calc's inputs\"\"\"\n idx = d if d in dict(inputs) else int(d) - 1\n src = sources[idx]\n return (rasterio.band(src, i) if i else\n [rasterio.band(src, j) for j in src.indexes])\n\n\ndef _read_array(ix, subix=None, dtype=None):\n \"\"\"Change the type of a read array\"\"\"\n arr = snuggs._ctx.lookup(ix, subix)\n if dtype:\n arr = arr.astype(dtype)\n return arr\n\n\ndef _chunk_output(width, height, count, itemsize, mem_limit=1):\n \"\"\"Divide the calculation output into chunks\n\n This function determines the chunk size such that an array of shape\n (chunk_size, chunk_size, count) with itemsize bytes per element\n requires no more than mem_limit megabytes of memory.\n\n Output chunks are described by rasterio Windows.\n\n Parameters\n ----------\n width : int\n Output width\n height : int\n Output height\n count : int\n Number of output bands\n itemsize : int\n Number of bytes per pixel\n mem_limit : int, default\n The maximum size in memory of a chunk array\n\n Returns\n -------\n sequence of Windows\n \"\"\"\n max_pixels = mem_limit * 1.0e+6 / itemsize * count\n chunk_size = int(math.floor(math.sqrt(max_pixels)))\n ncols = int(math.ceil(width / chunk_size))\n nrows = int(math.ceil(height / chunk_size))\n chunk_windows = []\n\n for col in range(ncols):\n col_offset = col * chunk_size\n w = min(chunk_size, width - col_offset)\n for row in range(nrows):\n row_offset = row * chunk_size\n h = min(chunk_size, height - row_offset)\n chunk_windows.append(((row, col), Window(col_offset, row_offset, w, h)))\n\n return chunk_windows\n\n\[email protected](short_help=\"Raster data calculator.\")\[email protected]('command')\[email protected]_inout_arg\[email protected]_opt\[email protected]_opt\[email protected]('--name', multiple=True,\n help='Specify an 
input file with a unique short (alphas only) '\n 'name for use in commands like '\n '\"a=tests/data/RGB.byte.tif\".')\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected](\"--mem-limit\", type=int, default=64, help=\"Limit on memory used to perform calculations, in MB.\")\[email protected]_options\[email protected]_context\ndef calc(ctx, command, files, output, driver, name, dtype, masked, overwrite, mem_limit, creation_options):\n \"\"\"A raster data calculator\n\n Evaluates an expression using input datasets and writes the result\n to a new dataset.\n\n Command syntax is lisp-like. An expression consists of an operator\n or function name and one or more strings, numbers, or expressions\n enclosed in parentheses. Functions include ``read`` (gets a raster\n array) and ``asarray`` (makes a 3-D array from 2-D arrays).\n\n \\b\n * (read i) evaluates to the i-th input dataset (a 3-D array).\n * (read i j) evaluates to the j-th band of the i-th dataset (a\n 2-D array).\n * (take foo j) evaluates to the j-th band of a dataset named foo\n (see help on the --name option above).\n * Standard numpy array operators (+, -, *, /) are available.\n * When the final result is a list of arrays, a multiple band\n output file is written.\n * When the final result is a single array, a single band output\n file is written.\n\n Example:\n\n \\b\n $ rio calc \"(+ 2 (* 0.95 (read 1)))\" tests/data/RGB.byte.tif \\\\\n > /tmp/out.tif\n\n The command above produces a 3-band GeoTIFF with all values scaled\n by 0.95 and incremented by 2.\n\n \\b\n $ rio calc \"(asarray (+ 125 (read 1)) (read 1) (read 1))\" \\\\\n > tests/data/shade.tif /tmp/out.tif\n\n The command above produces a 3-band RGB GeoTIFF, with red levels\n incremented by 125, from the single-band input.\n\n The maximum amount of memory used to perform caculations defaults to\n 64 MB. This number can be increased to improve speed of calculation.\n\n \"\"\"\n import numpy as np\n\n dst = None\n sources = []\n\n try:\n with ctx.obj['env']:\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputs = ([tuple(n.split('=')) for n in name] +\n [(None, n) for n in files])\n sources = [rasterio.open(path) for name, path in inputs]\n\n first = sources[0]\n kwargs = first.profile\n kwargs.update(**creation_options)\n dtype = dtype or first.meta['dtype']\n kwargs['dtype'] = dtype\n kwargs.pop(\"driver\", None)\n if driver:\n kwargs['driver'] = driver\n\n # Extend snuggs.\n snuggs.func_map['read'] = _read_array\n snuggs.func_map['band'] = lambda d, i: _get_bands(inputs, sources, d, i)\n snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d)\n snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)\n snuggs.func_map['sieve'] = lambda *args: sieve(*args)\n\n # The windows iterator is initialized with a single sample.\n # The actual work windows will be added in the second\n # iteration of the loop.\n work_windows = [(None, Window(0, 0, 16, 16))]\n\n for ij, window in work_windows:\n\n ctxkwds = OrderedDict()\n\n for i, ((name, path), src) in enumerate(zip(inputs, sources)):\n\n # Using the class method instead of instance\n # method. 
Latter raises\n #\n # TypeError: astype() got an unexpected keyword\n # argument 'copy'\n #\n # possibly something to do with the instance being\n # a masked array.\n ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)\n\n res = snuggs.eval(command, **ctxkwds)\n\n if (isinstance(res, np.ma.core.MaskedArray) and (\n tuple(LooseVersion(np.__version__).version) < (1, 9) or\n tuple(LooseVersion(np.__version__).version) > (1, 10))):\n res = res.filled(kwargs['nodata'])\n\n if len(res.shape) == 3:\n results = np.ndarray.astype(res, dtype, copy=False)\n else:\n results = np.asanyarray(\n [np.ndarray.astype(res, dtype, copy=False)])\n\n # The first iteration is only to get sample results and from them\n # compute some properties of the output dataset.\n if dst is None:\n kwargs['count'] = results.shape[0]\n dst = rasterio.open(output, 'w', **kwargs)\n work_windows.extend(_chunk_output(dst.width, dst.height, dst.count, np.dtype(dst.dtypes[0]).itemsize, mem_limit=mem_limit))\n\n # In subsequent iterations we write results.\n else:\n dst.write(results, window=window)\n\n except snuggs.ExpressionError as err:\n click.echo(\"Expression Error:\")\n click.echo(\" {}\".format(err.text))\n click.echo(\" {}^\".format(\" \" * err.offset))\n click.echo(err)\n raise click.Abort()\n\n finally:\n if dst:\n dst.close()\n for src in sources:\n src.close()\n", "path": "rasterio/rio/calc.py"}]}
3,025
338
gh_patches_debug_11279
rasdani/github-patches
git_diff
abey79__vpype-440
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Replace cKDTree by KDTree see note here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.html#scipy.spatial.cKDTree </issue> <code> [start of vpype/line_index.py] 1 from __future__ import annotations 2 3 import logging 4 from typing import Iterable 5 6 import numpy as np 7 from scipy.spatial import cKDTree as KDTree 8 9 # REMINDER: anything added here must be added to docs/api.rst 10 __all__ = ["LineIndex"] 11 12 13 class LineIndex: 14 """Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line 15 collection. 16 17 Implementation note: we use the `available` bool array because deleting stuff from the 18 index is costly. 19 """ 20 21 def __init__(self, lines: Iterable[np.ndarray], reverse: bool = False): 22 self.lines = [line for line in lines if len(line) > 0] 23 self.reverse = reverse 24 self._make_index() 25 26 def _make_index(self) -> None: 27 logging.info(f"LineIndex: creating index for {len(self.lines)} lines") 28 self.available = np.ones(shape=len(self.lines), dtype=bool) 29 30 # create rtree index 31 self.index = KDTree( 32 np.array([(line[0].real, line[0].imag) for line in self.lines]).reshape(-1, 2) 33 ) 34 35 # create reverse index 36 if self.reverse: 37 self.rindex = KDTree( 38 np.array([(line[-1].real, line[-1].imag) for line in self.lines]).reshape( 39 -1, 2 40 ) 41 ) 42 43 def _reindex(self) -> None: 44 self.lines = [line for idx, line in enumerate(self.lines) if self.available[idx]] 45 self._make_index() 46 47 def __len__(self) -> int: 48 return np.count_nonzero(self.available) 49 50 def __getitem__(self, item): 51 return self.lines[item] 52 53 def pop_front(self) -> np.ndarray: 54 if len(self) == 0: 55 raise RuntimeError 56 idx = int(np.argmax(self.available)) 57 self.available[idx] = False 58 return self.lines[idx] 59 60 def pop(self, idx: int) -> np.ndarray | None: 61 if not self.available[idx]: 62 return None 63 self.available[idx] = False 64 return self.lines[idx] 65 66 def find_nearest_within(self, p: complex, max_dist: float) -> tuple[int | None, bool]: 67 """Find the closest line, assuming a maximum admissible distance. 68 Returns a tuple of (idx, reverse), where `idx` may be None if nothing is found. 69 `reverse` indicates whether or not a line ending has been matched instead of a start. 70 False is always returned if index was created with `reverse=False`.s 71 """ 72 73 ridx = None 74 rdist: float | None = 0.0 75 76 while True: 77 reindex, idx, dist = self._find_nearest_within_in_index(p, max_dist, self.index) 78 if reindex: 79 self._reindex() 80 continue 81 82 if self.reverse: 83 reindex, ridx, rdist = self._find_nearest_within_in_index( 84 p, max_dist, self.rindex 85 ) 86 if reindex: 87 self._reindex() 88 continue 89 break 90 91 if self.reverse: 92 if idx is None and ridx is None: 93 return None, False 94 elif idx is not None and ridx is None: 95 return idx, False 96 elif idx is None and ridx is not None: 97 return ridx, True 98 elif rdist < dist: # type: ignore 99 return ridx, True 100 else: 101 return idx, False 102 else: 103 return idx, False 104 105 def _find_nearest_within_in_index( 106 self, p: complex, max_dist: float, index: KDTree 107 ) -> tuple[bool, int | None, float | None]: 108 """Find nearest in specific index. Return (reindex, idx, dist) tuple, where 109 reindex indicates if a reindex is needed. 110 """ 111 112 # For performance reason, we query only a max of k candidates. 
In the special case 113 # where all distances are not inf and none are available, we might have more than k 114 # suitable candidate, so we reindex and loop. Otherwise, we check the query results 115 # for availability and not inf and return anything found 116 dists, idxs = index.query((p.real, p.imag), k=50, distance_upper_bound=max_dist) 117 dists = np.array(dists) 118 119 not_inf = ~np.isinf(dists) 120 if np.all(not_inf) and np.all(~self.available[idxs[not_inf]]): 121 return True, None, 0 122 123 candidates = self.available[idxs[not_inf]] 124 125 if np.any(candidates): 126 idx = np.argmax(candidates) 127 return False, idxs[not_inf][idx], dists[not_inf][idx] 128 else: 129 return False, None, 0 130 131 # noinspection PyUnboundLocalVariable 132 def find_nearest(self, p: complex) -> tuple[int, bool]: 133 while True: 134 idx, dist = self._find_nearest_in_index(p, self.index) 135 if self.reverse: 136 ridx, rdist = self._find_nearest_in_index(p, self.rindex) 137 138 if ridx is not None and idx is not None: 139 break 140 elif idx is not None: 141 break 142 self._reindex() 143 144 if self.reverse: 145 if rdist < dist: 146 return ridx, True # type: ignore 147 else: 148 return idx, False 149 else: 150 return idx, False 151 152 def _find_nearest_in_index(self, p: complex, index: KDTree) -> tuple[int | None, float]: 153 """Check the N nearest lines, hopefully find one that is active.""" 154 155 dists, idxs = index.query((p.real, p.imag), k=100) 156 for dist, idx in zip(dists, idxs): 157 if ~np.isinf(dist) and self.available[idx]: 158 return idx, dist 159 160 return None, 0.0 161 [end of vpype/line_index.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
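`_find_nearest_within_in_index` relies on a specific behaviour of the SciPy k-d tree: when `query` is called with `distance_upper_bound` and finds fewer than `k` neighbours, the missing slots are reported with an infinite distance and an index equal to the number of points, which is why the code filters on `~np.isinf`. A small self-contained check of that behaviour, independent of vpype:

```python
import numpy as np
from scipy.spatial import KDTree

points = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 10.0]])
tree = KDTree(points)

dists, idxs = tree.query([0.1, 0.0], k=3, distance_upper_bound=2.0)
print(dists)   # [0.1 0.9 inf]  -- the far point is outside the bound
print(idxs)    # [0 1 3]        -- missing neighbours use index == len(points)

valid = ~np.isinf(dists)        # the same filter LineIndex applies
print(idxs[valid])              # [0 1]
```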
diff --git a/vpype/line_index.py b/vpype/line_index.py --- a/vpype/line_index.py +++ b/vpype/line_index.py @@ -4,14 +4,14 @@ from typing import Iterable import numpy as np -from scipy.spatial import cKDTree as KDTree +from scipy.spatial import KDTree # REMINDER: anything added here must be added to docs/api.rst __all__ = ["LineIndex"] class LineIndex: - """Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line + """Wrapper to scipy.spatial.KDTree to facilitate systematic processing of a line collection. Implementation note: we use the `available` bool array because deleting stuff from the
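For context on why the rename requested in the issue is safe: in recent SciPy releases (1.6 and later, as I understand it) `cKDTree` is kept only for backwards compatibility and `KDTree` uses the same compiled implementation, so the swap is a drop-in. A quick sanity check:

```python
import numpy as np
from scipy.spatial import KDTree, cKDTree

pts = np.random.default_rng(0).random((100, 2))
query = [0.5, 0.5]

d1, i1 = KDTree(pts).query(query, k=5)
d2, i2 = cKDTree(pts).query(query, k=5)
assert np.allclose(d1, d2) and np.array_equal(i1, i2)
print("identical results:", i1)
```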
{"golden_diff": "diff --git a/vpype/line_index.py b/vpype/line_index.py\n--- a/vpype/line_index.py\n+++ b/vpype/line_index.py\n@@ -4,14 +4,14 @@\n from typing import Iterable\n \n import numpy as np\n-from scipy.spatial import cKDTree as KDTree\n+from scipy.spatial import KDTree\n \n # REMINDER: anything added here must be added to docs/api.rst\n __all__ = [\"LineIndex\"]\n \n \n class LineIndex:\n- \"\"\"Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line\n+ \"\"\"Wrapper to scipy.spatial.KDTree to facilitate systematic processing of a line\n collection.\n \n Implementation note: we use the `available` bool array because deleting stuff from the\n", "issue": "Replace cKDTree by KDTree\nsee note here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.html#scipy.spatial.cKDTree\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom typing import Iterable\n\nimport numpy as np\nfrom scipy.spatial import cKDTree as KDTree\n\n# REMINDER: anything added here must be added to docs/api.rst\n__all__ = [\"LineIndex\"]\n\n\nclass LineIndex:\n \"\"\"Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line\n collection.\n\n Implementation note: we use the `available` bool array because deleting stuff from the\n index is costly.\n \"\"\"\n\n def __init__(self, lines: Iterable[np.ndarray], reverse: bool = False):\n self.lines = [line for line in lines if len(line) > 0]\n self.reverse = reverse\n self._make_index()\n\n def _make_index(self) -> None:\n logging.info(f\"LineIndex: creating index for {len(self.lines)} lines\")\n self.available = np.ones(shape=len(self.lines), dtype=bool)\n\n # create rtree index\n self.index = KDTree(\n np.array([(line[0].real, line[0].imag) for line in self.lines]).reshape(-1, 2)\n )\n\n # create reverse index\n if self.reverse:\n self.rindex = KDTree(\n np.array([(line[-1].real, line[-1].imag) for line in self.lines]).reshape(\n -1, 2\n )\n )\n\n def _reindex(self) -> None:\n self.lines = [line for idx, line in enumerate(self.lines) if self.available[idx]]\n self._make_index()\n\n def __len__(self) -> int:\n return np.count_nonzero(self.available)\n\n def __getitem__(self, item):\n return self.lines[item]\n\n def pop_front(self) -> np.ndarray:\n if len(self) == 0:\n raise RuntimeError\n idx = int(np.argmax(self.available))\n self.available[idx] = False\n return self.lines[idx]\n\n def pop(self, idx: int) -> np.ndarray | None:\n if not self.available[idx]:\n return None\n self.available[idx] = False\n return self.lines[idx]\n\n def find_nearest_within(self, p: complex, max_dist: float) -> tuple[int | None, bool]:\n \"\"\"Find the closest line, assuming a maximum admissible distance.\n Returns a tuple of (idx, reverse), where `idx` may be None if nothing is found.\n `reverse` indicates whether or not a line ending has been matched instead of a start.\n False is always returned if index was created with `reverse=False`.s\n \"\"\"\n\n ridx = None\n rdist: float | None = 0.0\n\n while True:\n reindex, idx, dist = self._find_nearest_within_in_index(p, max_dist, self.index)\n if reindex:\n self._reindex()\n continue\n\n if self.reverse:\n reindex, ridx, rdist = self._find_nearest_within_in_index(\n p, max_dist, self.rindex\n )\n if reindex:\n self._reindex()\n continue\n break\n\n if self.reverse:\n if idx is None and ridx is None:\n return None, False\n elif idx is not None and ridx is None:\n return idx, False\n elif idx is None and ridx is not None:\n return ridx, 
True\n elif rdist < dist: # type: ignore\n return ridx, True\n else:\n return idx, False\n else:\n return idx, False\n\n def _find_nearest_within_in_index(\n self, p: complex, max_dist: float, index: KDTree\n ) -> tuple[bool, int | None, float | None]:\n \"\"\"Find nearest in specific index. Return (reindex, idx, dist) tuple, where\n reindex indicates if a reindex is needed.\n \"\"\"\n\n # For performance reason, we query only a max of k candidates. In the special case\n # where all distances are not inf and none are available, we might have more than k\n # suitable candidate, so we reindex and loop. Otherwise, we check the query results\n # for availability and not inf and return anything found\n dists, idxs = index.query((p.real, p.imag), k=50, distance_upper_bound=max_dist)\n dists = np.array(dists)\n\n not_inf = ~np.isinf(dists)\n if np.all(not_inf) and np.all(~self.available[idxs[not_inf]]):\n return True, None, 0\n\n candidates = self.available[idxs[not_inf]]\n\n if np.any(candidates):\n idx = np.argmax(candidates)\n return False, idxs[not_inf][idx], dists[not_inf][idx]\n else:\n return False, None, 0\n\n # noinspection PyUnboundLocalVariable\n def find_nearest(self, p: complex) -> tuple[int, bool]:\n while True:\n idx, dist = self._find_nearest_in_index(p, self.index)\n if self.reverse:\n ridx, rdist = self._find_nearest_in_index(p, self.rindex)\n\n if ridx is not None and idx is not None:\n break\n elif idx is not None:\n break\n self._reindex()\n\n if self.reverse:\n if rdist < dist:\n return ridx, True # type: ignore\n else:\n return idx, False\n else:\n return idx, False\n\n def _find_nearest_in_index(self, p: complex, index: KDTree) -> tuple[int | None, float]:\n \"\"\"Check the N nearest lines, hopefully find one that is active.\"\"\"\n\n dists, idxs = index.query((p.real, p.imag), k=100)\n for dist, idx in zip(dists, idxs):\n if ~np.isinf(dist) and self.available[idx]:\n return idx, dist\n\n return None, 0.0\n", "path": "vpype/line_index.py"}]}
2,274
168
gh_patches_debug_32004
rasdani/github-patches
git_diff
OCA__bank-payment-18
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> many transactions not regonized After importing a dutch bank statements file from the ING, I got the following screen: ![import_log](https://cloud.githubusercontent.com/assets/890128/3571378/ca510fa0-0b5c-11e4-9d32-3ee373c491e4.png) Many transactions are not processed because of not recognizing the correct bankaccount number. The system is complaining about "Statements found for unknown account 68xxx" and "Statements found for account 06xxxxxx, but no default journal was defined. This is a little strange, because I have setup "Default Import Settings for Bank Accounts" in the settings of the accounting section. I also put a default journal in there. My information entered there is the IBAN number of my account, together with the national account (with leading zero). I also tried without the leading zero in the national account, but this doesn't make any difference at all. Why is openerp mixing up the number of the bankaccount. (with/without zero) and not matching the national number. </issue> <code> [start of account_banking/res_partner_bank.py] 1 # -*- coding: utf-8 -*- 2 ############################################################################## 3 # 4 # Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>). 5 # (C) 2011 - 2014 Therp BV (<http://therp.nl>). 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Affero General Public License as 9 # published by the Free Software Foundation, either version 3 of the 10 # License, or (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Affero General Public License for more details. 16 # 17 # You should have received a copy of the GNU Affero General Public License 18 # along with this program. If not, see <http://www.gnu.org/licenses/>. 19 # 20 ############################################################################## 21 from openerp.osv import orm 22 from openerp.addons.account_banking import sepa 23 24 25 class ResPartnerBank(orm.Model): 26 _inherit = 'res.partner.bank' 27 28 def online_account_info( 29 self, cr, uid, country_code, acc_number, context=None): 30 """ 31 API hook for legacy online lookup of account info, 32 to be removed in OpenERP 8.0. 33 """ 34 return False 35 36 def search(self, cr, uid, args, *rest, **kwargs): 37 """ 38 When a complete IBAN is searched, also search for its BBAN 39 if we have the domestic column. Disregard spaces 40 when comparing IBANs. 41 """ 42 43 def is_term(arg): 44 '''Flag an arg as term or otherwise''' 45 return isinstance(arg, (list, tuple)) and len(arg) == 3 46 47 def extended_filter_term(term): 48 ''' 49 Extend the search criteria in term when appropriate. 
50 ''' 51 result = [term] 52 extra_terms = [] 53 if term[0].lower() == 'acc_number' and term[1] in ('=', '=='): 54 iban = sepa.IBAN(term[2]) 55 if iban.valid: 56 # Disregard spaces when comparing IBANs 57 cr.execute( 58 """ 59 SELECT id FROM res_partner_bank 60 WHERE replace(acc_number, ' ', '') = %s 61 """, (term[2].replace(' ', ''),)) 62 ids = [row[0] for row in cr.fetchall()] 63 result = [('id', 'in', ids)] 64 65 if 'acc_number_domestic' in self._columns: 66 bban = iban.localized_BBAN 67 # Prevent empty search filters 68 if bban: 69 extra_terms.append( 70 ('acc_number_domestic', term[1], bban)) 71 for extra_term in extra_terms: 72 result = ['|'] + result + [extra_term] 73 return result 74 75 def extended_search_expression(args): 76 ''' 77 Extend the search expression in args when appropriate. 78 The expression itself is in reverse polish notation, so recursion 79 is not needed. 80 ''' 81 if not args: 82 return [] 83 84 result = [] 85 if is_term(args[0]) and len(args) > 1: 86 # Classic filter, implicit '&' 87 result += ['&'] 88 89 for arg in args: 90 if is_term(arg): 91 result += extended_filter_term(arg) 92 else: 93 result += arg 94 return result 95 96 # Extend search filter 97 newargs = extended_search_expression(args) 98 99 # Original search 100 return super(ResPartnerBank, self).search( 101 cr, uid, newargs, *rest, **kwargs) 102 [end of account_banking/res_partner_bank.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/account_banking/res_partner_bank.py b/account_banking/res_partner_bank.py --- a/account_banking/res_partner_bank.py +++ b/account_banking/res_partner_bank.py @@ -44,6 +44,27 @@ '''Flag an arg as term or otherwise''' return isinstance(arg, (list, tuple)) and len(arg) == 3 + def search_domestic(account): + """ + Performing raw sql search for full account numbers, + ignoring leading zeroes. + """ + account = (account or '').lstrip('0') + if not account: + return ('id', '=', 0) + query = """ + SELECT id FROM res_partner_bank + WHERE ( + STATE != 'iban' + AND TRIM(LEADING '0' FROM acc_number) = %(account)s) + """ + if 'acc_number_domestic' in self._columns: + query += """ + OR TRIM(LEADING '0' FROM acc_number_domestic) + = %(account)s""" + cr.execute(query, {'account': account}) + return ('id', 'in', [row[0] for row in cr.fetchall()]) + def extended_filter_term(term): ''' Extend the search criteria in term when appropriate. @@ -62,12 +83,11 @@ ids = [row[0] for row in cr.fetchall()] result = [('id', 'in', ids)] - if 'acc_number_domestic' in self._columns: - bban = iban.localized_BBAN - # Prevent empty search filters - if bban: - extra_terms.append( - ('acc_number_domestic', term[1], bban)) + bban = iban.localized_BBAN + if bban: + extra_terms.append(search_domestic(bban)) + else: + result = [search_domestic(term[2])] for extra_term in extra_terms: result = ['|'] + result + [extra_term] return result
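Stripped of the ORM and raw SQL, the comparison the patch introduces is a leading-zero-insensitive match between the stored and queried domestic account numbers. An illustrative Python equivalent (not the module's actual code):

```python
def same_domestic_account(stored: str, queried: str) -> bool:
    # TRIM(LEADING '0' FROM ...) in the SQL above corresponds to lstrip("0"):
    # "068xxx" and "68xxx" should resolve to the same national account number.
    stored = (stored or "").lstrip("0")
    queried = (queried or "").lstrip("0")
    return bool(stored) and stored == queried


print(same_domestic_account("0417164300", "417164300"))   # True
print(same_domestic_account("0417164300", "0517164300"))  # False
```

The empty-string guard mirrors the patch's refusal to match when the account number strips down to nothing.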
{"golden_diff": "diff --git a/account_banking/res_partner_bank.py b/account_banking/res_partner_bank.py\n--- a/account_banking/res_partner_bank.py\n+++ b/account_banking/res_partner_bank.py\n@@ -44,6 +44,27 @@\n '''Flag an arg as term or otherwise'''\n return isinstance(arg, (list, tuple)) and len(arg) == 3\n \n+ def search_domestic(account):\n+ \"\"\"\n+ Performing raw sql search for full account numbers,\n+ ignoring leading zeroes.\n+ \"\"\"\n+ account = (account or '').lstrip('0')\n+ if not account:\n+ return ('id', '=', 0)\n+ query = \"\"\"\n+ SELECT id FROM res_partner_bank\n+ WHERE (\n+ STATE != 'iban'\n+ AND TRIM(LEADING '0' FROM acc_number) = %(account)s)\n+ \"\"\"\n+ if 'acc_number_domestic' in self._columns:\n+ query += \"\"\"\n+ OR TRIM(LEADING '0' FROM acc_number_domestic)\n+ = %(account)s\"\"\"\n+ cr.execute(query, {'account': account})\n+ return ('id', 'in', [row[0] for row in cr.fetchall()])\n+\n def extended_filter_term(term):\n '''\n Extend the search criteria in term when appropriate.\n@@ -62,12 +83,11 @@\n ids = [row[0] for row in cr.fetchall()]\n result = [('id', 'in', ids)]\n \n- if 'acc_number_domestic' in self._columns:\n- bban = iban.localized_BBAN\n- # Prevent empty search filters\n- if bban:\n- extra_terms.append(\n- ('acc_number_domestic', term[1], bban))\n+ bban = iban.localized_BBAN\n+ if bban:\n+ extra_terms.append(search_domestic(bban))\n+ else:\n+ result = [search_domestic(term[2])]\n for extra_term in extra_terms:\n result = ['|'] + result + [extra_term]\n return result\n", "issue": "many transactions not regonized\nAfter importing a dutch bank statements file from the ING, I got the following screen:\n\n![import_log](https://cloud.githubusercontent.com/assets/890128/3571378/ca510fa0-0b5c-11e4-9d32-3ee373c491e4.png)\n\nMany transactions are not processed because of not recognizing the correct bankaccount number. \n\nThe system is complaining about \"Statements found for unknown account 68xxx\" and \"Statements found for account 06xxxxxx, but no default journal was defined.\n\nThis is a little strange, because I have setup \"Default Import Settings for Bank Accounts\"\nin the settings of the accounting section.\n\nI also put a default journal in there.\n\nMy information entered there is the IBAN number of my account, together with the national account (with leading zero). I also tried without the leading zero in the national account, but this doesn't make any difference at all.\n\nWhy is openerp mixing up the number of the bankaccount. (with/without zero) and not matching the national number.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).\n# (C) 2011 - 2014 Therp BV (<http://therp.nl>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.osv import orm\nfrom openerp.addons.account_banking import sepa\n\n\nclass ResPartnerBank(orm.Model):\n _inherit = 'res.partner.bank'\n\n def online_account_info(\n self, cr, uid, country_code, acc_number, context=None):\n \"\"\"\n API hook for legacy online lookup of account info,\n to be removed in OpenERP 8.0.\n \"\"\"\n return False\n\n def search(self, cr, uid, args, *rest, **kwargs):\n \"\"\"\n When a complete IBAN is searched, also search for its BBAN\n if we have the domestic column. Disregard spaces\n when comparing IBANs.\n \"\"\"\n\n def is_term(arg):\n '''Flag an arg as term or otherwise'''\n return isinstance(arg, (list, tuple)) and len(arg) == 3\n\n def extended_filter_term(term):\n '''\n Extend the search criteria in term when appropriate.\n '''\n result = [term]\n extra_terms = []\n if term[0].lower() == 'acc_number' and term[1] in ('=', '=='):\n iban = sepa.IBAN(term[2])\n if iban.valid:\n # Disregard spaces when comparing IBANs\n cr.execute(\n \"\"\"\n SELECT id FROM res_partner_bank\n WHERE replace(acc_number, ' ', '') = %s\n \"\"\", (term[2].replace(' ', ''),))\n ids = [row[0] for row in cr.fetchall()]\n result = [('id', 'in', ids)]\n\n if 'acc_number_domestic' in self._columns:\n bban = iban.localized_BBAN\n # Prevent empty search filters\n if bban:\n extra_terms.append(\n ('acc_number_domestic', term[1], bban))\n for extra_term in extra_terms:\n result = ['|'] + result + [extra_term]\n return result\n\n def extended_search_expression(args):\n '''\n Extend the search expression in args when appropriate.\n The expression itself is in reverse polish notation, so recursion\n is not needed.\n '''\n if not args:\n return []\n\n result = []\n if is_term(args[0]) and len(args) > 1:\n # Classic filter, implicit '&'\n result += ['&']\n\n for arg in args:\n if is_term(arg):\n result += extended_filter_term(arg)\n else:\n result += arg\n return result\n\n # Extend search filter\n newargs = extended_search_expression(args)\n\n # Original search\n return super(ResPartnerBank, self).search(\n cr, uid, newargs, *rest, **kwargs)\n", "path": "account_banking/res_partner_bank.py"}]}
1,782
452
gh_patches_debug_17655
rasdani/github-patches
git_diff
sktime__sktime-3168
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] changelog generator picks up merges into branches other than `main` The changelog generator seems to pick up PR that were not merged into `main` but into another branch. Current example for 0.13.0 release at pre-0.13.0 state: #2941, #2952. FYI @lmmentel </issue> <code> [start of build_tools/changelog.py] 1 # -*- coding: utf-8 -*- 2 """RestructuredText changelog generator.""" 3 4 import os 5 from collections import defaultdict 6 from typing import Dict, List 7 8 import httpx 9 from dateutil import parser 10 11 HEADERS = { 12 "Accept": "application/vnd.github.v3+json", 13 } 14 15 if os.getenv("GITHUB_TOKEN") is not None: 16 HEADERS["Authorization"] = f"token {os.getenv('GITHUB_TOKEN')}" 17 18 OWNER = "alan-turing-institute" 19 REPO = "sktime" 20 GITHUB_REPOS = "https://api.github.com/repos" 21 22 23 def fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa 24 "Fetch a page of pull requests" 25 params = { 26 "state": "closed", 27 "page": page, 28 "per_page": 50, 29 "sort": "updated", 30 "direction": "desc", 31 } 32 r = httpx.get( 33 f"{GITHUB_REPOS}/{OWNER}/{REPO}/pulls", 34 headers=HEADERS, 35 params=params, 36 ) 37 return [pr for pr in r.json() if pr["merged_at"]] 38 39 40 def fetch_latest_release(): # noqa 41 response = httpx.get( 42 f"{GITHUB_REPOS}/{OWNER}/{REPO}/releases/latest", headers=HEADERS 43 ) 44 45 if response.status_code == 200: 46 return response.json() 47 else: 48 raise ValueError(response.text, response.status_code) 49 50 51 def fetch_pull_requests_since_last_release() -> List[Dict]: # noqa 52 "Fetch pull requests and filter based on merged date" 53 54 release = fetch_latest_release() 55 published_at = parser.parse(release["published_at"]) 56 print( # noqa 57 f"Latest release {release['tag_name']} was published at {published_at}" 58 ) 59 60 is_exhausted = False 61 page = 1 62 all_pulls = [] 63 while not is_exhausted: 64 pulls = fetch_merged_pull_requests(page=page) 65 all_pulls.extend( 66 [p for p in pulls if parser.parse(p["merged_at"]) > published_at] 67 ) 68 is_exhausted = any(parser.parse(p["merged_at"]) < published_at for p in pulls) 69 page += 1 70 return all_pulls 71 72 73 def github_compare_tags(tag_left: str, tag_right: str = "HEAD"): # noqa 74 "Compare commit between two tags" 75 response = httpx.get( 76 f"{GITHUB_REPOS}/{OWNER}/{REPO}/compare/{tag_left}...{tag_right}" 77 ) 78 if response.status_code == 200: 79 return response.json() 80 else: 81 raise ValueError(response.text, response.status_code) 82 83 84 def render_contributors(prs: List, fmt: str = "rst"): # noqa 85 "Find unique authors and print a list in given format" 86 authors = sorted({pr["user"]["login"] for pr in prs}, key=lambda x: x.lower()) 87 88 header = "Contributors" 89 if fmt == "github": 90 print(f"### {header}") # noqa 91 print(", ".join(f"@{user}" for user in authors)) # noqa 92 elif fmt == "rst": 93 print(header) # noqa 94 print("~" * len(header), end="\n\n") # noqa 95 print(",\n".join(f":user:`{user}`" for user in authors)) # noqa 96 97 98 def assign_prs(prs, categs: List[Dict[str, List[str]]]): # noqa 99 "Assign PR to categories based on labels" 100 assigned = defaultdict(list) 101 102 for i, pr in enumerate(prs): 103 for cat in categs: 104 pr_labels = [label["name"] for label in pr["labels"]] 105 if not set(cat["labels"]).isdisjoint(set(pr_labels)): 106 assigned[cat["title"]].append(i) 107 108 # if any(l.startswith("module") for l in pr_labels): 109 # print(i, 
pr_labels) 110 111 assigned["Other"] = list( 112 set(range(len(prs))) - {i for _, l in assigned.items() for i in l} 113 ) 114 115 return assigned 116 117 118 def render_row(pr): # noqa 119 "Render a single row with PR in restructuredText format" 120 print( # noqa 121 "*", 122 pr["title"].replace("`", "``"), 123 f"(:pr:`{pr['number']}`)", 124 f":user:`{pr['user']['login']}`", 125 ) 126 127 128 def render_changelog(prs, assigned): # noqa 129 # sourcery skip: use-named-expression 130 "Render changelog" 131 for title, _ in assigned.items(): 132 pr_group = [prs[i] for i in assigned[title]] 133 if pr_group: 134 print(f"\n{title}") # noqa 135 print("~" * len(title), end="\n\n") # noqa 136 137 for pr in sorted(pr_group, key=lambda x: parser.parse(x["merged_at"])): 138 render_row(pr) 139 140 141 if __name__ == "__main__": 142 143 categories = [ 144 {"title": "Enhancements", "labels": ["feature", "enhancement"]}, 145 {"title": "Fixes", "labels": ["bug", "fix", "bugfix"]}, 146 {"title": "Maintenance", "labels": ["maintenance", "chore"]}, 147 {"title": "Refactored", "labels": ["refactor"]}, 148 {"title": "Documentation", "labels": ["documentation"]}, 149 ] 150 151 pulls = fetch_pull_requests_since_last_release() 152 print(f"Found {len(pulls)} merged PRs since last release") # noqa 153 assigned = assign_prs(pulls, categories) 154 render_changelog(pulls, assigned) 155 print() # noqa 156 render_contributors(pulls) 157 158 release = fetch_latest_release() 159 diff = github_compare_tags(release["tag_name"]) 160 if diff["total_commits"] != len(pulls): 161 raise ValueError( 162 "Something went wrong and not all PR were fetched. " 163 f'There is {len(pulls)} PRs but {diff["total_commits"]} in the diff' 164 "Please verify that all PRs are included in the changelog." 165 ) # noqa 166 [end of build_tools/changelog.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/build_tools/changelog.py b/build_tools/changelog.py --- a/build_tools/changelog.py +++ b/build_tools/changelog.py @@ -23,6 +23,7 @@ def fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa "Fetch a page of pull requests" params = { + "base": "main", "state": "closed", "page": page, "per_page": 50, @@ -160,6 +161,6 @@ if diff["total_commits"] != len(pulls): raise ValueError( "Something went wrong and not all PR were fetched. " - f'There is {len(pulls)} PRs but {diff["total_commits"]} in the diff' + f'There are {len(pulls)} PRs but {diff["total_commits"]} in the diff. ' "Please verify that all PRs are included in the changelog." ) # noqa
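The fix itself is the added `base` parameter. For reference, the filtered request looks like this on its own, and each returned pull object also records its target branch in `base.ref`, which allows a client-side double check (endpoint and fields per the GitHub REST API; authentication headers omitted here):

```python
import httpx

params = {
    "base": "main",        # only pull requests targeting main
    "state": "closed",
    "per_page": 50,
    "sort": "updated",
    "direction": "desc",
}
r = httpx.get(
    "https://api.github.com/repos/alan-turing-institute/sktime/pulls", params=params
)
merged = [pr for pr in r.json() if pr["merged_at"]]

# Each pull object names its target branch, so the server-side filter can be verified:
assert all(pr["base"]["ref"] == "main" for pr in merged)
```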
{"golden_diff": "diff --git a/build_tools/changelog.py b/build_tools/changelog.py\n--- a/build_tools/changelog.py\n+++ b/build_tools/changelog.py\n@@ -23,6 +23,7 @@\n def fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa\n \"Fetch a page of pull requests\"\n params = {\n+ \"base\": \"main\",\n \"state\": \"closed\",\n \"page\": page,\n \"per_page\": 50,\n@@ -160,6 +161,6 @@\n if diff[\"total_commits\"] != len(pulls):\n raise ValueError(\n \"Something went wrong and not all PR were fetched. \"\n- f'There is {len(pulls)} PRs but {diff[\"total_commits\"]} in the diff'\n+ f'There are {len(pulls)} PRs but {diff[\"total_commits\"]} in the diff. '\n \"Please verify that all PRs are included in the changelog.\"\n ) # noqa\n", "issue": "[BUG] changelog generator picks up merges into branches other than `main`\nThe changelog generator seems to pick up PR that were not merged into `main` but into another branch.\r\n\r\nCurrent example for 0.13.0 release at pre-0.13.0 state: #2941, #2952.\r\n\r\nFYI @lmmentel \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"RestructuredText changelog generator.\"\"\"\n\nimport os\nfrom collections import defaultdict\nfrom typing import Dict, List\n\nimport httpx\nfrom dateutil import parser\n\nHEADERS = {\n \"Accept\": \"application/vnd.github.v3+json\",\n}\n\nif os.getenv(\"GITHUB_TOKEN\") is not None:\n HEADERS[\"Authorization\"] = f\"token {os.getenv('GITHUB_TOKEN')}\"\n\nOWNER = \"alan-turing-institute\"\nREPO = \"sktime\"\nGITHUB_REPOS = \"https://api.github.com/repos\"\n\n\ndef fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa\n \"Fetch a page of pull requests\"\n params = {\n \"state\": \"closed\",\n \"page\": page,\n \"per_page\": 50,\n \"sort\": \"updated\",\n \"direction\": \"desc\",\n }\n r = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/pulls\",\n headers=HEADERS,\n params=params,\n )\n return [pr for pr in r.json() if pr[\"merged_at\"]]\n\n\ndef fetch_latest_release(): # noqa\n response = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/releases/latest\", headers=HEADERS\n )\n\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(response.text, response.status_code)\n\n\ndef fetch_pull_requests_since_last_release() -> List[Dict]: # noqa\n \"Fetch pull requests and filter based on merged date\"\n\n release = fetch_latest_release()\n published_at = parser.parse(release[\"published_at\"])\n print( # noqa\n f\"Latest release {release['tag_name']} was published at {published_at}\"\n )\n\n is_exhausted = False\n page = 1\n all_pulls = []\n while not is_exhausted:\n pulls = fetch_merged_pull_requests(page=page)\n all_pulls.extend(\n [p for p in pulls if parser.parse(p[\"merged_at\"]) > published_at]\n )\n is_exhausted = any(parser.parse(p[\"merged_at\"]) < published_at for p in pulls)\n page += 1\n return all_pulls\n\n\ndef github_compare_tags(tag_left: str, tag_right: str = \"HEAD\"): # noqa\n \"Compare commit between two tags\"\n response = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/compare/{tag_left}...{tag_right}\"\n )\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(response.text, response.status_code)\n\n\ndef render_contributors(prs: List, fmt: str = \"rst\"): # noqa\n \"Find unique authors and print a list in given format\"\n authors = sorted({pr[\"user\"][\"login\"] for pr in prs}, key=lambda x: x.lower())\n\n header = \"Contributors\"\n if fmt == \"github\":\n print(f\"### {header}\") # noqa\n print(\", \".join(f\"@{user}\" for user in 
authors)) # noqa\n elif fmt == \"rst\":\n print(header) # noqa\n print(\"~\" * len(header), end=\"\\n\\n\") # noqa\n print(\",\\n\".join(f\":user:`{user}`\" for user in authors)) # noqa\n\n\ndef assign_prs(prs, categs: List[Dict[str, List[str]]]): # noqa\n \"Assign PR to categories based on labels\"\n assigned = defaultdict(list)\n\n for i, pr in enumerate(prs):\n for cat in categs:\n pr_labels = [label[\"name\"] for label in pr[\"labels\"]]\n if not set(cat[\"labels\"]).isdisjoint(set(pr_labels)):\n assigned[cat[\"title\"]].append(i)\n\n # if any(l.startswith(\"module\") for l in pr_labels):\n # print(i, pr_labels)\n\n assigned[\"Other\"] = list(\n set(range(len(prs))) - {i for _, l in assigned.items() for i in l}\n )\n\n return assigned\n\n\ndef render_row(pr): # noqa\n \"Render a single row with PR in restructuredText format\"\n print( # noqa\n \"*\",\n pr[\"title\"].replace(\"`\", \"``\"),\n f\"(:pr:`{pr['number']}`)\",\n f\":user:`{pr['user']['login']}`\",\n )\n\n\ndef render_changelog(prs, assigned): # noqa\n # sourcery skip: use-named-expression\n \"Render changelog\"\n for title, _ in assigned.items():\n pr_group = [prs[i] for i in assigned[title]]\n if pr_group:\n print(f\"\\n{title}\") # noqa\n print(\"~\" * len(title), end=\"\\n\\n\") # noqa\n\n for pr in sorted(pr_group, key=lambda x: parser.parse(x[\"merged_at\"])):\n render_row(pr)\n\n\nif __name__ == \"__main__\":\n\n categories = [\n {\"title\": \"Enhancements\", \"labels\": [\"feature\", \"enhancement\"]},\n {\"title\": \"Fixes\", \"labels\": [\"bug\", \"fix\", \"bugfix\"]},\n {\"title\": \"Maintenance\", \"labels\": [\"maintenance\", \"chore\"]},\n {\"title\": \"Refactored\", \"labels\": [\"refactor\"]},\n {\"title\": \"Documentation\", \"labels\": [\"documentation\"]},\n ]\n\n pulls = fetch_pull_requests_since_last_release()\n print(f\"Found {len(pulls)} merged PRs since last release\") # noqa\n assigned = assign_prs(pulls, categories)\n render_changelog(pulls, assigned)\n print() # noqa\n render_contributors(pulls)\n\n release = fetch_latest_release()\n diff = github_compare_tags(release[\"tag_name\"])\n if diff[\"total_commits\"] != len(pulls):\n raise ValueError(\n \"Something went wrong and not all PR were fetched. \"\n f'There is {len(pulls)} PRs but {diff[\"total_commits\"]} in the diff'\n \"Please verify that all PRs are included in the changelog.\"\n ) # noqa\n", "path": "build_tools/changelog.py"}]}
2,385
224
gh_patches_debug_28894
rasdani/github-patches
git_diff
openfun__richie-1616
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move course run enroll button to the contact block ## Feature Request **Is your feature request related to a problem or unsupported use case? Please describe.** Change the course detail template to be equal than the fun-mooc. On fun-mooc site it is given much more visibility to the current open for enrollment course run button, because it was customized to be on the contact django block. I've done exactly the same thing for nau.edu.pt. So I think we should make this the default case for Richie. **Describe the solution you'd like** I've copy paste the fun-mooc `course_detail.html` that change the blocks `contact` and `runs_open`. **Describe alternatives you've considered** Add same information bellow the enrollment count, but I think the fun-mooc's version is better. **Discovery, Documentation, Adoption, Migration Strategy** If we decide to go further this change, we need to document it on the UPGRADE.md, because the django blocks on the `course_detail.html` could be used to customize the UI. **Do you want to work on it through a Pull Request?** If we approve this ideia, I could send a PR. FUN-MOOC version: ![image](https://user-images.githubusercontent.com/67018/155501008-5a2adc4b-e47b-4ac9-a3af-8be63d307baf.png) NAU version: ![image](https://user-images.githubusercontent.com/67018/155501074-7871f97b-d92d-4a29-a100-28ef3310859c.png) Demo version: ![image](https://user-images.githubusercontent.com/67018/155501476-3774b11c-fa42-48ab-a645-42903ba55e00.png) </issue> <code> [start of src/richie/apps/courses/templatetags/extra_tags.py] 1 """Custom template tags for the courses application of Richie.""" 2 import json 3 4 from django import template 5 from django.core.exceptions import ObjectDoesNotExist 6 from django.template.defaultfilters import stringfilter 7 from django.template.loader import render_to_string 8 from django.utils import timezone 9 from django.utils.translation import get_language 10 from django.utils.translation import gettext as _ 11 from django.utils.translation import to_locale 12 13 import arrow 14 from classytags.arguments import Argument, MultiValueArgument 15 from classytags.core import Options, Tag 16 from classytags.utils import flatten_context 17 from cms.templatetags.cms_tags import ( 18 Placeholder, 19 PlaceholderOptions, 20 _get_page_by_untyped_arg, 21 ) 22 from cms.toolbar.utils import get_toolbar_from_request 23 from cms.utils import get_site_id 24 from cms.utils.plugins import get_plugins 25 26 from ..lms import LMSHandler 27 28 # pylint: disable=invalid-name 29 register = template.Library() 30 31 32 # pylint: disable=too-many-arguments 33 def get_plugins_render_tag( 34 context, name, varname, nodelist, page_lookup=None, edit=True 35 ): 36 """ 37 Retrieve the placeholder's plugins and set them as a variable in the template context. 38 If the placeholder is empty, render the block as fallback content and return the 39 resulting HTML. 40 If the placeholder is editable and rendered on its own page, the edit script and markup 41 are added to the HTML content. 
42 """ 43 content = "" 44 request = context.get("request") 45 46 if request: 47 48 context[varname] = [] 49 page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None)) 50 51 if not page: 52 return "" 53 54 try: 55 placeholder = page.placeholders.get(slot=name) 56 except ObjectDoesNotExist: 57 return "" 58 else: 59 context[varname] = [ 60 cms_plugin.get_plugin_instance()[0] 61 for cms_plugin in get_plugins( 62 request, placeholder, template=page.get_template() 63 ) 64 ] 65 66 # Default content if there is no plugins in the placeholder 67 if not context[varname] and nodelist: 68 content = nodelist.render(context) 69 70 # Add the edit script and markup to the content, only if the placeholder is editable 71 if edit: 72 toolbar = get_toolbar_from_request(request) 73 if toolbar.edit_mode_active: 74 renderer = toolbar.get_content_renderer() 75 data = renderer.get_editable_placeholder_context(placeholder, page=page) 76 data["content"] = content 77 content = renderer.placeholder_edit_template.format(**data) 78 79 return content 80 81 82 @register.tag("placeholder_as_plugins") 83 class PlaceholderAsPlugins(Placeholder): 84 """ 85 Like DjangoCMS 'placeholder' but sets the list of linked plugins to a variable name 86 instead of rendering the placeholder. 87 """ 88 89 name = "placeholder_as_plugins" 90 options = PlaceholderOptions( 91 Argument("name", resolve=False), 92 "as", 93 Argument("varname", resolve=False), 94 MultiValueArgument("extra_bits", required=False, resolve=False), 95 blocks=[("endplaceholder_as_plugins", "nodelist")], 96 ) 97 98 # pylint: disable=arguments-renamed,too-many-arguments 99 def render_tag(self, context, name, varname, extra_bits, nodelist=None): 100 return get_plugins_render_tag(context, name, varname, nodelist) 101 102 103 @register.tag("get_placeholder_plugins") 104 class GetPlaceholderPlugins(Tag): 105 """ 106 A template tag that gets plugins from a page's placeholder and sets them as a context variable: 107 108 {% get_placeholder_plugins "logo" page_lookup as varname %} 109 {% get_placeholder_plugins "logo" page_lookup as varname or %} 110 <div>No content</div> 111 {% endget_placeholder_plugins %} 112 113 The page_lookup parameter can be omitted and will default to the current page 114 115 {% get_placeholder_plugins "logo" as varname %} 116 {% get_placeholder_plugins "logo" as varname or %} 117 <div>No content</div> 118 {% endget_placeholder_plugins %} 119 120 This tag can typically be used in association with the block_plugin tag, 121 to render the retrieved plugins: 122 123 {% get_placeholder_plugins "logo" page_lookup as plugins %} 124 {% blockplugin plugins.0 %} 125 <img src="{% thumbnail instance.picture 300x150 %}"/> 126 {% endblockplugin %} 127 128 Keyword arguments: 129 name: the name of the placeholder 130 page_lookup: lookup argument for Page. See `_get_page_by_untyped_arg()` 131 for detailed information on the allowed types and their interpretation for the 132 `page_lookup` argument. 133 varname: context variable name. Output will be added to template context as this variable 134 instead of being returned. 
135 or: optional argument which if given will make the template tag a block 136 tag whose content is shown if the placeholder is empty 137 """ 138 139 name = "get_placeholder_plugins" 140 options = PlaceholderOptions( 141 Argument("name", resolve=False), 142 Argument("page_lookup", required=False, default=None), 143 "as", 144 Argument("varname", resolve=False), 145 MultiValueArgument("extra_bits", required=False, resolve=False), 146 blocks=[("endget_placeholder_plugins", "nodelist")], 147 ) 148 149 # pylint: disable=arguments-differ,too-many-arguments, unused-argument 150 def render_tag( 151 self, context, name, page_lookup, varname, extra_bits, nodelist=None 152 ): 153 return get_plugins_render_tag( 154 context, name, varname, nodelist, page_lookup, edit=False 155 ) 156 157 158 @register.tag() 159 class BlockPlugin(Tag): 160 """ 161 Like DjangoCMS 'render_plugin_block' but only includes the edit script and markup when 162 the related placeholder is editable. 163 164 This issue was raised to DjangoCMS and we need our own template tag until they find a way 165 to fix it in DjangoCMS (https://github.com/divio/django-cms/issues/6683). 166 """ 167 168 name = "blockplugin" 169 template = "cms/toolbar/plugin.html" 170 options = Options(Argument("plugin"), blocks=[("endblockplugin", "nodelist")]) 171 172 # pylint: disable=arguments-differ 173 def render_tag(self, context, plugin, nodelist): 174 """ 175 Renders the block for the plugin and returns the resulting HTML leaving the temmpate 176 context untouched. 177 If the placeholder is editable, the edit script and markup are added to the rendered HTML. 178 """ 179 request = context.get("request") 180 if not plugin or not request: 181 return "" 182 183 # Add the plugin and its rendered content to an internal context 184 internal_context = flatten_context(context) 185 internal_context["instance"] = plugin 186 internal_context["content"] = nodelist.render(context.new(internal_context)) 187 188 # Add the edit script and markup to the content, only if the placeholder is editable 189 # and the visited page is the one on which the plugin's placeholder is declared. 190 toolbar = get_toolbar_from_request(request) 191 if plugin.placeholder.page == request.current_page and toolbar.edit_mode_active: 192 return render_to_string(self.template, internal_context) 193 194 return internal_context["content"] 195 196 197 @register.filter() 198 def is_empty_placeholder(page, slot): 199 """A template filter to determine if a placeholder is empty. 200 201 This is useful when we don't want to include any wrapper markup in our template unless 202 the placeholder unless it actually contains plugins. 203 """ 204 placeholder = page.placeholders.get(slot=slot) 205 return not placeholder.cmsplugin_set.exists() 206 207 208 @register.filter() 209 def order_by(queryset, args): 210 """A template filter to force ordering on a queryset. 211 212 Taken from: https://djangosnippets.org/snippets/741/ 213 This is useful for DjangoCMS page querysets because we don't have access to the view. 214 """ 215 args = [x.strip() for x in args.split(",")] 216 return queryset.order_by(*args) 217 218 219 @register.filter() 220 def has_connected_lms(course_run): 221 """ 222 Determine if the passed course run has a connected LMS (as determined through out LMSHandler 223 and settings). 224 This enables our templates to either use the <CourseRunEnrollment /> component or a simple 225 link to the course run. 
226 """ 227 return LMSHandler.select_lms(course_run.resource_link) is not None 228 229 230 @register.simple_tag(takes_context=True) 231 def course_enrollment_widget_props(context): 232 """ 233 Return a json dumps which contains all course_run's properties required by 234 CourseEnrollment React widget 235 """ 236 course_run = context["run"] 237 238 profile_urls = json.loads( 239 context.get("AUTHENTICATION", {}).get("profile_urls", "{}") 240 ) 241 dashboard_link = profile_urls.get("dashboard", {}).get("action") 242 243 starts_in_message = None 244 if course_run.start > timezone.now(): 245 course_start = arrow.get(course_run.start) 246 humanized_course_start = course_start.humanize( 247 arrow.now(), locale=to_locale(get_language()) 248 ) 249 # Translators: delay indicates when the course will start as a duration. 250 # In english the string will be "The course will start in 3 days" 251 starts_in_message = _("The course will start {delay:s}").format( 252 delay=humanized_course_start 253 ) 254 255 return json.dumps( 256 { 257 "courseRun": { 258 "id": course_run.id, 259 "resource_link": course_run.resource_link, 260 "priority": course_run.state["priority"], 261 "starts_in_message": starts_in_message, 262 "dashboard_link": dashboard_link, 263 } 264 } 265 ) 266 267 268 @register.filter 269 @stringfilter 270 def trim(value): 271 """ 272 Remove whitespaces before and after a string. 273 """ 274 return value.strip() 275 [end of src/richie/apps/courses/templatetags/extra_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/richie/apps/courses/templatetags/extra_tags.py b/src/richie/apps/courses/templatetags/extra_tags.py --- a/src/richie/apps/courses/templatetags/extra_tags.py +++ b/src/richie/apps/courses/templatetags/extra_tags.py @@ -5,7 +5,7 @@ from django.core.exceptions import ObjectDoesNotExist from django.template.defaultfilters import stringfilter from django.template.loader import render_to_string -from django.utils import timezone +from django.utils import timezone, translation from django.utils.translation import get_language from django.utils.translation import gettext as _ from django.utils.translation import to_locale @@ -227,6 +227,33 @@ return LMSHandler.select_lms(course_run.resource_link) is not None [email protected]() +def visible_on_course_page(course_runs, edit_mode_active=None): + """ + Determine if the passed course run should be visible on the course page, if on edit mode + show all the course runs. + """ + if edit_mode_active: + return course_runs + return list(filter(lambda run: run.is_visible_on_course_page, course_runs)) + + [email protected]() +def sort_runs_by_language_and_start_date(course_runs): + """ + Order course runs by: firstly runs that contains the language of the current user and only + after the runs that don't match the current user authenticated language. On both groups, they + should be sorted by course start date. + """ + current_language = translation.get_language() + return list( + sorted( + course_runs, + key=lambda run: (current_language not in run.languages, run.start), + ) + ) + + @register.simple_tag(takes_context=True) def course_enrollment_widget_props(context): """
{"golden_diff": "diff --git a/src/richie/apps/courses/templatetags/extra_tags.py b/src/richie/apps/courses/templatetags/extra_tags.py\n--- a/src/richie/apps/courses/templatetags/extra_tags.py\n+++ b/src/richie/apps/courses/templatetags/extra_tags.py\n@@ -5,7 +5,7 @@\n from django.core.exceptions import ObjectDoesNotExist\n from django.template.defaultfilters import stringfilter\n from django.template.loader import render_to_string\n-from django.utils import timezone\n+from django.utils import timezone, translation\n from django.utils.translation import get_language\n from django.utils.translation import gettext as _\n from django.utils.translation import to_locale\n@@ -227,6 +227,33 @@\n return LMSHandler.select_lms(course_run.resource_link) is not None\n \n \[email protected]()\n+def visible_on_course_page(course_runs, edit_mode_active=None):\n+ \"\"\"\n+ Determine if the passed course run should be visible on the course page, if on edit mode\n+ show all the course runs.\n+ \"\"\"\n+ if edit_mode_active:\n+ return course_runs\n+ return list(filter(lambda run: run.is_visible_on_course_page, course_runs))\n+\n+\[email protected]()\n+def sort_runs_by_language_and_start_date(course_runs):\n+ \"\"\"\n+ Order course runs by: firstly runs that contains the language of the current user and only\n+ after the runs that don't match the current user authenticated language. On both groups, they\n+ should be sorted by course start date.\n+ \"\"\"\n+ current_language = translation.get_language()\n+ return list(\n+ sorted(\n+ course_runs,\n+ key=lambda run: (current_language not in run.languages, run.start),\n+ )\n+ )\n+\n+\n @register.simple_tag(takes_context=True)\n def course_enrollment_widget_props(context):\n \"\"\"\n", "issue": "Move course run enroll button to the contact block\n## Feature Request\r\n\r\n**Is your feature request related to a problem or unsupported use case? Please describe.**\r\nChange the course detail template to be equal than the fun-mooc.\r\nOn fun-mooc site it is given much more visibility to the current open for enrollment course run button, because it was customized to be on the contact django block.\r\nI've done exactly the same thing for nau.edu.pt.\r\nSo I think we should make this the default case for Richie.\r\n\r\n**Describe the solution you'd like**\r\nI've copy paste the fun-mooc `course_detail.html` that change the blocks `contact` and `runs_open`.\r\n\r\n**Describe alternatives you've considered**\r\nAdd same information bellow the enrollment count, but I think the fun-mooc's version is better. 
\r\n\r\n**Discovery, Documentation, Adoption, Migration Strategy**\r\nIf we decide to go further this change, we need to document it on the UPGRADE.md, because the django blocks on the `course_detail.html` could be used to customize the UI.\r\n\r\n**Do you want to work on it through a Pull Request?**\r\nIf we approve this ideia, I could send a PR.\r\n\r\nFUN-MOOC version:\r\n![image](https://user-images.githubusercontent.com/67018/155501008-5a2adc4b-e47b-4ac9-a3af-8be63d307baf.png)\r\n\r\nNAU version:\r\n![image](https://user-images.githubusercontent.com/67018/155501074-7871f97b-d92d-4a29-a100-28ef3310859c.png)\r\n\r\nDemo version:\r\n![image](https://user-images.githubusercontent.com/67018/155501476-3774b11c-fa42-48ab-a645-42903ba55e00.png)\r\n\n", "before_files": [{"content": "\"\"\"Custom template tags for the courses application of Richie.\"\"\"\nimport json\n\nfrom django import template\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.defaultfilters import stringfilter\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django.utils.translation import get_language\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import to_locale\n\nimport arrow\nfrom classytags.arguments import Argument, MultiValueArgument\nfrom classytags.core import Options, Tag\nfrom classytags.utils import flatten_context\nfrom cms.templatetags.cms_tags import (\n Placeholder,\n PlaceholderOptions,\n _get_page_by_untyped_arg,\n)\nfrom cms.toolbar.utils import get_toolbar_from_request\nfrom cms.utils import get_site_id\nfrom cms.utils.plugins import get_plugins\n\nfrom ..lms import LMSHandler\n\n# pylint: disable=invalid-name\nregister = template.Library()\n\n\n# pylint: disable=too-many-arguments\ndef get_plugins_render_tag(\n context, name, varname, nodelist, page_lookup=None, edit=True\n):\n \"\"\"\n Retrieve the placeholder's plugins and set them as a variable in the template context.\n If the placeholder is empty, render the block as fallback content and return the\n resulting HTML.\n If the placeholder is editable and rendered on its own page, the edit script and markup\n are added to the HTML content.\n \"\"\"\n content = \"\"\n request = context.get(\"request\")\n\n if request:\n\n context[varname] = []\n page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))\n\n if not page:\n return \"\"\n\n try:\n placeholder = page.placeholders.get(slot=name)\n except ObjectDoesNotExist:\n return \"\"\n else:\n context[varname] = [\n cms_plugin.get_plugin_instance()[0]\n for cms_plugin in get_plugins(\n request, placeholder, template=page.get_template()\n )\n ]\n\n # Default content if there is no plugins in the placeholder\n if not context[varname] and nodelist:\n content = nodelist.render(context)\n\n # Add the edit script and markup to the content, only if the placeholder is editable\n if edit:\n toolbar = get_toolbar_from_request(request)\n if toolbar.edit_mode_active:\n renderer = toolbar.get_content_renderer()\n data = renderer.get_editable_placeholder_context(placeholder, page=page)\n data[\"content\"] = content\n content = renderer.placeholder_edit_template.format(**data)\n\n return content\n\n\[email protected](\"placeholder_as_plugins\")\nclass PlaceholderAsPlugins(Placeholder):\n \"\"\"\n Like DjangoCMS 'placeholder' but sets the list of linked plugins to a variable name\n instead of rendering the placeholder.\n \"\"\"\n\n name = \"placeholder_as_plugins\"\n options = PlaceholderOptions(\n 
Argument(\"name\", resolve=False),\n \"as\",\n Argument(\"varname\", resolve=False),\n MultiValueArgument(\"extra_bits\", required=False, resolve=False),\n blocks=[(\"endplaceholder_as_plugins\", \"nodelist\")],\n )\n\n # pylint: disable=arguments-renamed,too-many-arguments\n def render_tag(self, context, name, varname, extra_bits, nodelist=None):\n return get_plugins_render_tag(context, name, varname, nodelist)\n\n\[email protected](\"get_placeholder_plugins\")\nclass GetPlaceholderPlugins(Tag):\n \"\"\"\n A template tag that gets plugins from a page's placeholder and sets them as a context variable:\n\n {% get_placeholder_plugins \"logo\" page_lookup as varname %}\n {% get_placeholder_plugins \"logo\" page_lookup as varname or %}\n <div>No content</div>\n {% endget_placeholder_plugins %}\n\n The page_lookup parameter can be omitted and will default to the current page\n\n {% get_placeholder_plugins \"logo\" as varname %}\n {% get_placeholder_plugins \"logo\" as varname or %}\n <div>No content</div>\n {% endget_placeholder_plugins %}\n\n This tag can typically be used in association with the block_plugin tag,\n to render the retrieved plugins:\n\n {% get_placeholder_plugins \"logo\" page_lookup as plugins %}\n {% blockplugin plugins.0 %}\n <img src=\"{% thumbnail instance.picture 300x150 %}\"/>\n {% endblockplugin %}\n\n Keyword arguments:\n name: the name of the placeholder\n page_lookup: lookup argument for Page. See `_get_page_by_untyped_arg()`\n for detailed information on the allowed types and their interpretation for the\n `page_lookup` argument.\n varname: context variable name. Output will be added to template context as this variable\n instead of being returned.\n or: optional argument which if given will make the template tag a block\n tag whose content is shown if the placeholder is empty\n \"\"\"\n\n name = \"get_placeholder_plugins\"\n options = PlaceholderOptions(\n Argument(\"name\", resolve=False),\n Argument(\"page_lookup\", required=False, default=None),\n \"as\",\n Argument(\"varname\", resolve=False),\n MultiValueArgument(\"extra_bits\", required=False, resolve=False),\n blocks=[(\"endget_placeholder_plugins\", \"nodelist\")],\n )\n\n # pylint: disable=arguments-differ,too-many-arguments, unused-argument\n def render_tag(\n self, context, name, page_lookup, varname, extra_bits, nodelist=None\n ):\n return get_plugins_render_tag(\n context, name, varname, nodelist, page_lookup, edit=False\n )\n\n\[email protected]()\nclass BlockPlugin(Tag):\n \"\"\"\n Like DjangoCMS 'render_plugin_block' but only includes the edit script and markup when\n the related placeholder is editable.\n\n This issue was raised to DjangoCMS and we need our own template tag until they find a way\n to fix it in DjangoCMS (https://github.com/divio/django-cms/issues/6683).\n \"\"\"\n\n name = \"blockplugin\"\n template = \"cms/toolbar/plugin.html\"\n options = Options(Argument(\"plugin\"), blocks=[(\"endblockplugin\", \"nodelist\")])\n\n # pylint: disable=arguments-differ\n def render_tag(self, context, plugin, nodelist):\n \"\"\"\n Renders the block for the plugin and returns the resulting HTML leaving the temmpate\n context untouched.\n If the placeholder is editable, the edit script and markup are added to the rendered HTML.\n \"\"\"\n request = context.get(\"request\")\n if not plugin or not request:\n return \"\"\n\n # Add the plugin and its rendered content to an internal context\n internal_context = flatten_context(context)\n internal_context[\"instance\"] = plugin\n 
internal_context[\"content\"] = nodelist.render(context.new(internal_context))\n\n # Add the edit script and markup to the content, only if the placeholder is editable\n # and the visited page is the one on which the plugin's placeholder is declared.\n toolbar = get_toolbar_from_request(request)\n if plugin.placeholder.page == request.current_page and toolbar.edit_mode_active:\n return render_to_string(self.template, internal_context)\n\n return internal_context[\"content\"]\n\n\[email protected]()\ndef is_empty_placeholder(page, slot):\n \"\"\"A template filter to determine if a placeholder is empty.\n\n This is useful when we don't want to include any wrapper markup in our template unless\n the placeholder unless it actually contains plugins.\n \"\"\"\n placeholder = page.placeholders.get(slot=slot)\n return not placeholder.cmsplugin_set.exists()\n\n\[email protected]()\ndef order_by(queryset, args):\n \"\"\"A template filter to force ordering on a queryset.\n\n Taken from: https://djangosnippets.org/snippets/741/\n This is useful for DjangoCMS page querysets because we don't have access to the view.\n \"\"\"\n args = [x.strip() for x in args.split(\",\")]\n return queryset.order_by(*args)\n\n\[email protected]()\ndef has_connected_lms(course_run):\n \"\"\"\n Determine if the passed course run has a connected LMS (as determined through out LMSHandler\n and settings).\n This enables our templates to either use the <CourseRunEnrollment /> component or a simple\n link to the course run.\n \"\"\"\n return LMSHandler.select_lms(course_run.resource_link) is not None\n\n\[email protected]_tag(takes_context=True)\ndef course_enrollment_widget_props(context):\n \"\"\"\n Return a json dumps which contains all course_run's properties required by\n CourseEnrollment React widget\n \"\"\"\n course_run = context[\"run\"]\n\n profile_urls = json.loads(\n context.get(\"AUTHENTICATION\", {}).get(\"profile_urls\", \"{}\")\n )\n dashboard_link = profile_urls.get(\"dashboard\", {}).get(\"action\")\n\n starts_in_message = None\n if course_run.start > timezone.now():\n course_start = arrow.get(course_run.start)\n humanized_course_start = course_start.humanize(\n arrow.now(), locale=to_locale(get_language())\n )\n # Translators: delay indicates when the course will start as a duration.\n # In english the string will be \"The course will start in 3 days\"\n starts_in_message = _(\"The course will start {delay:s}\").format(\n delay=humanized_course_start\n )\n\n return json.dumps(\n {\n \"courseRun\": {\n \"id\": course_run.id,\n \"resource_link\": course_run.resource_link,\n \"priority\": course_run.state[\"priority\"],\n \"starts_in_message\": starts_in_message,\n \"dashboard_link\": dashboard_link,\n }\n }\n )\n\n\[email protected]\n@stringfilter\ndef trim(value):\n \"\"\"\n Remove whitespaces before and after a string.\n \"\"\"\n return value.strip()\n", "path": "src/richie/apps/courses/templatetags/extra_tags.py"}]}
3,789
413
gh_patches_debug_31740
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2808
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider arby is broken During the global build at 2021-05-26-14-42-23, spider **arby** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/arby.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson)) </issue> <code> [start of locations/spiders/arbys.py] 1 # -*- coding: utf-8 -*- 2 import json 3 import re 4 import scrapy 5 from locations.items import GeojsonPointItem 6 7 8 class ArbysSpider(scrapy.Spider): 9 10 name = "arby" 11 item_attributes = { 'brand': "Arby's", 'brand_wikidata': "Q630866" } 12 allowed_domains = ["locations.arbys.com"] 13 download_delay = 0.2 14 start_urls = ( 15 'https://locations.arbys.com/browse/', 16 ) 17 18 def get_store_info(self, response): 19 data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first() 20 if data: 21 data = json.loads(data)[0] 22 23 properties = { 24 # store name is after the pipe, e.g. Fast Food Drive-Thru 25 # Restaurants | Arby's 8437 26 'ref': data["name"].rsplit("|", 1)[-1].strip(), 27 'name': data["name"], 28 'addr_full': data["address"]["streetAddress"].strip(), 29 'city': data["address"]["addressLocality"].strip(), 30 'state': data["address"]["addressRegion"], 31 'postcode': data["address"]["postalCode"], 32 'phone': data.get("telephone", None), 33 'lat': float(data["geo"]["latitude"]), 34 'lon': float(data["geo"]["longitude"]), 35 'website': response.url, 36 'opening_hours': data["openingHours"], 37 } 38 39 yield GeojsonPointItem(**properties) 40 41 def parse_store(self, response): 42 city_stores = response.xpath('//a[@class="location-name ga-link"]/@href').extract() 43 for city_store in city_stores: 44 yield scrapy.Request( 45 response.urljoin(city_store), 46 callback=self.get_store_info 47 ) 48 49 def parse_state(self, response): 50 51 cities = response.xpath('//a[@class="ga-link"]/@href').extract() 52 for city in cities: 53 yield scrapy.Request( 54 response.urljoin(city), 55 callback=self.parse_store 56 ) 57 58 def parse(self, response): 59 states = response.xpath('//a[@class="ga-link"]/@href').extract() 60 61 for state in states: 62 yield scrapy.Request( 63 response.urljoin(state), 64 callback=self.parse_state 65 ) 66 [end of locations/spiders/arbys.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/arbys.py b/locations/spiders/arbys.py --- a/locations/spiders/arbys.py +++ b/locations/spiders/arbys.py @@ -12,24 +12,31 @@ allowed_domains = ["locations.arbys.com"] download_delay = 0.2 start_urls = ( - 'https://locations.arbys.com/browse/', + 'https://locations.arbys.com/', ) def get_store_info(self, response): data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first() if data: - data = json.loads(data)[0] + try: + data = json.loads(data)[0] + except json.JSONDecodeError: + # Unescaped " on two pages + lines = data.split('\n') + i = 2 + next(i for (i, line) in enumerate(lines) if "mainContentOfPage" in line) + lines[i] = '"text": ""}' + data = '\n'.join(lines) + data = json.loads(data)[0] + properties = { - # store name is after the pipe, e.g. Fast Food Drive-Thru - # Restaurants | Arby's 8437 - 'ref': data["name"].rsplit("|", 1)[-1].strip(), + 'ref': response.css('div.store-id::text').get().split(': ')[-1], 'name': data["name"], 'addr_full': data["address"]["streetAddress"].strip(), 'city': data["address"]["addressLocality"].strip(), 'state': data["address"]["addressRegion"], 'postcode': data["address"]["postalCode"], - 'phone': data.get("telephone", None), + 'phone': data["address"]["telephone"], 'lat': float(data["geo"]["latitude"]), 'lon': float(data["geo"]["longitude"]), 'website': response.url,
{"golden_diff": "diff --git a/locations/spiders/arbys.py b/locations/spiders/arbys.py\n--- a/locations/spiders/arbys.py\n+++ b/locations/spiders/arbys.py\n@@ -12,24 +12,31 @@\n allowed_domains = [\"locations.arbys.com\"]\n download_delay = 0.2\n start_urls = (\n- 'https://locations.arbys.com/browse/',\n+ 'https://locations.arbys.com/',\n )\n \n def get_store_info(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if data:\n- data = json.loads(data)[0]\n+ try:\n+ data = json.loads(data)[0]\n+ except json.JSONDecodeError:\n+ # Unescaped \" on two pages\n+ lines = data.split('\\n')\n+ i = 2 + next(i for (i, line) in enumerate(lines) if \"mainContentOfPage\" in line)\n+ lines[i] = '\"text\": \"\"}'\n+ data = '\\n'.join(lines)\n+ data = json.loads(data)[0]\n+\n \n properties = {\n- # store name is after the pipe, e.g. Fast Food Drive-Thru\n- # Restaurants | Arby's 8437\n- 'ref': data[\"name\"].rsplit(\"|\", 1)[-1].strip(),\n+ 'ref': response.css('div.store-id::text').get().split(': ')[-1],\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"].strip(),\n 'city': data[\"address\"][\"addressLocality\"].strip(),\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n- 'phone': data.get(\"telephone\", None),\n+ 'phone': data[\"address\"][\"telephone\"],\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n", "issue": "Spider arby is broken\nDuring the global build at 2021-05-26-14-42-23, spider **arby** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/arby.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass ArbysSpider(scrapy.Spider):\n\n name = \"arby\"\n item_attributes = { 'brand': \"Arby's\", 'brand_wikidata': \"Q630866\" }\n allowed_domains = [\"locations.arbys.com\"]\n download_delay = 0.2\n start_urls = (\n 'https://locations.arbys.com/browse/',\n )\n\n def get_store_info(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if data:\n data = json.loads(data)[0]\n\n properties = {\n # store name is after the pipe, e.g. 
Fast Food Drive-Thru\n # Restaurants | Arby's 8437\n 'ref': data[\"name\"].rsplit(\"|\", 1)[-1].strip(),\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"].strip(),\n 'city': data[\"address\"][\"addressLocality\"].strip(),\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n 'phone': data.get(\"telephone\", None),\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n 'opening_hours': data[\"openingHours\"],\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_store(self, response):\n city_stores = response.xpath('//a[@class=\"location-name ga-link\"]/@href').extract()\n for city_store in city_stores:\n yield scrapy.Request(\n response.urljoin(city_store),\n callback=self.get_store_info\n )\n\n def parse_state(self, response):\n\n cities = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n for city in cities:\n yield scrapy.Request(\n response.urljoin(city),\n callback=self.parse_store\n )\n\n def parse(self, response):\n states = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n\n for state in states:\n yield scrapy.Request(\n response.urljoin(state),\n callback=self.parse_state\n )\n", "path": "locations/spiders/arbys.py"}]}
1,340
446
gh_patches_debug_33993
rasdani/github-patches
git_diff
mozilla__kitsune-3175
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve performance of _get_creator_counts util function `kitsune.community.utils._get_creator_counts` until function is DB heavy and takes a lot of time to execute. Evaluate its usefulness and provide a way to optimize the query and/or cache the results. This issue is related to the degraded performance SUMO experienced on Fri March 30th ([NR Error](https://rpm.newrelic.com/accounts/1299394/applications/45097089/downtime/34422892)) </issue> <code> [start of kitsune/community/utils.py] 1 import hashlib 2 3 from datetime import datetime, date, timedelta 4 from django.conf import settings 5 from django.core.cache import cache 6 from django.db.models import Count, F 7 8 from kitsune.products.models import Product 9 from kitsune.questions.models import Answer 10 from kitsune.users.models import User 11 from kitsune.users.templatetags.jinja_helpers import profile_avatar 12 from kitsune.wiki.models import Revision 13 14 15 def top_contributors_questions(start=None, end=None, locale=None, product=None, 16 count=10, page=1, use_cache=True): 17 """Get the top Support Forum contributors.""" 18 if use_cache: 19 cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page) 20 cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest() 21 cache_key = 'top_contributors_questions_{}'.format(cache_key) 22 cached = cache.get(cache_key, None) 23 if cached: 24 return cached 25 26 answers = (Answer.objects 27 .exclude(is_spam=True) 28 .exclude(question__is_spam=True) 29 # Adding answer to your own question, isn't a contribution. 30 .exclude(creator_id=F('question__creator_id'))) 31 32 if start is None: 33 # By default we go back 90 days. 34 start = date.today() - timedelta(days=90) 35 answers = answers.filter(created__gte=start) 36 if end: 37 # If no end is specified, we don't need to filter by it. 38 answers = answers.filter(created__lt=end) 39 if locale: 40 answers = answers.filter(question__locale=locale) 41 if product: 42 if isinstance(product, Product): 43 product = product.slug 44 answers = answers.filter(question__product__slug=product) 45 46 users = (User.objects 47 .filter(answers__in=answers) 48 .annotate(query_count=Count('answers')) 49 .order_by('-query_count')) 50 counts = _get_creator_counts(users, count, page) 51 52 if use_cache: 53 cache.set(cache_key, counts, 60*15) # 15 minutes 54 return counts 55 56 57 def top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True): 58 """Get the top KB editors (locale='en-US').""" 59 return top_contributors_l10n( 60 start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache) 61 62 63 def top_contributors_l10n(start=None, end=None, locale=None, product=None, 64 count=10, page=1, use_cache=True): 65 """Get the top l10n contributors for the KB.""" 66 if use_cache: 67 cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page) 68 cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest() 69 cache_key = u'top_contributors_l10n_{}'.format(cache_key) 70 cached = cache.get(cache_key, None) 71 if cached: 72 return cached 73 74 # Get the user ids and contribution count of the top contributors. 75 revisions = Revision.objects.all() 76 if locale is None: 77 # If there is no locale specified, exclude en-US only. The rest are 78 # l10n. 
79 revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE) 80 if start is None: 81 # By default we go back 90 days. 82 start = date.today() - timedelta(days=90) 83 revisions = revisions.filter(created__gte=start) 84 if end: 85 # If no end is specified, we don't need to filter by it. 86 revisions = revisions.filter(created__lt=end) 87 if locale: 88 revisions = revisions.filter(document__locale=locale) 89 if product: 90 if isinstance(product, Product): 91 product = product.slug 92 revisions = revisions.filter(document__products__slug=product) 93 94 users = (User.objects 95 .filter(created_revisions__in=revisions) 96 .annotate(query_count=Count('created_revisions')) 97 .order_by('-query_count')) 98 counts = _get_creator_counts(users, count, page) 99 100 if use_cache: 101 cache.set(cache_key, counts, 60*15) # 15 minutes 102 return counts 103 104 105 def top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True): 106 """Get the top Army of Awesome contributors.""" 107 # AoA is deprecated, return 0 until we remove all related code. 108 return ([], 0) 109 110 111 def _get_creator_counts(query, count, page): 112 total = query.count() 113 results = [] 114 now = datetime.now() 115 for user in query[((page - 1) * count):(page * count)]: 116 last_contribution_date = user.profile.last_contribution_date 117 days_since_last_activity = None 118 if last_contribution_date: 119 days_since_last_activity = now - last_contribution_date 120 121 data = { 122 'count': user.query_count, 123 'term': user.id, 124 'user': { 125 'id': user.id, 126 'username': user.username, 127 'display_name': user.profile.display_name, 128 'avatar': profile_avatar(user, size=120), 129 'twitter_usernames': user.profile.twitter_usernames, 130 'last_contribution_date': last_contribution_date, 131 'days_since_last_activity': days_since_last_activity, 132 } 133 } 134 results.append(data) 135 136 return (results, total) 137 [end of kitsune/community/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kitsune/community/utils.py b/kitsune/community/utils.py --- a/kitsune/community/utils.py +++ b/kitsune/community/utils.py @@ -7,8 +7,7 @@ from kitsune.products.models import Product from kitsune.questions.models import Answer -from kitsune.users.models import User -from kitsune.users.templatetags.jinja_helpers import profile_avatar +from kitsune.users.models import User, UserMappingType from kitsune.wiki.models import Revision @@ -110,27 +109,36 @@ def _get_creator_counts(query, count, page): total = query.count() + + start = (page - 1) * count + end = page * count + query_data = query.values('id', 'query_count')[start:end] + + query_data = {obj['id']: obj['query_count'] for obj in query_data} + + users_data = (UserMappingType.search().filter(id__in=query_data.keys()) + .values_dict('id', 'username', 'display_name', + 'avatar', 'twitter_usernames', + 'last_contribution_date')[:count]) + + users_data = UserMappingType.reshape(users_data) + results = [] now = datetime.now() - for user in query[((page - 1) * count):(page * count)]: - last_contribution_date = user.profile.last_contribution_date - days_since_last_activity = None - if last_contribution_date: - days_since_last_activity = now - last_contribution_date + + for u_data in users_data: + user_id = u_data.get('id') + last_contribution_date = u_data.get('last_contribution_date', None) + + u_data['days_since_last_activity'] = ((now - last_contribution_date).days + if last_contribution_date else None) data = { - 'count': user.query_count, - 'term': user.id, - 'user': { - 'id': user.id, - 'username': user.username, - 'display_name': user.profile.display_name, - 'avatar': profile_avatar(user, size=120), - 'twitter_usernames': user.profile.twitter_usernames, - 'last_contribution_date': last_contribution_date, - 'days_since_last_activity': days_since_last_activity, - } + 'count': query_data.get(user_id), + 'term': user_id, + 'user': u_data } + results.append(data) - return (results, total) + return results, total
{"golden_diff": "diff --git a/kitsune/community/utils.py b/kitsune/community/utils.py\n--- a/kitsune/community/utils.py\n+++ b/kitsune/community/utils.py\n@@ -7,8 +7,7 @@\n \n from kitsune.products.models import Product\n from kitsune.questions.models import Answer\n-from kitsune.users.models import User\n-from kitsune.users.templatetags.jinja_helpers import profile_avatar\n+from kitsune.users.models import User, UserMappingType\n from kitsune.wiki.models import Revision\n \n \n@@ -110,27 +109,36 @@\n \n def _get_creator_counts(query, count, page):\n total = query.count()\n+\n+ start = (page - 1) * count\n+ end = page * count\n+ query_data = query.values('id', 'query_count')[start:end]\n+\n+ query_data = {obj['id']: obj['query_count'] for obj in query_data}\n+\n+ users_data = (UserMappingType.search().filter(id__in=query_data.keys())\n+ .values_dict('id', 'username', 'display_name',\n+ 'avatar', 'twitter_usernames',\n+ 'last_contribution_date')[:count])\n+\n+ users_data = UserMappingType.reshape(users_data)\n+\n results = []\n now = datetime.now()\n- for user in query[((page - 1) * count):(page * count)]:\n- last_contribution_date = user.profile.last_contribution_date\n- days_since_last_activity = None\n- if last_contribution_date:\n- days_since_last_activity = now - last_contribution_date\n+\n+ for u_data in users_data:\n+ user_id = u_data.get('id')\n+ last_contribution_date = u_data.get('last_contribution_date', None)\n+\n+ u_data['days_since_last_activity'] = ((now - last_contribution_date).days\n+ if last_contribution_date else None)\n \n data = {\n- 'count': user.query_count,\n- 'term': user.id,\n- 'user': {\n- 'id': user.id,\n- 'username': user.username,\n- 'display_name': user.profile.display_name,\n- 'avatar': profile_avatar(user, size=120),\n- 'twitter_usernames': user.profile.twitter_usernames,\n- 'last_contribution_date': last_contribution_date,\n- 'days_since_last_activity': days_since_last_activity,\n- }\n+ 'count': query_data.get(user_id),\n+ 'term': user_id,\n+ 'user': u_data\n }\n+\n results.append(data)\n \n- return (results, total)\n+ return results, total\n", "issue": "Improve performance of _get_creator_counts util function\n`kitsune.community.utils._get_creator_counts` until function is DB heavy and takes a lot of time to execute. Evaluate its usefulness and provide a way to optimize the query and/or cache the results. 
\r\n\r\nThis issue is related to the degraded performance SUMO experienced on Fri March 30th ([NR Error](https://rpm.newrelic.com/accounts/1299394/applications/45097089/downtime/34422892))\n", "before_files": [{"content": "import hashlib\n\nfrom datetime import datetime, date, timedelta\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db.models import Count, F\n\nfrom kitsune.products.models import Product\nfrom kitsune.questions.models import Answer\nfrom kitsune.users.models import User\nfrom kitsune.users.templatetags.jinja_helpers import profile_avatar\nfrom kitsune.wiki.models import Revision\n\n\ndef top_contributors_questions(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top Support Forum contributors.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = 'top_contributors_questions_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n answers = (Answer.objects\n .exclude(is_spam=True)\n .exclude(question__is_spam=True)\n # Adding answer to your own question, isn't a contribution.\n .exclude(creator_id=F('question__creator_id')))\n\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n answers = answers.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n answers = answers.filter(created__lt=end)\n if locale:\n answers = answers.filter(question__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n answers = answers.filter(question__product__slug=product)\n\n users = (User.objects\n .filter(answers__in=answers)\n .annotate(query_count=Count('answers'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top KB editors (locale='en-US').\"\"\"\n return top_contributors_l10n(\n start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache)\n\n\ndef top_contributors_l10n(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top l10n contributors for the KB.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = u'top_contributors_l10n_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n # Get the user ids and contribution count of the top contributors.\n revisions = Revision.objects.all()\n if locale is None:\n # If there is no locale specified, exclude en-US only. 
The rest are\n # l10n.\n revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE)\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n revisions = revisions.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n revisions = revisions.filter(created__lt=end)\n if locale:\n revisions = revisions.filter(document__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n revisions = revisions.filter(document__products__slug=product)\n\n users = (User.objects\n .filter(created_revisions__in=revisions)\n .annotate(query_count=Count('created_revisions'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top Army of Awesome contributors.\"\"\"\n # AoA is deprecated, return 0 until we remove all related code.\n return ([], 0)\n\n\ndef _get_creator_counts(query, count, page):\n total = query.count()\n results = []\n now = datetime.now()\n for user in query[((page - 1) * count):(page * count)]:\n last_contribution_date = user.profile.last_contribution_date\n days_since_last_activity = None\n if last_contribution_date:\n days_since_last_activity = now - last_contribution_date\n\n data = {\n 'count': user.query_count,\n 'term': user.id,\n 'user': {\n 'id': user.id,\n 'username': user.username,\n 'display_name': user.profile.display_name,\n 'avatar': profile_avatar(user, size=120),\n 'twitter_usernames': user.profile.twitter_usernames,\n 'last_contribution_date': last_contribution_date,\n 'days_since_last_activity': days_since_last_activity,\n }\n }\n results.append(data)\n\n return (results, total)\n", "path": "kitsune/community/utils.py"}]}
2,157
584
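All three contributor helpers in the kitsune/community/utils.py listing above build their cache keys the same way: join the call arguments, SHA-1 the result, and namespace the digest under a prefix. A self-contained sketch of that pattern follows; the helper name, the argument values, and the 15-minute TTL in the trailing comment are illustrative, only the hashing scheme itself comes from the listing.

```python
import hashlib


def contributor_cache_key(prefix, *args):
    # Join the raw arguments, hash them, and namespace the digest under a
    # prefix, mirroring top_contributors_questions / top_contributors_l10n above.
    raw = u"_".join(u"{}".format(a) for a in args)
    digest = hashlib.sha1(raw.encode("utf-8")).hexdigest()
    return u"{}_{}".format(prefix, digest)


key = contributor_cache_key(
    "top_contributors_questions", None, None, "en-US", "firefox", 10, 1
)
# cache.get(key) / cache.set(key, counts, 60 * 15) would then wrap the query.
```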
gh_patches_debug_55589
rasdani/github-patches
git_diff
wagtail__wagtail-8800
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> patternlibrary_override_tags breaks all non-development installations #8665 added a `patternlibrary_override_tags` tag library to wagtail.admin, which depends on the django-pattern-library package listed in our testing_extras dependencies. However, this tag library will be loaded on all environments that have `wagtail.admin` in their INSTALLED_APPS, so any Wagtail installation that doesn't include testing_extras is currently broken. As a non-frontend dev, I don't know what this tag library is for, and there's no documentation for me to find out. Deleting it and uninstalling django-pattern-library doesn't break any tests, so it seems to me that it doesn't really belong in the testing dependencies. (If it really is a testing dependency, I'd suggest that moving patternlibrary_override_tags.py into one of the test apps in wagtail/test/ would let it do whatever it's meant to do without breaking non-testing environments.) </issue> <code> [start of wagtail/admin/templatetags/patternlibrary_override_tags.py] 1 from pattern_library.monkey_utils import override_tag 2 3 from wagtail.admin.templatetags.wagtailadmin_tags import register 4 5 override_tag(register, name="test_page_is_public") 6 [end of wagtail/admin/templatetags/patternlibrary_override_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/admin/templatetags/patternlibrary_override_tags.py b/wagtail/admin/templatetags/patternlibrary_override_tags.py deleted file mode 100644 --- a/wagtail/admin/templatetags/patternlibrary_override_tags.py +++ /dev/null @@ -1,5 +0,0 @@ -from pattern_library.monkey_utils import override_tag - -from wagtail.admin.templatetags.wagtailadmin_tags import register - -override_tag(register, name="test_page_is_public")
{"golden_diff": "diff --git a/wagtail/admin/templatetags/patternlibrary_override_tags.py b/wagtail/admin/templatetags/patternlibrary_override_tags.py\ndeleted file mode 100644\n--- a/wagtail/admin/templatetags/patternlibrary_override_tags.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-from pattern_library.monkey_utils import override_tag\n-\n-from wagtail.admin.templatetags.wagtailadmin_tags import register\n-\n-override_tag(register, name=\"test_page_is_public\")\n", "issue": "patternlibrary_override_tags breaks all non-development installations\n#8665 added a `patternlibrary_override_tags` tag library to wagtail.admin, which depends on the django-pattern-library package listed in our testing_extras dependencies. However, this tag library will be loaded on all environments that have `wagtail.admin` in their INSTALLED_APPS, so any Wagtail installation that doesn't include testing_extras is currently broken.\r\n\r\nAs a non-frontend dev, I don't know what this tag library is for, and there's no documentation for me to find out. Deleting it and uninstalling django-pattern-library doesn't break any tests, so it seems to me that it doesn't really belong in the testing dependencies. (If it really is a testing dependency, I'd suggest that moving patternlibrary_override_tags.py into one of the test apps in wagtail/test/ would let it do whatever it's meant to do without breaking non-testing environments.)\n", "before_files": [{"content": "from pattern_library.monkey_utils import override_tag\n\nfrom wagtail.admin.templatetags.wagtailadmin_tags import register\n\noverride_tag(register, name=\"test_page_is_public\")\n", "path": "wagtail/admin/templatetags/patternlibrary_override_tags.py"}]}
791
121
gh_patches_debug_17682
rasdani/github-patches
git_diff
DataDog__dd-trace-py-3868
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mypy invalid syntax error with python 2 ### Which version of dd-trace-py are you using? v1.2.0 ### Which version of pip are you using? 22.1.2 ### Which version of the libraries are you using? ``` ddtrace==1.2.0 mypy[python2]==0.961 ``` ### How can we reproduce your problem? 1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce 2. Run `docker run --rm -it (docker build .)` ### What is the result that you get? ``` usr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax Found 1 error in 1 file (errors prevented further checking) ``` ### What is the result that you expected? ``` Success: no issues found in 1 source file ``` </issue> <code> [start of ddtrace/contrib/redis/patch.py] 1 import redis 2 3 from ddtrace import config 4 from ddtrace.vendor import wrapt 5 6 from ...internal.utils.formats import stringify_cache_args 7 from ...pin import Pin 8 from ..trace_utils import unwrap 9 from .util import _trace_redis_cmd 10 from .util import _trace_redis_execute_pipeline 11 12 13 config._add("redis", dict(_default_service="redis")) 14 15 16 def patch(): 17 """Patch the instrumented methods 18 19 This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top 20 of Redis and StrictRedis. However, it means that any "import redis.Redis" won't be instrumented. 21 """ 22 if getattr(redis, "_datadog_patch", False): 23 return 24 setattr(redis, "_datadog_patch", True) 25 26 _w = wrapt.wrap_function_wrapper 27 28 if redis.VERSION < (3, 0, 0): 29 _w("redis", "StrictRedis.execute_command", traced_execute_command) 30 _w("redis", "StrictRedis.pipeline", traced_pipeline) 31 _w("redis", "Redis.pipeline", traced_pipeline) 32 _w("redis.client", "BasePipeline.execute", traced_execute_pipeline) 33 _w("redis.client", "BasePipeline.immediate_execute_command", traced_execute_command) 34 else: 35 _w("redis", "Redis.execute_command", traced_execute_command) 36 _w("redis", "Redis.pipeline", traced_pipeline) 37 _w("redis.client", "Pipeline.execute", traced_execute_pipeline) 38 _w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command) 39 if redis.VERSION >= (4, 2, 0): 40 from .asyncio_patch import traced_async_execute_command 41 from .asyncio_patch import traced_async_execute_pipeline 42 43 _w("redis.asyncio.client", "Redis.execute_command", traced_async_execute_command) 44 _w("redis.asyncio.client", "Redis.pipeline", traced_pipeline) 45 _w("redis.asyncio.client", "Pipeline.execute", traced_async_execute_pipeline) 46 _w("redis.asyncio.client", "Pipeline.immediate_execute_command", traced_async_execute_command) 47 Pin(service=None).onto(redis.asyncio.Redis) 48 Pin(service=None).onto(redis.StrictRedis) 49 50 51 def unpatch(): 52 if getattr(redis, "_datadog_patch", False): 53 setattr(redis, "_datadog_patch", False) 54 55 if redis.VERSION < (3, 0, 0): 56 unwrap(redis.StrictRedis, "execute_command") 57 unwrap(redis.StrictRedis, "pipeline") 58 unwrap(redis.Redis, "pipeline") 59 unwrap(redis.client.BasePipeline, "execute") 60 unwrap(redis.client.BasePipeline, "immediate_execute_command") 61 else: 62 unwrap(redis.Redis, "execute_command") 63 unwrap(redis.Redis, "pipeline") 64 unwrap(redis.client.Pipeline, "execute") 65 unwrap(redis.client.Pipeline, "immediate_execute_command") 66 if redis.VERSION >= (4, 2, 0): 67 unwrap(redis.asyncio.client.Redis, "execute_command") 68 unwrap(redis.asyncio.client.Redis, 
"pipeline") 69 unwrap(redis.asyncio.client.Pipeline, "execute") 70 unwrap(redis.asyncio.client.Pipeline, "immediate_execute_command") 71 72 73 # 74 # tracing functions 75 # 76 def traced_execute_command(func, instance, args, kwargs): 77 pin = Pin.get_from(instance) 78 if not pin or not pin.enabled(): 79 return func(*args, **kwargs) 80 81 with _trace_redis_cmd(pin, config.redis, instance, args): 82 return func(*args, **kwargs) 83 84 85 def traced_pipeline(func, instance, args, kwargs): 86 pipeline = func(*args, **kwargs) 87 pin = Pin.get_from(instance) 88 if pin: 89 pin.onto(pipeline) 90 return pipeline 91 92 93 def traced_execute_pipeline(func, instance, args, kwargs): 94 pin = Pin.get_from(instance) 95 if not pin or not pin.enabled(): 96 return func(*args, **kwargs) 97 98 cmds = [stringify_cache_args(c) for c, _ in instance.command_stack] 99 resource = "\n".join(cmds) 100 with _trace_redis_execute_pipeline(pin, config.redis, resource, instance): 101 return func(*args, **kwargs) 102 [end of ddtrace/contrib/redis/patch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py --- a/ddtrace/contrib/redis/patch.py +++ b/ddtrace/contrib/redis/patch.py @@ -1,4 +1,5 @@ import redis +from six import PY3 from ddtrace import config from ddtrace.vendor import wrapt @@ -36,7 +37,8 @@ _w("redis", "Redis.pipeline", traced_pipeline) _w("redis.client", "Pipeline.execute", traced_execute_pipeline) _w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command) - if redis.VERSION >= (4, 2, 0): + # Avoid mypy invalid syntax errors when parsing Python 2 files + if PY3 and redis.VERSION >= (4, 2, 0): from .asyncio_patch import traced_async_execute_command from .asyncio_patch import traced_async_execute_pipeline
{"golden_diff": "diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py\n--- a/ddtrace/contrib/redis/patch.py\n+++ b/ddtrace/contrib/redis/patch.py\n@@ -1,4 +1,5 @@\n import redis\n+from six import PY3\n \n from ddtrace import config\n from ddtrace.vendor import wrapt\n@@ -36,7 +37,8 @@\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n- if redis.VERSION >= (4, 2, 0):\n+ # Avoid mypy invalid syntax errors when parsing Python 2 files\n+ if PY3 and redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n", "issue": "mypy invalid syntax error with python 2\n### Which version of dd-trace-py are you using?\r\n\r\nv1.2.0\r\n\r\n### Which version of pip are you using?\r\n\r\n22.1.2\r\n\r\n### Which version of the libraries are you using?\r\n\r\n```\r\nddtrace==1.2.0\r\nmypy[python2]==0.961\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce\r\n2. Run `docker run --rm -it (docker build .)`\r\n\r\n### What is the result that you get?\r\n\r\n```\r\nusr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\n```\r\nSuccess: no issues found in 1 source file\r\n```\r\n\n", "before_files": [{"content": "import redis\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...internal.utils.formats import stringify_cache_args\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap\nfrom .util import _trace_redis_cmd\nfrom .util import _trace_redis_execute_pipeline\n\n\nconfig._add(\"redis\", dict(_default_service=\"redis\"))\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n\n This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top\n of Redis and StrictRedis. 
However, it means that any \"import redis.Redis\" won't be instrumented.\n \"\"\"\n if getattr(redis, \"_datadog_patch\", False):\n return\n setattr(redis, \"_datadog_patch\", True)\n\n _w = wrapt.wrap_function_wrapper\n\n if redis.VERSION < (3, 0, 0):\n _w(\"redis\", \"StrictRedis.execute_command\", traced_execute_command)\n _w(\"redis\", \"StrictRedis.pipeline\", traced_pipeline)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"BasePipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"BasePipeline.immediate_execute_command\", traced_execute_command)\n else:\n _w(\"redis\", \"Redis.execute_command\", traced_execute_command)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n if redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n\n _w(\"redis.asyncio.client\", \"Redis.execute_command\", traced_async_execute_command)\n _w(\"redis.asyncio.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.execute\", traced_async_execute_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.immediate_execute_command\", traced_async_execute_command)\n Pin(service=None).onto(redis.asyncio.Redis)\n Pin(service=None).onto(redis.StrictRedis)\n\n\ndef unpatch():\n if getattr(redis, \"_datadog_patch\", False):\n setattr(redis, \"_datadog_patch\", False)\n\n if redis.VERSION < (3, 0, 0):\n unwrap(redis.StrictRedis, \"execute_command\")\n unwrap(redis.StrictRedis, \"pipeline\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.BasePipeline, \"execute\")\n unwrap(redis.client.BasePipeline, \"immediate_execute_command\")\n else:\n unwrap(redis.Redis, \"execute_command\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.Pipeline, \"execute\")\n unwrap(redis.client.Pipeline, \"immediate_execute_command\")\n if redis.VERSION >= (4, 2, 0):\n unwrap(redis.asyncio.client.Redis, \"execute_command\")\n unwrap(redis.asyncio.client.Redis, \"pipeline\")\n unwrap(redis.asyncio.client.Pipeline, \"execute\")\n unwrap(redis.asyncio.client.Pipeline, \"immediate_execute_command\")\n\n\n#\n# tracing functions\n#\ndef traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.redis, instance, args):\n return func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/redis/patch.py"}]}
1,858
213
gh_patches_debug_14674
rasdani/github-patches
git_diff
beeware__toga-1626
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Android: `paths.app` doesn't work in event handlers **Describe the bug** On Android, `paths.app` returns `/` when called outside of the `startup` method, e.g. in an event handler. This is because the `__main__` module has finished importing, and it no longer exists in `sys.modules` under that name. This doesn't happen on Windows, because it runs the entire main loop within the top level of the `__main__` module. The other desktop platforms are probably the same. But on Android, the main loop is already running and we have to integrate into it, so importing the `__main__` module does not block. From inspection of the code, it looks like iOS is similar, so it might have the same problem. The fallback to `/` makes no sense on Android anyway, so it should be removed. Reported by @kakos-k9 on Discord. **Test app** ```python import toga from toga.style import Pack from toga.style.pack import COLUMN, ROW class Hello(toga.App): def startup(self): button = toga.Button("Print", on_press=self.on_press) self.on_press(button) self.main_window = toga.MainWindow() self.main_window.content = toga.Box( style=Pack(direction=COLUMN), children=[button] ) self.main_window.show() def on_press(self, button): import traceback for name in ["app", "data", "toga"]: print(f"{name}={getattr(self.paths, name)}") def main(): return Hello() ``` **Environment:** - Operating System: Android - Python version: 3.8 - Software versions: - Toga: 9a292f8e26 </issue> <code> [start of src/android/toga_android/paths.py] 1 import sys 2 from pathlib import Path 3 4 import toga 5 from toga import App 6 7 8 class Paths: 9 # Allow instantiating Path object via the factory 10 Path = Path 11 12 @property 13 def __context(self): 14 return App.app._impl.native.getApplicationContext() 15 16 @property 17 def app(self): 18 try: 19 return Path(sys.modules["__main__"].__file__).parent 20 except KeyError: 21 # If we're running in test conditions, 22 # there is no __main__ module. 23 return Path.cwd() 24 except AttributeError: 25 # If we're running at an interactive prompt, 26 # the __main__ module isn't file-based. 27 return Path.cwd() 28 29 @property 30 def data(self): 31 return Path(self.__context.getFilesDir().getPath()) 32 33 @property 34 def cache(self): 35 return Path(self.__context.getCacheDir().getPath()) 36 37 @property 38 def logs(self): 39 return self.data 40 41 @property 42 def toga(self): 43 """Return a path to a Toga resources 44 """ 45 return Path(toga.__file__).parent 46 47 48 paths = Paths() 49 [end of src/android/toga_android/paths.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/android/toga_android/paths.py b/src/android/toga_android/paths.py --- a/src/android/toga_android/paths.py +++ b/src/android/toga_android/paths.py @@ -13,18 +13,13 @@ def __context(self): return App.app._impl.native.getApplicationContext() + def __init__(self): + # On Android, __main__ only exists during app startup, so cache its location now. + self._app = Path(sys.modules["__main__"].__file__).parent + @property def app(self): - try: - return Path(sys.modules["__main__"].__file__).parent - except KeyError: - # If we're running in test conditions, - # there is no __main__ module. - return Path.cwd() - except AttributeError: - # If we're running at an interactive prompt, - # the __main__ module isn't file-based. - return Path.cwd() + return self._app @property def data(self):
{"golden_diff": "diff --git a/src/android/toga_android/paths.py b/src/android/toga_android/paths.py\n--- a/src/android/toga_android/paths.py\n+++ b/src/android/toga_android/paths.py\n@@ -13,18 +13,13 @@\n def __context(self):\n return App.app._impl.native.getApplicationContext()\n \n+ def __init__(self):\n+ # On Android, __main__ only exists during app startup, so cache its location now.\n+ self._app = Path(sys.modules[\"__main__\"].__file__).parent\n+\n @property\n def app(self):\n- try:\n- return Path(sys.modules[\"__main__\"].__file__).parent\n- except KeyError:\n- # If we're running in test conditions,\n- # there is no __main__ module.\n- return Path.cwd()\n- except AttributeError:\n- # If we're running at an interactive prompt,\n- # the __main__ module isn't file-based.\n- return Path.cwd()\n+ return self._app\n \n @property\n def data(self):\n", "issue": "Android: `paths.app` doesn't work in event handlers\n**Describe the bug**\r\n\r\nOn Android, `paths.app` returns `/` when called outside of the `startup` method, e.g. in an event handler. This is because the `__main__` module has finished importing, and it no longer exists in `sys.modules` under that name.\r\n\r\nThis doesn't happen on Windows, because it runs the entire main loop within the top level of the `__main__` module. The other desktop platforms are probably the same.\r\n\r\nBut on Android, the main loop is already running and we have to integrate into it, so importing the `__main__` module does not block. From inspection of the code, it looks like iOS is similar, so it might have the same problem.\r\n\r\nThe fallback to `/` makes no sense on Android anyway, so it should be removed.\r\n\r\nReported by @kakos-k9 on Discord.\r\n\r\n**Test app**\r\n```python\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nclass Hello(toga.App):\r\n\r\n def startup(self):\r\n button = toga.Button(\"Print\", on_press=self.on_press)\r\n self.on_press(button)\r\n\r\n self.main_window = toga.MainWindow()\r\n self.main_window.content = toga.Box(\r\n style=Pack(direction=COLUMN),\r\n children=[button]\r\n )\r\n self.main_window.show()\r\n\r\n def on_press(self, button):\r\n import traceback\r\n for name in [\"app\", \"data\", \"toga\"]:\r\n print(f\"{name}={getattr(self.paths, name)}\")\r\n\r\n\r\ndef main():\r\n return Hello()\r\n```\r\n\r\n**Environment:**\r\n - Operating System: Android\r\n - Python version: 3.8\r\n - Software versions:\r\n - Toga: 9a292f8e26\r\n\n", "before_files": [{"content": "import sys\nfrom pathlib import Path\n\nimport toga\nfrom toga import App\n\n\nclass Paths:\n # Allow instantiating Path object via the factory\n Path = Path\n\n @property\n def __context(self):\n return App.app._impl.native.getApplicationContext()\n\n @property\n def app(self):\n try:\n return Path(sys.modules[\"__main__\"].__file__).parent\n except KeyError:\n # If we're running in test conditions,\n # there is no __main__ module.\n return Path.cwd()\n except AttributeError:\n # If we're running at an interactive prompt,\n # the __main__ module isn't file-based.\n return Path.cwd()\n\n @property\n def data(self):\n return Path(self.__context.getFilesDir().getPath())\n\n @property\n def cache(self):\n return Path(self.__context.getCacheDir().getPath())\n\n @property\n def logs(self):\n return self.data\n\n @property\n def toga(self):\n \"\"\"Return a path to a Toga resources\n \"\"\"\n return Path(toga.__file__).parent\n\n\npaths = Paths()\n", "path": "src/android/toga_android/paths.py"}]}
1,265
238
gh_patches_debug_43832
rasdani/github-patches
git_diff
weecology__retriever-401
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reserved keyword 'order' not being properly replaced `clean_column_name()` in `table.py` should replace the column name `order` with `sporder`. It does not appear to be doing this in #286. </issue> <code> [start of lib/table.py] 1 from retriever.lib.cleanup import * 2 import csv 3 import StringIO 4 5 6 class Table: 7 """Information about a database table.""" 8 def __init__(self, name, **kwargs): 9 self.name = name 10 self.pk = True 11 self.contains_pk = False 12 self.delimiter = None 13 self.header_rows = 1 14 self.column_names_row = 1 15 self.fixed_width = False 16 self.cleanup = Cleanup() 17 self.record_id = 0 18 self.columns = [] 19 self.replace_columns = [] 20 self.escape_single_quotes=True 21 self.escape_double_quotes=True 22 for key, item in kwargs.items(): 23 setattr(self, key, item[0] if isinstance(item, tuple) else item) 24 25 def auto_get_columns(self, header): 26 """Gets the column names from the header row 27 28 Identifies the column names from the header row. 29 Replaces database keywords with alternatives. 30 Replaces special characters and spaces. 31 32 """ 33 if self.fixed_width: 34 column_names = self.extract_values(header) 35 else: 36 # Get column names from header row 37 values = self.split_on_delimiter(header) 38 column_names = [name.strip() for name in values] 39 40 columns = map(lambda x: self.clean_column_name(x), column_names) 41 column_values = {x:[] for x in columns if x} 42 43 return [[x, None] for x in columns if x], column_values 44 45 def clean_column_name(self, column_name): 46 '''Makes sure a column name is formatted correctly by removing reserved 47 words, symbols, numbers, etc.''' 48 column_name = column_name.lower() 49 replace_columns = {old.lower(): new.lower() 50 for old, new in self.replace_columns} 51 column_name = replace_columns.get(column_name, column_name) 52 replace = [ 53 ("%", "percent"), 54 ("&", "and"), 55 ("\xb0", "degrees"), 56 ("group", "grp"), 57 ("order", "sporder"), 58 ("check", "checked"), 59 ("references", "refs"), 60 ("long", "lon"), 61 ("date", "record_date"), 62 ("?", ""), 63 ] 64 replace += [(x, '') for x in (")", "\n", "\r", '"', "'")] 65 replace += [(x, '_') for x in (" ", "(", "/", ".", "-")] 66 column_name = reduce(lambda x, y: x.replace(*y), replace, column_name) 67 68 while "__" in column_name: 69 column_name = column_name.replace("__", "_") 70 column_name = column_name.lstrip("0123456789_").rstrip("_") 71 72 return column_name 73 74 def split_on_delimiter(self, line): 75 dialect = csv.excel 76 dialect.escapechar = "\\" 77 r = csv.reader([line], dialect=dialect, delimiter=self.delimiter) 78 return r.next() 79 80 def combine_on_delimiter(self, line_as_list): 81 """Combine a list of values into a line of csv data""" 82 dialect = csv.excel 83 dialect.escapechar = "\\" 84 writer_file = StringIO.StringIO() 85 writer = csv.writer(writer_file, dialect=dialect, delimiter=self.delimiter) 86 writer.writerow(line_as_list) 87 return writer_file.getvalue() 88 89 def values_from_line(self, line): 90 linevalues = [] 91 if (self.pk and self.contains_pk == False): 92 column = 0 93 else: 94 column = -1 95 96 for value in self.extract_values(line): 97 column += 1 98 try: 99 this_column = self.columns[column][1][0] 100 101 # If data type is "skip" ignore the value 102 if this_column == "skip": 103 pass 104 elif this_column == "combine": 105 # If "combine" append value to end of previous column 106 linevalues[-1] += " " + value 107 else: 
108 # Otherwise, add new value 109 linevalues.append(value) 110 except: 111 # too many values for columns; ignore 112 pass 113 114 return linevalues 115 116 def extract_values(self, line): 117 """Given a line of data, this function returns a list of the individual 118 data values.""" 119 if self.fixed_width: 120 pos = 0 121 values = [] 122 for width in self.fixed_width: 123 values.append(line[pos:pos+width].strip()) 124 pos += width 125 return values 126 else: 127 return self.split_on_delimiter(line) 128 129 def get_insert_columns(self, join=True): 130 """Gets a set of column names for insert statements.""" 131 columns = "" 132 for item in self.columns: 133 thistype = item[1][0] 134 if ((thistype != "skip") and (thistype !="combine") and 135 (self.contains_pk == True or thistype[0:3] != "pk-")): 136 columns += item[0] + ", " 137 columns = columns.rstrip(', ') 138 if join: 139 return columns 140 else: 141 return columns.lstrip("(").rstrip(")").split(", ") 142 143 def get_column_datatypes(self): 144 """Gets a set of column names for insert statements.""" 145 columns = [] 146 for item in self.get_insert_columns(False): 147 for column in self.columns: 148 if item == column[0]: 149 columns.append(column[1][0]) 150 return columns 151 152 [end of lib/table.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/table.py b/lib/table.py --- a/lib/table.py +++ b/lib/table.py @@ -1,7 +1,8 @@ -from retriever.lib.cleanup import * import csv import StringIO +from retriever.lib.cleanup import * + class Table: """Information about a database table.""" @@ -19,6 +20,7 @@ self.replace_columns = [] self.escape_single_quotes=True self.escape_double_quotes=True + self.cleaned_columns = False for key, item in kwargs.items(): setattr(self, key, item[0] if isinstance(item, tuple) else item) @@ -39,36 +41,53 @@ columns = map(lambda x: self.clean_column_name(x), column_names) column_values = {x:[] for x in columns if x} - + self.cleaned_columns = True return [[x, None] for x in columns if x], column_values def clean_column_name(self, column_name): - '''Makes sure a column name is formatted correctly by removing reserved - words, symbols, numbers, etc.''' - column_name = column_name.lower() + """Clean column names using the expected sql guidelines + + remove leading whitespaces, replace sql key words, etc.. + """ + column_name = column_name.lower().strip() replace_columns = {old.lower(): new.lower() for old, new in self.replace_columns} - column_name = replace_columns.get(column_name, column_name) + + column_name = replace_columns.get(column_name, column_name).strip() replace = [ - ("%", "percent"), - ("&", "and"), - ("\xb0", "degrees"), - ("group", "grp"), - ("order", "sporder"), - ("check", "checked"), - ("references", "refs"), - ("long", "lon"), - ("date", "record_date"), - ("?", ""), - ] + ("%", "percent"), + ("&", "and"), + ("\xb0", "degrees"), + ("?", ""), + ] replace += [(x, '') for x in (")", "\n", "\r", '"', "'")] - replace += [(x, '_') for x in (" ", "(", "/", ".", "-")] + replace += [(x, '_') for x in (" ", "(", "/", ".", "-", "*", ":")] column_name = reduce(lambda x, y: x.replace(*y), replace, column_name) - while "__" in column_name: column_name = column_name.replace("__", "_") column_name = column_name.lstrip("0123456789_").rstrip("_") - + replace_dict = { + "group": "grp", + "order": "ordered", + "check": "checked", + "references": "refs", + "long": "lon", + "column": "columns", + "cursor": "cursors", + "delete": "deleted", + "insert": "inserted", + "join": "joins", + "select": "selects", + "table": "tables", + "update": "updates", + "date": "record_date" + } + for x in (")", "\n", "\r", '"', "'"): + replace_dict[x] = '' + for x in (" ", "(", "/", ".", "-"): + replace_dict[x] = '_' + if column_name in replace_dict: + column_name = replace_dict[column_name] return column_name def split_on_delimiter(self, line): @@ -129,6 +148,11 @@ def get_insert_columns(self, join=True): """Gets a set of column names for insert statements.""" columns = "" + if not self.cleaned_columns: + column_names = list(self.columns) + self.columns[:] = [] + self.columns = [(self.clean_column_name(name[0]), name[1]) for name in column_names] + self.cleaned_columns = True for item in self.columns: thistype = item[1][0] if ((thistype != "skip") and (thistype !="combine") and @@ -148,4 +172,3 @@ if item == column[0]: columns.append(column[1][0]) return columns -
{"golden_diff": "diff --git a/lib/table.py b/lib/table.py\n--- a/lib/table.py\n+++ b/lib/table.py\n@@ -1,7 +1,8 @@\n-from retriever.lib.cleanup import *\n import csv\n import StringIO\n \n+from retriever.lib.cleanup import *\n+\n \n class Table:\n \"\"\"Information about a database table.\"\"\"\n@@ -19,6 +20,7 @@\n self.replace_columns = []\n self.escape_single_quotes=True\n self.escape_double_quotes=True\n+ self.cleaned_columns = False\n for key, item in kwargs.items():\n setattr(self, key, item[0] if isinstance(item, tuple) else item)\n \n@@ -39,36 +41,53 @@\n \n columns = map(lambda x: self.clean_column_name(x), column_names)\n column_values = {x:[] for x in columns if x}\n-\n+ self.cleaned_columns = True\n return [[x, None] for x in columns if x], column_values\n \n def clean_column_name(self, column_name):\n- '''Makes sure a column name is formatted correctly by removing reserved\n- words, symbols, numbers, etc.'''\n- column_name = column_name.lower()\n+ \"\"\"Clean column names using the expected sql guidelines\n+\n+ remove leading whitespaces, replace sql key words, etc..\n+ \"\"\"\n+ column_name = column_name.lower().strip()\n replace_columns = {old.lower(): new.lower()\n for old, new in self.replace_columns}\n- column_name = replace_columns.get(column_name, column_name)\n+\n+ column_name = replace_columns.get(column_name, column_name).strip()\n replace = [\n- (\"%\", \"percent\"),\n- (\"&\", \"and\"),\n- (\"\\xb0\", \"degrees\"),\n- (\"group\", \"grp\"),\n- (\"order\", \"sporder\"),\n- (\"check\", \"checked\"),\n- (\"references\", \"refs\"),\n- (\"long\", \"lon\"),\n- (\"date\", \"record_date\"),\n- (\"?\", \"\"),\n- ]\n+ (\"%\", \"percent\"),\n+ (\"&\", \"and\"),\n+ (\"\\xb0\", \"degrees\"),\n+ (\"?\", \"\"),\n+ ]\n replace += [(x, '') for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\")]\n- replace += [(x, '_') for x in (\" \", \"(\", \"/\", \".\", \"-\")]\n+ replace += [(x, '_') for x in (\" \", \"(\", \"/\", \".\", \"-\", \"*\", \":\")]\n column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)\n-\n while \"__\" in column_name:\n column_name = column_name.replace(\"__\", \"_\")\n column_name = column_name.lstrip(\"0123456789_\").rstrip(\"_\")\n-\n+ replace_dict = {\n+ \"group\": \"grp\",\n+ \"order\": \"ordered\",\n+ \"check\": \"checked\",\n+ \"references\": \"refs\",\n+ \"long\": \"lon\",\n+ \"column\": \"columns\",\n+ \"cursor\": \"cursors\",\n+ \"delete\": \"deleted\",\n+ \"insert\": \"inserted\",\n+ \"join\": \"joins\",\n+ \"select\": \"selects\",\n+ \"table\": \"tables\",\n+ \"update\": \"updates\",\n+ \"date\": \"record_date\"\n+ }\n+ for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\"):\n+ replace_dict[x] = ''\n+ for x in (\" \", \"(\", \"/\", \".\", \"-\"):\n+ replace_dict[x] = '_'\n+ if column_name in replace_dict:\n+ column_name = replace_dict[column_name]\n return column_name\n \n def split_on_delimiter(self, line):\n@@ -129,6 +148,11 @@\n def get_insert_columns(self, join=True):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = \"\"\n+ if not self.cleaned_columns:\n+ column_names = list(self.columns)\n+ self.columns[:] = []\n+ self.columns = [(self.clean_column_name(name[0]), name[1]) for name in column_names]\n+ self.cleaned_columns = True\n for item in self.columns:\n thistype = item[1][0]\n if ((thistype != \"skip\") and (thistype !=\"combine\") and\n@@ -148,4 +172,3 @@\n if item == column[0]:\n columns.append(column[1][0])\n return columns\n-\n", "issue": "Reserved keyword 'order' not being properly replaced\n`clean_column_name()` 
in `table.py` should replace the column name `order` with `sporder`. It does not appear to be doing this in #286.\n\n", "before_files": [{"content": "from retriever.lib.cleanup import *\nimport csv\nimport StringIO\n\n\nclass Table:\n \"\"\"Information about a database table.\"\"\"\n def __init__(self, name, **kwargs):\n self.name = name\n self.pk = True\n self.contains_pk = False\n self.delimiter = None\n self.header_rows = 1\n self.column_names_row = 1\n self.fixed_width = False\n self.cleanup = Cleanup()\n self.record_id = 0\n self.columns = []\n self.replace_columns = []\n self.escape_single_quotes=True\n self.escape_double_quotes=True\n for key, item in kwargs.items():\n setattr(self, key, item[0] if isinstance(item, tuple) else item)\n\n def auto_get_columns(self, header):\n \"\"\"Gets the column names from the header row\n\n Identifies the column names from the header row.\n Replaces database keywords with alternatives.\n Replaces special characters and spaces.\n\n \"\"\"\n if self.fixed_width:\n column_names = self.extract_values(header)\n else:\n # Get column names from header row\n values = self.split_on_delimiter(header)\n column_names = [name.strip() for name in values]\n\n columns = map(lambda x: self.clean_column_name(x), column_names)\n column_values = {x:[] for x in columns if x}\n\n return [[x, None] for x in columns if x], column_values\n\n def clean_column_name(self, column_name):\n '''Makes sure a column name is formatted correctly by removing reserved\n words, symbols, numbers, etc.'''\n column_name = column_name.lower()\n replace_columns = {old.lower(): new.lower()\n for old, new in self.replace_columns}\n column_name = replace_columns.get(column_name, column_name)\n replace = [\n (\"%\", \"percent\"),\n (\"&\", \"and\"),\n (\"\\xb0\", \"degrees\"),\n (\"group\", \"grp\"),\n (\"order\", \"sporder\"),\n (\"check\", \"checked\"),\n (\"references\", \"refs\"),\n (\"long\", \"lon\"),\n (\"date\", \"record_date\"),\n (\"?\", \"\"),\n ]\n replace += [(x, '') for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\")]\n replace += [(x, '_') for x in (\" \", \"(\", \"/\", \".\", \"-\")]\n column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)\n\n while \"__\" in column_name:\n column_name = column_name.replace(\"__\", \"_\")\n column_name = column_name.lstrip(\"0123456789_\").rstrip(\"_\")\n\n return column_name\n\n def split_on_delimiter(self, line):\n dialect = csv.excel\n dialect.escapechar = \"\\\\\"\n r = csv.reader([line], dialect=dialect, delimiter=self.delimiter)\n return r.next()\n\n def combine_on_delimiter(self, line_as_list):\n \"\"\"Combine a list of values into a line of csv data\"\"\"\n dialect = csv.excel\n dialect.escapechar = \"\\\\\"\n writer_file = StringIO.StringIO()\n writer = csv.writer(writer_file, dialect=dialect, delimiter=self.delimiter)\n writer.writerow(line_as_list)\n return writer_file.getvalue()\n\n def values_from_line(self, line):\n linevalues = []\n if (self.pk and self.contains_pk == False):\n column = 0\n else:\n column = -1\n\n for value in self.extract_values(line):\n column += 1\n try:\n this_column = self.columns[column][1][0]\n\n # If data type is \"skip\" ignore the value\n if this_column == \"skip\":\n pass\n elif this_column == \"combine\":\n # If \"combine\" append value to end of previous column\n linevalues[-1] += \" \" + value\n else:\n # Otherwise, add new value\n linevalues.append(value)\n except:\n # too many values for columns; ignore\n pass\n\n return linevalues\n\n def extract_values(self, line):\n \"\"\"Given a line 
of data, this function returns a list of the individual\n data values.\"\"\"\n if self.fixed_width:\n pos = 0\n values = []\n for width in self.fixed_width:\n values.append(line[pos:pos+width].strip())\n pos += width\n return values\n else:\n return self.split_on_delimiter(line)\n\n def get_insert_columns(self, join=True):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = \"\"\n for item in self.columns:\n thistype = item[1][0]\n if ((thistype != \"skip\") and (thistype !=\"combine\") and\n (self.contains_pk == True or thistype[0:3] != \"pk-\")):\n columns += item[0] + \", \"\n columns = columns.rstrip(', ')\n if join:\n return columns\n else:\n return columns.lstrip(\"(\").rstrip(\")\").split(\", \")\n\n def get_column_datatypes(self):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = []\n for item in self.get_insert_columns(False):\n for column in self.columns:\n if item == column[0]:\n columns.append(column[1][0])\n return columns\n\n", "path": "lib/table.py"}]}
2,074
974
gh_patches_debug_6313
rasdani/github-patches
git_diff
pypa__pip-5789
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError on tab completion of `pip install -U ...` **Environment** * pip version: 18.0 * Python version: 3.7.0 * OS: macOS High Sierra Using iTerm2 3.1.7 or Terminal.app. <!-- Feel free to add more information about your environment here --> **Description** <!-- A clear and concise description of what the bug is. --> Attempt `pip install -U projectname` with tab completion. **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> Tab completion works, or at least no error. **How to Reproduce** <!-- Describe the steps to reproduce this bug. --> 1. Type `pip install -U` or `pip install --upgrade` then a few letters of a project name (eg. just `p` below), then press the tab key. 3. An error occurs: **Output** ```console $ pip install --upgrade pTraceback (most recent call last): File "/usr/local/bin/pip3", line 11, in <module> sys.exit(main()) File "/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py", line 293, in main autocomplete() File "/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py", line 121, in autocomplete cwords, cword, subcommand.parser.option_list_all, File "/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py", line 170, in get_path_completion_type for x in opt.metavar.split('/')): AttributeError: 'NoneType' object has no attribute 'split' ``` **Notes** * When omitting `-U` or `--upgrade`, pressing tab doesn't complete anything but more importantly there's no error. * No error with pip 18.0 on Python 2.7.15. Pressing tab suggests files/directories in current dir (with and without `-U`). </issue> <code> [start of src/pip/_internal/cli/autocompletion.py] 1 """Logic that powers autocompletion installed by ``pip completion``. 2 """ 3 4 import optparse 5 import os 6 import sys 7 8 from pip._internal.cli.main_parser import create_main_parser 9 from pip._internal.commands import commands_dict, get_summaries 10 from pip._internal.utils.misc import get_installed_distributions 11 12 13 def autocomplete(): 14 """Entry Point for completion of main and subcommand options. 15 """ 16 # Don't complete if user hasn't sourced bash_completion file. 
17 if 'PIP_AUTO_COMPLETE' not in os.environ: 18 return 19 cwords = os.environ['COMP_WORDS'].split()[1:] 20 cword = int(os.environ['COMP_CWORD']) 21 try: 22 current = cwords[cword - 1] 23 except IndexError: 24 current = '' 25 26 subcommands = [cmd for cmd, summary in get_summaries()] 27 options = [] 28 # subcommand 29 try: 30 subcommand_name = [w for w in cwords if w in subcommands][0] 31 except IndexError: 32 subcommand_name = None 33 34 parser = create_main_parser() 35 # subcommand options 36 if subcommand_name: 37 # special case: 'help' subcommand has no options 38 if subcommand_name == 'help': 39 sys.exit(1) 40 # special case: list locally installed dists for show and uninstall 41 should_list_installed = ( 42 subcommand_name in ['show', 'uninstall'] and 43 not current.startswith('-') 44 ) 45 if should_list_installed: 46 installed = [] 47 lc = current.lower() 48 for dist in get_installed_distributions(local_only=True): 49 if dist.key.startswith(lc) and dist.key not in cwords[1:]: 50 installed.append(dist.key) 51 # if there are no dists installed, fall back to option completion 52 if installed: 53 for dist in installed: 54 print(dist) 55 sys.exit(1) 56 57 subcommand = commands_dict[subcommand_name]() 58 59 for opt in subcommand.parser.option_list_all: 60 if opt.help != optparse.SUPPRESS_HELP: 61 for opt_str in opt._long_opts + opt._short_opts: 62 options.append((opt_str, opt.nargs)) 63 64 # filter out previously specified options from available options 65 prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] 66 options = [(x, v) for (x, v) in options if x not in prev_opts] 67 # filter options by current input 68 options = [(k, v) for k, v in options if k.startswith(current)] 69 # get completion type given cwords and available subcommand options 70 completion_type = get_path_completion_type( 71 cwords, cword, subcommand.parser.option_list_all, 72 ) 73 # get completion files and directories if ``completion_type`` is 74 # ``<file>``, ``<dir>`` or ``<path>`` 75 if completion_type: 76 options = auto_complete_paths(current, completion_type) 77 options = ((opt, 0) for opt in options) 78 for option in options: 79 opt_label = option[0] 80 # append '=' to options which require args 81 if option[1] and option[0][:2] == "--": 82 opt_label += '=' 83 print(opt_label) 84 else: 85 # show main parser options only when necessary 86 87 opts = [i.option_list for i in parser.option_groups] 88 opts.append(parser.option_list) 89 opts = (o for it in opts for o in it) 90 if current.startswith('-'): 91 for opt in opts: 92 if opt.help != optparse.SUPPRESS_HELP: 93 subcommands += opt._long_opts + opt._short_opts 94 else: 95 # get completion type given cwords and all available options 96 completion_type = get_path_completion_type(cwords, cword, opts) 97 if completion_type: 98 subcommands = auto_complete_paths(current, completion_type) 99 100 print(' '.join([x for x in subcommands if x.startswith(current)])) 101 sys.exit(1) 102 103 104 def get_path_completion_type(cwords, cword, opts): 105 """Get the type of path completion (``file``, ``dir``, ``path`` or None) 106 107 :param cwords: same as the environmental variable ``COMP_WORDS`` 108 :param cword: same as the environmental variable ``COMP_CWORD`` 109 :param opts: The available options to check 110 :return: path completion type (``file``, ``dir``, ``path`` or None) 111 """ 112 if cword < 2 or not cwords[cword - 2].startswith('-'): 113 return 114 for opt in opts: 115 if opt.help == optparse.SUPPRESS_HELP: 116 continue 117 for o in str(opt).split('/'): 118 if 
cwords[cword - 2].split('=')[0] == o: 119 if any(x in ('path', 'file', 'dir') 120 for x in opt.metavar.split('/')): 121 return opt.metavar 122 123 124 def auto_complete_paths(current, completion_type): 125 """If ``completion_type`` is ``file`` or ``path``, list all regular files 126 and directories starting with ``current``; otherwise only list directories 127 starting with ``current``. 128 129 :param current: The word to be completed 130 :param completion_type: path completion type(`file`, `path` or `dir`)i 131 :return: A generator of regular files and/or directories 132 """ 133 directory, filename = os.path.split(current) 134 current_path = os.path.abspath(directory) 135 # Don't complete paths if they can't be accessed 136 if not os.access(current_path, os.R_OK): 137 return 138 filename = os.path.normcase(filename) 139 # list all files that start with ``filename`` 140 file_list = (x for x in os.listdir(current_path) 141 if os.path.normcase(x).startswith(filename)) 142 for f in file_list: 143 opt = os.path.join(current_path, f) 144 comp_file = os.path.normcase(os.path.join(directory, f)) 145 # complete regular files when there is not ``<dir>`` after option 146 # complete directories when there is ``<file>``, ``<path>`` or 147 # ``<dir>``after option 148 if completion_type != 'dir' and os.path.isfile(opt): 149 yield comp_file 150 elif os.path.isdir(opt): 151 yield os.path.join(comp_file, '') 152 [end of src/pip/_internal/cli/autocompletion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pip/_internal/cli/autocompletion.py b/src/pip/_internal/cli/autocompletion.py --- a/src/pip/_internal/cli/autocompletion.py +++ b/src/pip/_internal/cli/autocompletion.py @@ -116,7 +116,8 @@ continue for o in str(opt).split('/'): if cwords[cword - 2].split('=')[0] == o: - if any(x in ('path', 'file', 'dir') + if not opt.metavar or any( + x in ('path', 'file', 'dir') for x in opt.metavar.split('/')): return opt.metavar
{"golden_diff": "diff --git a/src/pip/_internal/cli/autocompletion.py b/src/pip/_internal/cli/autocompletion.py\n--- a/src/pip/_internal/cli/autocompletion.py\n+++ b/src/pip/_internal/cli/autocompletion.py\n@@ -116,7 +116,8 @@\n continue\n for o in str(opt).split('/'):\n if cwords[cword - 2].split('=')[0] == o:\n- if any(x in ('path', 'file', 'dir')\n+ if not opt.metavar or any(\n+ x in ('path', 'file', 'dir')\n for x in opt.metavar.split('/')):\n return opt.metavar\n", "issue": "AttributeError on tab completion of `pip install -U ...`\n**Environment**\r\n\r\n* pip version: 18.0\r\n* Python version: 3.7.0\r\n* OS: macOS High Sierra\r\n\r\nUsing iTerm2 3.1.7 or Terminal.app.\r\n\r\n<!-- Feel free to add more information about your environment here -->\r\n\r\n**Description**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nAttempt `pip install -U projectname` with tab completion.\r\n\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nTab completion works, or at least no error.\r\n\r\n**How to Reproduce**\r\n<!-- Describe the steps to reproduce this bug. -->\r\n\r\n1. Type `pip install -U` or `pip install --upgrade` then a few letters of a project name (eg. just `p` below), then press the tab key.\r\n3. An error occurs:\r\n\r\n**Output**\r\n\r\n```console\r\n$ pip install --upgrade pTraceback (most recent call last):\r\n File \"/usr/local/bin/pip3\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py\", line 293, in main\r\n autocomplete()\r\n File \"/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py\", line 121, in autocomplete\r\n cwords, cword, subcommand.parser.option_list_all,\r\n File \"/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py\", line 170, in get_path_completion_type\r\n for x in opt.metavar.split('/')):\r\nAttributeError: 'NoneType' object has no attribute 'split'\r\n```\r\n\r\n**Notes**\r\n\r\n* When omitting `-U` or `--upgrade`, pressing tab doesn't complete anything but more importantly there's no error.\r\n\r\n* No error with pip 18.0 on Python 2.7.15. 
Pressing tab suggests files/directories in current dir (with and without `-U`).\n", "before_files": [{"content": "\"\"\"Logic that powers autocompletion installed by ``pip completion``.\n\"\"\"\n\nimport optparse\nimport os\nimport sys\n\nfrom pip._internal.cli.main_parser import create_main_parser\nfrom pip._internal.commands import commands_dict, get_summaries\nfrom pip._internal.utils.misc import get_installed_distributions\n\n\ndef autocomplete():\n \"\"\"Entry Point for completion of main and subcommand options.\n \"\"\"\n # Don't complete if user hasn't sourced bash_completion file.\n if 'PIP_AUTO_COMPLETE' not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword - 1]\n except IndexError:\n current = ''\n\n subcommands = [cmd for cmd, summary in get_summaries()]\n options = []\n # subcommand\n try:\n subcommand_name = [w for w in cwords if w in subcommands][0]\n except IndexError:\n subcommand_name = None\n\n parser = create_main_parser()\n # subcommand options\n if subcommand_name:\n # special case: 'help' subcommand has no options\n if subcommand_name == 'help':\n sys.exit(1)\n # special case: list locally installed dists for show and uninstall\n should_list_installed = (\n subcommand_name in ['show', 'uninstall'] and\n not current.startswith('-')\n )\n if should_list_installed:\n installed = []\n lc = current.lower()\n for dist in get_installed_distributions(local_only=True):\n if dist.key.startswith(lc) and dist.key not in cwords[1:]:\n installed.append(dist.key)\n # if there are no dists installed, fall back to option completion\n if installed:\n for dist in installed:\n print(dist)\n sys.exit(1)\n\n subcommand = commands_dict[subcommand_name]()\n\n for opt in subcommand.parser.option_list_all:\n if opt.help != optparse.SUPPRESS_HELP:\n for opt_str in opt._long_opts + opt._short_opts:\n options.append((opt_str, opt.nargs))\n\n # filter out previously specified options from available options\n prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]\n options = [(x, v) for (x, v) in options if x not in prev_opts]\n # filter options by current input\n options = [(k, v) for k, v in options if k.startswith(current)]\n # get completion type given cwords and available subcommand options\n completion_type = get_path_completion_type(\n cwords, cword, subcommand.parser.option_list_all,\n )\n # get completion files and directories if ``completion_type`` is\n # ``<file>``, ``<dir>`` or ``<path>``\n if completion_type:\n options = auto_complete_paths(current, completion_type)\n options = ((opt, 0) for opt in options)\n for option in options:\n opt_label = option[0]\n # append '=' to options which require args\n if option[1] and option[0][:2] == \"--\":\n opt_label += '='\n print(opt_label)\n else:\n # show main parser options only when necessary\n\n opts = [i.option_list for i in parser.option_groups]\n opts.append(parser.option_list)\n opts = (o for it in opts for o in it)\n if current.startswith('-'):\n for opt in opts:\n if opt.help != optparse.SUPPRESS_HELP:\n subcommands += opt._long_opts + opt._short_opts\n else:\n # get completion type given cwords and all available options\n completion_type = get_path_completion_type(cwords, cword, opts)\n if completion_type:\n subcommands = auto_complete_paths(current, completion_type)\n\n print(' '.join([x for x in subcommands if x.startswith(current)]))\n sys.exit(1)\n\n\ndef get_path_completion_type(cwords, cword, opts):\n \"\"\"Get the type of path 
completion (``file``, ``dir``, ``path`` or None)\n\n :param cwords: same as the environmental variable ``COMP_WORDS``\n :param cword: same as the environmental variable ``COMP_CWORD``\n :param opts: The available options to check\n :return: path completion type (``file``, ``dir``, ``path`` or None)\n \"\"\"\n if cword < 2 or not cwords[cword - 2].startswith('-'):\n return\n for opt in opts:\n if opt.help == optparse.SUPPRESS_HELP:\n continue\n for o in str(opt).split('/'):\n if cwords[cword - 2].split('=')[0] == o:\n if any(x in ('path', 'file', 'dir')\n for x in opt.metavar.split('/')):\n return opt.metavar\n\n\ndef auto_complete_paths(current, completion_type):\n \"\"\"If ``completion_type`` is ``file`` or ``path``, list all regular files\n and directories starting with ``current``; otherwise only list directories\n starting with ``current``.\n\n :param current: The word to be completed\n :param completion_type: path completion type(`file`, `path` or `dir`)i\n :return: A generator of regular files and/or directories\n \"\"\"\n directory, filename = os.path.split(current)\n current_path = os.path.abspath(directory)\n # Don't complete paths if they can't be accessed\n if not os.access(current_path, os.R_OK):\n return\n filename = os.path.normcase(filename)\n # list all files that start with ``filename``\n file_list = (x for x in os.listdir(current_path)\n if os.path.normcase(x).startswith(filename))\n for f in file_list:\n opt = os.path.join(current_path, f)\n comp_file = os.path.normcase(os.path.join(directory, f))\n # complete regular files when there is not ``<dir>`` after option\n # complete directories when there is ``<file>``, ``<path>`` or\n # ``<dir>``after option\n if completion_type != 'dir' and os.path.isfile(opt):\n yield comp_file\n elif os.path.isdir(opt):\n yield os.path.join(comp_file, '')\n", "path": "src/pip/_internal/cli/autocompletion.py"}]}
2705
155
gh_patches_debug_2178
rasdani/github-patches
git_diff
rucio__rucio-1028
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move conveyor transfers to third_party_copy_operation Motivation ---------- At the moment read is used for source and write is used for destination, for both third_party_copy should be used. </issue> <code> [start of lib/rucio/vcsversion.py] 1 2 ''' 3 This file is automatically generated; Do not edit it. :) 4 ''' 5 VERSION_INFO = { 6 'final': True, 7 'version': '1.15.3', 8 'branch_nick': 'patch-0-Rucio_1_15_3_preparation', 9 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8', 10 'revno': 6357 11 } 12 [end of lib/rucio/vcsversion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py --- a/lib/rucio/vcsversion.py +++ b/lib/rucio/vcsversion.py @@ -4,8 +4,8 @@ ''' VERSION_INFO = { 'final': True, - 'version': '1.15.3', - 'branch_nick': 'patch-0-Rucio_1_15_3_preparation', - 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8', - 'revno': 6357 + 'version': '1.15.4', + 'branch_nick': 'patch-0-Rucio_1_15_4_preparation', + 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb', + 'revno': 6402 }
{"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.15.3',\n- 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',\n- 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',\n- 'revno': 6357\n+ 'version': '1.15.4',\n+ 'branch_nick': 'patch-0-Rucio_1_15_4_preparation',\n+ 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb',\n+ 'revno': 6402\n }\n", "issue": "Move conveyor transfers to third_party_copy_operation\nMotivation\r\n----------\r\nAt the moment read is used for source and write is used for destination, for both third_party_copy should be used.\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.15.3',\n 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',\n 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',\n 'revno': 6357\n}\n", "path": "lib/rucio/vcsversion.py"}]}
709
247
gh_patches_debug_16727
rasdani/github-patches
git_diff
learningequality__kolibri-6191
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> recipients column in coach tools in not accurate ### Observed behavior recipients column always indicates assignment to the entire class, even when that's not true: ![groups](https://user-images.githubusercontent.com/2367265/69926711-644bce80-146a-11ea-8f2b-82ec444fb367.gif) ### Expected behavior recipients column should be accurate ### User-facing consequences confusion ### Errors and logs none observed ### Steps to reproduce create a lesson and assign it to a group ### Context 0.13.0 beta 3 </issue> <code> [start of kolibri/core/lessons/viewsets.py] 1 import json 2 from functools import partial 3 from itertools import chain 4 5 from django.db import connection 6 from django.db.models import CharField 7 from django_filters.rest_framework import DjangoFilterBackend 8 9 from .serializers import LessonSerializer 10 from kolibri.core.api import ValuesViewset 11 from kolibri.core.auth.api import KolibriAuthPermissions 12 from kolibri.core.auth.api import KolibriAuthPermissionsFilter 13 from kolibri.core.lessons.models import Lesson 14 from kolibri.core.lessons.models import LessonAssignment 15 from kolibri.core.query import ArrayAgg 16 from kolibri.core.query import GroupConcat 17 from kolibri.core.query import process_uuid_aggregate 18 19 20 def _ensure_raw_dict(d): 21 if hasattr(d, "dict"): 22 d = d.dict() 23 return dict(d) 24 25 26 class LessonPermissions(KolibriAuthPermissions): 27 # Overrides the default validator to sanitize the Lesson POST Payload 28 # before validation 29 def validator(self, request, view, datum): 30 model = view.get_serializer_class().Meta.model 31 validated_data = view.get_serializer().to_internal_value( 32 _ensure_raw_dict(datum) 33 ) 34 # Cannot have create assignments without creating the Lesson first, 35 # so this doesn't try to validate the Lesson with a non-empty lesson_assignments list 36 validated_data.pop("lesson_assignments") 37 return request.user.can_create(model, validated_data) 38 39 40 def _map_lesson_classroom(item): 41 return { 42 "id": item.pop("collection__id"), 43 "name": item.pop("collection__name"), 44 "parent": item.pop("collection__parent_id"), 45 } 46 47 48 class LessonViewset(ValuesViewset): 49 serializer_class = LessonSerializer 50 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend) 51 filter_fields = ("collection",) 52 permission_classes = (LessonPermissions,) 53 queryset = Lesson.objects.all().order_by("-date_created") 54 55 values = ( 56 "id", 57 "title", 58 "description", 59 "resources", 60 "is_active", 61 "collection", # classroom 62 "collection__id", 63 "collection__name", 64 "collection__parent_id", 65 "created_by", 66 "assignment_ids", 67 ) 68 69 field_map = { 70 "classroom": _map_lesson_classroom, 71 "resources": lambda x: json.loads(x["resources"]), 72 "assignment_ids": partial(process_uuid_aggregate, key="assignment_ids"), 73 } 74 75 def consolidate(self, items): 76 assignment_ids = [] 77 for item in items: 78 assignment_ids.extend(item["assignment_ids"]) 79 assignments = LessonAssignment.objects.filter(id__in=assignment_ids) 80 if connection.vendor == "postgresql" and ArrayAgg is not None: 81 assignments = assignments.annotate( 82 learner_ids=ArrayAgg("collection__membership__user__id"), 83 ) 84 85 def _process_item(item): 86 item["learner_ids"] = map( 87 lambda x: x.hex, filter(lambda x: x, item["learner_ids"]) 88 ) 89 return item 90 91 else: 92 assignments = assignments.values("id").annotate( 93 
learner_ids=GroupConcat( 94 "collection__membership__user__id", output_field=CharField(), 95 ), 96 ) 97 98 def _process_item(item): 99 item["learner_ids"] = ( 100 item["learner_ids"].split(",") if item["learner_ids"] else [] 101 ) 102 return item 103 104 assignments = { 105 a["id"]: _process_item(a) 106 for a in assignments.values( 107 "id", "collection", "collection__kind", "learner_ids", "assigned_by" 108 ) 109 } 110 for item in items: 111 item_ids = item.pop("assignment_ids") 112 item["lesson_assignments"] = [assignments[a] for a in item_ids] 113 item["learner_ids"] = list( 114 set( 115 chain.from_iterable( 116 a.pop("learner_ids") for a in item["lesson_assignments"] 117 ) 118 ) 119 ) 120 return items 121 122 def annotate_queryset(self, queryset): 123 if connection.vendor == "postgresql" and ArrayAgg is not None: 124 return queryset.annotate(assignment_ids=ArrayAgg("lesson_assignments__id"),) 125 return queryset.values("id").annotate( 126 assignment_ids=GroupConcat( 127 "lesson_assignments__id", output_field=CharField(), 128 ), 129 ) 130 [end of kolibri/core/lessons/viewsets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/core/lessons/viewsets.py b/kolibri/core/lessons/viewsets.py --- a/kolibri/core/lessons/viewsets.py +++ b/kolibri/core/lessons/viewsets.py @@ -4,6 +4,7 @@ from django.db import connection from django.db.models import CharField +from django.db.models import F from django_filters.rest_framework import DjangoFilterBackend from .serializers import LessonSerializer @@ -104,7 +105,11 @@ assignments = { a["id"]: _process_item(a) for a in assignments.values( - "id", "collection", "collection__kind", "learner_ids", "assigned_by" + "id", + "collection", + "learner_ids", + "assigned_by", + collection_kind=F("collection__kind"), ) } for item in items:
{"golden_diff": "diff --git a/kolibri/core/lessons/viewsets.py b/kolibri/core/lessons/viewsets.py\n--- a/kolibri/core/lessons/viewsets.py\n+++ b/kolibri/core/lessons/viewsets.py\n@@ -4,6 +4,7 @@\n \n from django.db import connection\n from django.db.models import CharField\n+from django.db.models import F\n from django_filters.rest_framework import DjangoFilterBackend\n \n from .serializers import LessonSerializer\n@@ -104,7 +105,11 @@\n assignments = {\n a[\"id\"]: _process_item(a)\n for a in assignments.values(\n- \"id\", \"collection\", \"collection__kind\", \"learner_ids\", \"assigned_by\"\n+ \"id\",\n+ \"collection\",\n+ \"learner_ids\",\n+ \"assigned_by\",\n+ collection_kind=F(\"collection__kind\"),\n )\n }\n for item in items:\n", "issue": "recipients column in coach tools in not accurate\n\r\n### Observed behavior\r\n\r\nrecipients column always indicates assignment to the entire class, even when that's not true:\r\n\r\n![groups](https://user-images.githubusercontent.com/2367265/69926711-644bce80-146a-11ea-8f2b-82ec444fb367.gif)\r\n\r\n\r\n### Expected behavior\r\n\r\nrecipients column should be accurate\r\n\r\n### User-facing consequences\r\n\r\nconfusion\r\n\r\n### Errors and logs\r\n\r\nnone observed\r\n\r\n### Steps to reproduce\r\n\r\ncreate a lesson and assign it to a group\r\n\r\n### Context\r\n\r\n0.13.0 beta 3\r\n\n", "before_files": [{"content": "import json\nfrom functools import partial\nfrom itertools import chain\n\nfrom django.db import connection\nfrom django.db.models import CharField\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom .serializers import LessonSerializer\nfrom kolibri.core.api import ValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.lessons.models import LessonAssignment\nfrom kolibri.core.query import ArrayAgg\nfrom kolibri.core.query import GroupConcat\nfrom kolibri.core.query import process_uuid_aggregate\n\n\ndef _ensure_raw_dict(d):\n if hasattr(d, \"dict\"):\n d = d.dict()\n return dict(d)\n\n\nclass LessonPermissions(KolibriAuthPermissions):\n # Overrides the default validator to sanitize the Lesson POST Payload\n # before validation\n def validator(self, request, view, datum):\n model = view.get_serializer_class().Meta.model\n validated_data = view.get_serializer().to_internal_value(\n _ensure_raw_dict(datum)\n )\n # Cannot have create assignments without creating the Lesson first,\n # so this doesn't try to validate the Lesson with a non-empty lesson_assignments list\n validated_data.pop(\"lesson_assignments\")\n return request.user.can_create(model, validated_data)\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LessonViewset(ValuesViewset):\n serializer_class = LessonSerializer\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n filter_fields = (\"collection\",)\n permission_classes = (LessonPermissions,)\n queryset = Lesson.objects.all().order_by(\"-date_created\")\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\", # classroom\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n \"created_by\",\n \"assignment_ids\",\n )\n\n field_map = {\n \"classroom\": _map_lesson_classroom,\n \"resources\": lambda x: 
json.loads(x[\"resources\"]),\n \"assignment_ids\": partial(process_uuid_aggregate, key=\"assignment_ids\"),\n }\n\n def consolidate(self, items):\n assignment_ids = []\n for item in items:\n assignment_ids.extend(item[\"assignment_ids\"])\n assignments = LessonAssignment.objects.filter(id__in=assignment_ids)\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n assignments = assignments.annotate(\n learner_ids=ArrayAgg(\"collection__membership__user__id\"),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = map(\n lambda x: x.hex, filter(lambda x: x, item[\"learner_ids\"])\n )\n return item\n\n else:\n assignments = assignments.values(\"id\").annotate(\n learner_ids=GroupConcat(\n \"collection__membership__user__id\", output_field=CharField(),\n ),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = (\n item[\"learner_ids\"].split(\",\") if item[\"learner_ids\"] else []\n )\n return item\n\n assignments = {\n a[\"id\"]: _process_item(a)\n for a in assignments.values(\n \"id\", \"collection\", \"collection__kind\", \"learner_ids\", \"assigned_by\"\n )\n }\n for item in items:\n item_ids = item.pop(\"assignment_ids\")\n item[\"lesson_assignments\"] = [assignments[a] for a in item_ids]\n item[\"learner_ids\"] = list(\n set(\n chain.from_iterable(\n a.pop(\"learner_ids\") for a in item[\"lesson_assignments\"]\n )\n )\n )\n return items\n\n def annotate_queryset(self, queryset):\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n return queryset.annotate(assignment_ids=ArrayAgg(\"lesson_assignments__id\"),)\n return queryset.values(\"id\").annotate(\n assignment_ids=GroupConcat(\n \"lesson_assignments__id\", output_field=CharField(),\n ),\n )\n", "path": "kolibri/core/lessons/viewsets.py"}]}
1893
198
gh_patches_debug_134
rasdani/github-patches
git_diff
mkdocs__mkdocs-636
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Site description not working Hi, I have the following configuration ``` yml site_name: embed.js site_url: http://embedjs.com site_author: Ritesh Kumar site_description: A jQuery plugin that analyses the string and automatically embeds emojis, media, maps, tweets, code and services. pages: - Home: index.md - Getting Started: gettingStarted.md - How to use ?: usage.md - Documentation: documentation.md - Working Examples: examples.md - Changelogs: history.md theme: readthedocs extra_css: ["custom/custom.css"] site_favicon: img/favicon.ico repo_url: https://github.com/ritz078/embed.js google_analytics: ['UA-60624235-2', 'rkritesh.in/embed.js'] ``` But sadly author and description are not appearing in the served file. other properties are working fine. Any help will be great. </issue> <code> [start of mkdocs/cli.py] 1 #!/usr/bin/env python 2 # coding: utf-8 3 4 from __future__ import unicode_literals 5 import logging 6 import click 7 import socket 8 9 from mkdocs import __version__ 10 from mkdocs import build 11 from mkdocs import gh_deploy 12 from mkdocs import new 13 from mkdocs import serve 14 from mkdocs import utils 15 from mkdocs import exceptions 16 from mkdocs.config import load_config 17 18 log = logging.getLogger(__name__) 19 20 21 class State(object): 22 ''' Maintain logging level.''' 23 24 def __init__(self, log_name='mkdocs', level=logging.INFO): 25 self.logger = logging.getLogger(log_name) 26 self.logger.propagate = False 27 stream = logging.StreamHandler() 28 formatter = logging.Formatter("%(levelname)-7s - %(message)s ") 29 stream.setFormatter(formatter) 30 self.logger.addHandler(stream) 31 32 self.logger.setLevel(level) 33 34 35 pass_state = click.make_pass_decorator(State, ensure=True) 36 37 38 def verbose_option(f): 39 def callback(ctx, param, value): 40 state = ctx.ensure_object(State) 41 if value: 42 state.logger.setLevel(logging.DEBUG) 43 return click.option('-v', '--verbose', 44 is_flag=True, 45 expose_value=False, 46 help='Enable verbose output', 47 callback=callback)(f) 48 49 50 def quiet_option(f): 51 def callback(ctx, param, value): 52 state = ctx.ensure_object(State) 53 if value: 54 state.logger.setLevel(logging.ERROR) 55 return click.option('-q', '--quiet', 56 is_flag=True, 57 expose_value=False, 58 help='Silence warnings', 59 callback=callback)(f) 60 61 62 def common_options(f): 63 f = verbose_option(f) 64 f = quiet_option(f) 65 return f 66 67 68 clean_help = "Remove old files from the site_dir before building" 69 config_file_help = "Provide a specific MkDocs config" 70 dev_addr_help = ("IP address and port to serve documentation locally (default: " 71 "localhost:8000)") 72 strict_help = ("Enable strict mode. This will cause MkDocs to abort the build " 73 "on any warnings.") 74 theme_help = "The theme to use when building your documentation." 75 theme_choices = utils.get_theme_names() 76 site_dir_help = "The directory to output the result of the documentation build." 77 reload_help = "Enable and disable the live reloading in the development server." 78 commit_message_help = ("A commit message to use when commiting to the " 79 "Github Pages remote branch") 80 remote_branch_help = ("The remote branch to commit to for Github Pages. 
This " 81 "overrides the value specified in config") 82 83 84 @click.group(context_settings={'help_option_names': ['-h', '--help']}) 85 @click.version_option(__version__, '-V', '--version') 86 @common_options 87 def cli(): 88 """ 89 MkDocs - Project documentation with Markdown. 90 """ 91 pass 92 93 94 @cli.command(name="serve") 95 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help) 96 @click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>') 97 @click.option('-s', '--strict', is_flag=True, help=strict_help) 98 @click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help) 99 @click.option('--livereload/--no-livereload', default=True, help=reload_help) 100 @common_options 101 def serve_command(dev_addr, config_file, strict, theme, livereload): 102 """Run the builtin development server""" 103 104 logging.getLogger('tornado').setLevel(logging.WARNING) 105 106 try: 107 serve.serve( 108 config_file=config_file, 109 dev_addr=dev_addr, 110 strict=strict, 111 theme=theme, 112 livereload=livereload, 113 ) 114 except (exceptions.ConfigurationError, socket.error) as e: 115 # Avoid ugly, unhelpful traceback 116 raise SystemExit('\n' + str(e)) 117 118 119 @cli.command(name="build") 120 @click.option('-c', '--clean', is_flag=True, help=clean_help) 121 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help) 122 @click.option('-s', '--strict', is_flag=True, help=strict_help) 123 @click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help) 124 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help) 125 @common_options 126 def build_command(clean, config_file, strict, theme, site_dir): 127 """Build the MkDocs documentation""" 128 try: 129 build.build(load_config( 130 config_file=config_file, 131 strict=strict, 132 theme=theme, 133 site_dir=site_dir 134 ), clean_site_dir=clean) 135 except exceptions.ConfigurationError as e: 136 # Avoid ugly, unhelpful traceback 137 raise SystemExit('\n' + str(e)) 138 139 140 @cli.command(name="json") 141 @click.option('-c', '--clean', is_flag=True, help=clean_help) 142 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help) 143 @click.option('-s', '--strict', is_flag=True, help=strict_help) 144 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help) 145 @common_options 146 def json_command(clean, config_file, strict, site_dir): 147 """Build the MkDocs documentation to JSON files 148 149 Rather than building your documentation to HTML pages, this 150 outputs each page in a simple JSON format. This command is 151 useful if you want to index your documentation in an external 152 search engine. 153 """ 154 155 log.warning("The json command is deprcated and will be removed in a future " 156 "MkDocs release. 
For details on updating: " 157 "http://www.mkdocs.org/about/release-notes/") 158 159 try: 160 build.build(load_config( 161 config_file=config_file, 162 strict=strict, 163 site_dir=site_dir 164 ), dump_json=True, clean_site_dir=clean) 165 except exceptions.ConfigurationError as e: 166 # Avoid ugly, unhelpful traceback 167 raise SystemExit('\n' + str(e)) 168 169 170 @cli.command(name="gh-deploy") 171 @click.option('-c', '--clean', is_flag=True, help=clean_help) 172 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help) 173 @click.option('-m', '--message', help=commit_message_help) 174 @click.option('-b', '--remote-branch', help=remote_branch_help) 175 @click.option('-r', '--remote-name', help=remote_branch_help) 176 @common_options 177 def gh_deploy_command(config_file, clean, message, remote_branch, remote_name): 178 """Deploy your documentation to GitHub Pages""" 179 try: 180 config = load_config( 181 config_file=config_file, 182 remote_branch=remote_branch, 183 remote_name=remote_name 184 ) 185 build.build(config, clean_site_dir=clean) 186 gh_deploy.gh_deploy(config, message=message) 187 except exceptions.ConfigurationError as e: 188 # Avoid ugly, unhelpful traceback 189 raise SystemExit('\n' + str(e)) 190 191 192 @cli.command(name="new") 193 @click.argument("project_directory") 194 @common_options 195 def new_command(project_directory): 196 """Create a new MkDocs project""" 197 new.new(project_directory) 198 [end of mkdocs/cli.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/cli.py b/mkdocs/cli.py --- a/mkdocs/cli.py +++ b/mkdocs/cli.py @@ -88,7 +88,6 @@ """ MkDocs - Project documentation with Markdown. """ - pass @cli.command(name="serve")
{"golden_diff": "diff --git a/mkdocs/cli.py b/mkdocs/cli.py\n--- a/mkdocs/cli.py\n+++ b/mkdocs/cli.py\n@@ -88,7 +88,6 @@\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n- pass\n \n \n @cli.command(name=\"serve\")\n", "issue": "Site description not working\nHi,\n\nI have the following configuration\n\n``` yml\nsite_name: embed.js\nsite_url: http://embedjs.com\nsite_author: Ritesh Kumar\nsite_description: A jQuery plugin that analyses the string and automatically embeds emojis, media, maps, tweets, code and services.\npages:\n- Home: index.md\n- Getting Started: gettingStarted.md\n- How to use ?: usage.md\n- Documentation: documentation.md\n- Working Examples: examples.md\n- Changelogs: history.md\ntheme: readthedocs\nextra_css: [\"custom/custom.css\"]\nsite_favicon: img/favicon.ico\nrepo_url: https://github.com/ritz078/embed.js\ngoogle_analytics: ['UA-60624235-2', 'rkritesh.in/embed.js']\n```\n\nBut sadly author and description are not appearing in the served file. other properties are working fine. Any help will be great.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport click\nimport socket\n\nfrom mkdocs import __version__\nfrom mkdocs import build\nfrom mkdocs import gh_deploy\nfrom mkdocs import new\nfrom mkdocs import serve\nfrom mkdocs import utils\nfrom mkdocs import exceptions\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\nclass State(object):\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ndef common_options(f):\n f = verbose_option(f)\n f = quiet_option(f)\n return f\n\n\nclean_help = \"Remove old files from the site_dir before building\"\nconfig_file_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally (default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nreload_help = \"Enable and disable the live reloading in the development server.\"\ncommit_message_help = (\"A commit message to use when commiting to the \"\n \"Github Pages remote branch\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. 
This \"\n \"overrides the value specified in config\")\n\n\[email protected](context_settings={'help_option_names': ['-h', '--help']})\[email protected]_option(__version__, '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n pass\n\n\[email protected](name=\"serve\")\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('--livereload/--no-livereload', default=True, help=reload_help)\n@common_options\ndef serve_command(dev_addr, config_file, strict, theme, livereload):\n \"\"\"Run the builtin development server\"\"\"\n\n logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n livereload=livereload,\n )\n except (exceptions.ConfigurationError, socket.error) as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"build\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, config_file, strict, theme, site_dir):\n \"\"\"Build the MkDocs documentation\"\"\"\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n theme=theme,\n site_dir=site_dir\n ), clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"json\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef json_command(clean, config_file, strict, site_dir):\n \"\"\"Build the MkDocs documentation to JSON files\n\n Rather than building your documentation to HTML pages, this\n outputs each page in a simple JSON format. This command is\n useful if you want to index your documentation in an external\n search engine.\n \"\"\"\n\n log.warning(\"The json command is deprcated and will be removed in a future \"\n \"MkDocs release. 
For details on updating: \"\n \"http://www.mkdocs.org/about/release-notes/\")\n\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n site_dir=site_dir\n ), dump_json=True, clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"gh-deploy\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-m', '--message', help=commit_message_help)\[email protected]('-b', '--remote-branch', help=remote_branch_help)\[email protected]('-r', '--remote-name', help=remote_branch_help)\n@common_options\ndef gh_deploy_command(config_file, clean, message, remote_branch, remote_name):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n config = load_config(\n config_file=config_file,\n remote_branch=remote_branch,\n remote_name=remote_name\n )\n build.build(config, clean_site_dir=clean)\n gh_deploy.gh_deploy(config, message=message)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"new\")\[email protected](\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n", "path": "mkdocs/cli.py"}]}
2779
69
gh_patches_debug_4596
rasdani/github-patches
git_diff
liberapay__liberapay.com-979
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Currency exchange rate between EUR and USD are mixed up If I give euros to an account using dollar as main currency, the currency rate gets mixed up somewhere. For instance if I give 5€, on the creator's page it only displays 4.05$ instead of 6.18$. Current exchange rate: * 5 EUR to USD: 6.18$ * 5 USD to EUR: 4.05€ </issue> <code> [start of liberapay/models/_mixin_team.py] 1 """Teams are groups of participants. 2 """ 3 from __future__ import division, print_function, unicode_literals 4 5 from collections import OrderedDict 6 from statistics import median 7 8 from mangopay.utils import Money 9 10 from liberapay.constants import ZERO, TAKE_THROTTLING_THRESHOLD 11 from liberapay.utils import NS 12 13 14 class MemberLimitReached(Exception): pass 15 16 17 class InactiveParticipantAdded(Exception): pass 18 19 20 class MixinTeam(object): 21 22 def invite(self, invitee, inviter): 23 assert self.kind == 'group' 24 with self.db.get_cursor() as c: 25 n_id = invitee.notify( 26 'team_invite', 27 team=self.username, 28 team_url=self.url(), 29 inviter=inviter.username, 30 ) 31 payload = dict(invitee=invitee.id, notification_id=n_id) 32 self.add_event(c, 'invite', payload, inviter.id) 33 34 def add_member(self, member, cursor=None): 35 """Add a member to this team. 36 """ 37 if self.nmembers >= 149: 38 raise MemberLimitReached 39 if member.status != 'active': 40 raise InactiveParticipantAdded 41 self.set_take_for(member, ZERO[self.main_currency], self, cursor=cursor) 42 43 def remove_all_members(self, cursor=None): 44 (cursor or self.db).run(""" 45 INSERT INTO takes 46 (ctime, member, team, amount, actual_amount, recorder) 47 SELECT ctime, member, %(id)s, NULL, NULL, %(id)s 48 FROM current_takes 49 WHERE team=%(id)s 50 """, dict(id=self.id)) 51 52 def member_of(self, team): 53 """Given a Participant object, return a boolean. 54 """ 55 assert team.kind == 'group' 56 return self.db.one(""" 57 SELECT true 58 FROM current_takes 59 WHERE team=%s AND member=%s 60 """, (team.id, self.id), default=False) 61 62 def get_takes_last_week(self): 63 """Get the users' nominal takes last week. Used in throttling. 64 """ 65 assert self.kind == 'group' 66 takes = {t.member: t.amount for t in self.db.all(""" 67 68 SELECT DISTINCT ON (member) member, amount, mtime 69 FROM takes 70 WHERE team=%s 71 AND mtime < ( 72 SELECT ts_start 73 FROM paydays 74 WHERE ts_end > ts_start 75 ORDER BY ts_start DESC LIMIT 1 76 ) 77 ORDER BY member, mtime DESC 78 79 """, (self.id,)) if t.amount} 80 return takes 81 82 def get_take_for(self, member): 83 """Return the nominal take for this member, or None. 84 """ 85 return self.db.one( 86 "SELECT amount FROM current_takes WHERE member = %s AND team = %s", 87 (member.id, self.id) 88 ) 89 90 def compute_max_this_week(self, member_id, last_week): 91 """2x the member's take last week, or the member's take last week + the 92 leftover, or last week's median take, or 1.00, or infinity if the takes 93 were all zero last week or if throttling is disabled. 
94 """ 95 if not self.throttle_takes: 96 return 97 sum_last_week = sum(last_week.values()) 98 if sum_last_week == 0: 99 return 100 sum_last_week = sum_last_week.convert(self.main_currency) 101 initial_leftover = self.receiving - sum_last_week 102 nonzero_last_week = [a.convert(self.main_currency).amount for a in last_week.values() if a] 103 member_last_week = last_week.get(member_id, ZERO[self.main_currency]).convert(self.main_currency) 104 return max( 105 member_last_week * 2, 106 member_last_week + initial_leftover, 107 Money(median(nonzero_last_week or (0,)), self.main_currency), 108 TAKE_THROTTLING_THRESHOLD[self.main_currency] 109 ) 110 111 def set_take_for(self, member, take, recorder, check_max=True, cursor=None): 112 """Sets member's take from the team pool. 113 """ 114 assert self.kind == 'group' 115 116 if recorder.id != self.id: 117 cur_take = self.get_take_for(member) 118 if cur_take is None: 119 return None 120 121 if not isinstance(take, (None.__class__, Money)): 122 take = Money(take, self.main_currency) # TODO drop this 123 124 with self.db.get_cursor(cursor) as cursor: 125 # Lock to avoid race conditions 126 cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE") 127 # Throttle the new take, if there is more than one member 128 threshold = TAKE_THROTTLING_THRESHOLD[self.main_currency] 129 if take and check_max and self.nmembers > 1 and take > threshold: 130 last_week = self.get_takes_last_week() 131 max_this_week = self.compute_max_this_week(member.id, last_week) 132 if max_this_week is not None and take > max_this_week: 133 take = max_this_week 134 # Insert the new take 135 cursor.run(""" 136 137 INSERT INTO takes 138 (ctime, member, team, amount, actual_amount, recorder) 139 SELECT COALESCE(( 140 SELECT ctime 141 FROM takes 142 WHERE member=%(member)s 143 AND team=%(team)s 144 LIMIT 1 145 ), current_timestamp) 146 , %(member)s 147 , %(team)s 148 , %(amount)s 149 , CASE WHEN %(amount)s IS NULL THEN NULL ELSE 150 COALESCE(( 151 SELECT actual_amount 152 FROM takes 153 WHERE member=%(member)s 154 AND team=%(team)s 155 ORDER BY mtime DESC 156 LIMIT 1 157 ), zero(%(amount)s::currency_amount)) 158 END 159 , %(recorder)s 160 161 """, dict(member=member.id, team=self.id, amount=take, 162 recorder=recorder.id)) 163 # Recompute the actual takes and update the cached amounts 164 self.recompute_actual_takes(cursor, member=member) 165 # Update is_funded on member's tips 166 member.update_giving(cursor) 167 168 return take 169 170 def get_current_takes(self, cursor=None): 171 """Return a list of member takes for a team. 172 """ 173 assert self.kind == 'group' 174 TAKES = """ 175 SELECT p.id AS member_id, p.username AS member_name, p.avatar_url 176 , (p.mangopay_user_id IS NOT NULL) AS is_identified, p.is_suspended 177 , t.amount, t.actual_amount, t.ctime, t.mtime 178 FROM current_takes t 179 JOIN participants p ON p.id = member 180 WHERE t.team=%(team)s 181 ORDER BY p.username 182 """ 183 records = (cursor or self.db).all(TAKES, dict(team=self.id)) 184 return [r._asdict() for r in records] 185 186 def recompute_actual_takes(self, cursor, member=None): 187 """Get the tips and takes for this team and recompute the actual amounts. 188 189 To avoid deadlocks the given `cursor` should have already acquired an 190 exclusive lock on the `takes` table. 
191 """ 192 from liberapay.billing.payday import Payday 193 tips = [NS(t._asdict()) for t in cursor.all(""" 194 SELECT t.id, t.tipper, t.amount AS full_amount 195 , coalesce_currency_amount(( 196 SELECT sum(tr.amount, t.amount::currency) 197 FROM transfers tr 198 WHERE tr.tipper = t.tipper 199 AND tr.team = %(team_id)s 200 AND tr.context = 'take' 201 AND tr.status = 'succeeded' 202 ), t.amount::currency) AS past_transfers_sum 203 FROM current_tips t 204 JOIN participants p ON p.id = t.tipper 205 WHERE t.tippee = %(team_id)s 206 AND t.is_funded 207 AND p.is_suspended IS NOT true 208 """, dict(team_id=self.id))] 209 takes = [NS(r._asdict()) for r in (cursor or self.db).all(""" 210 SELECT t.* 211 FROM current_takes t 212 JOIN participants p ON p.id = t.member 213 WHERE t.team = %s 214 AND p.is_suspended IS NOT true 215 AND p.mangopay_user_id IS NOT NULL 216 """, (self.id,))] 217 # Recompute the takes 218 takes_sum = {} 219 tippers = {} 220 transfers, new_leftover = Payday.resolve_takes(tips, takes, self.main_currency) 221 for t in transfers: 222 if t.member in takes_sum: 223 takes_sum[t.member] += t.amount 224 else: 225 takes_sum[t.member] = t.amount 226 if t.member in tippers: 227 tippers[t.member].add(t.tipper) 228 else: 229 tippers[t.member] = set((t.tipper,)) 230 # Update the leftover 231 cursor.run("UPDATE participants SET leftover = %s WHERE id = %s", 232 (new_leftover, self.id)) 233 self.set_attributes(leftover=new_leftover) 234 # Update the cached amounts (actual_amount, taking, and receiving) 235 zero = ZERO[self.main_currency] 236 for take in takes: 237 member_id = take.member 238 old_amount = take.actual_amount or zero 239 new_amount = takes_sum.get(take.member, zero) 240 diff = new_amount - old_amount 241 if diff != 0: 242 take.actual_amount = new_amount 243 cursor.run(""" 244 UPDATE takes 245 SET actual_amount = %(actual_amount)s 246 WHERE id = %(id)s 247 """, take.__dict__) 248 ntippers = len(tippers.get(member_id, ())) 249 member_currency, old_taking = cursor.one( 250 "SELECT main_currency, taking FROM participants WHERE id = %s", (member_id,) 251 ) 252 diff = diff.convert(member_currency) 253 if old_taking + diff < 0: 254 # Make sure currency fluctuation doesn't result in a negative number 255 diff = -old_taking 256 cursor.run(""" 257 UPDATE participants 258 SET taking = (taking + %(diff)s) 259 , receiving = (receiving + %(diff)s) 260 , nteampatrons = ( 261 CASE WHEN (receiving + %(diff)s) = 0 THEN 0 262 WHEN nteampatrons < %(ntippers)s THEN %(ntippers)s 263 ELSE nteampatrons 264 END 265 ) 266 WHERE id=%(member_id)s 267 """, dict(member_id=member_id, diff=diff, ntippers=ntippers)) 268 if member and member.id == member_id: 269 r = cursor.one( 270 "SELECT taking, receiving FROM participants WHERE id = %s", 271 (member_id,) 272 ) 273 member.set_attributes(**r._asdict()) 274 return takes 275 276 @property 277 def nmembers(self): 278 assert self.kind == 'group' 279 return self.db.one(""" 280 SELECT COUNT(*) 281 FROM current_takes 282 WHERE team=%s 283 """, (self.id,)) 284 285 def get_members(self): 286 """Return an OrderedDict of member dicts. 
287 """ 288 takes = self.get_current_takes() 289 last_week = self.get_takes_last_week() 290 members = OrderedDict() 291 members.leftover = self.leftover 292 zero = ZERO[self.main_currency] 293 for take in takes: 294 member = {} 295 m_id = member['id'] = take['member_id'] 296 member['username'] = take['member_name'] 297 member['nominal_take'] = take['amount'].amount 298 member['actual_amount'] = take['actual_amount'] 299 member['last_week'] = last_week.get(m_id, zero).amount 300 x = self.compute_max_this_week(m_id, last_week) 301 member['max_this_week'] = x 302 members[member['id']] = member 303 return members 304 305 @property 306 def closed_by(self): 307 assert self.status == 'closed' 308 return self.db.one(""" 309 SELECT member 310 FROM takes 311 WHERE team = %s 312 ORDER BY mtime DESC 313 LIMIT 1 314 """, (self.id,)) 315 [end of liberapay/models/_mixin_team.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py --- a/liberapay/models/_mixin_team.py +++ b/liberapay/models/_mixin_team.py @@ -118,8 +118,7 @@ if cur_take is None: return None - if not isinstance(take, (None.__class__, Money)): - take = Money(take, self.main_currency) # TODO drop this + assert isinstance(take, (None.__class__, Money)) with self.db.get_cursor(cursor) as cursor: # Lock to avoid race conditions
{"golden_diff": "diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py\n--- a/liberapay/models/_mixin_team.py\n+++ b/liberapay/models/_mixin_team.py\n@@ -118,8 +118,7 @@\n if cur_take is None:\n return None\n \n- if not isinstance(take, (None.__class__, Money)):\n- take = Money(take, self.main_currency) # TODO drop this\n+ assert isinstance(take, (None.__class__, Money))\n \n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n", "issue": "Currency exchange rate between EUR and USD are mixed up\nIf I give euros to an account using dollar as main currency, the currency rate gets mixed up somewhere.\r\nFor instance if I give 5\u20ac, on the creator's page it only displays 4.05$ instead of 6.18$.\r\n\r\nCurrent exchange rate:\r\n* 5 EUR to USD: 6.18$\r\n* 5 USD to EUR: 4.05\u20ac\n", "before_files": [{"content": "\"\"\"Teams are groups of participants.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom statistics import median\n\nfrom mangopay.utils import Money\n\nfrom liberapay.constants import ZERO, TAKE_THROTTLING_THRESHOLD\nfrom liberapay.utils import NS\n\n\nclass MemberLimitReached(Exception): pass\n\n\nclass InactiveParticipantAdded(Exception): pass\n\n\nclass MixinTeam(object):\n\n def invite(self, invitee, inviter):\n assert self.kind == 'group'\n with self.db.get_cursor() as c:\n n_id = invitee.notify(\n 'team_invite',\n team=self.username,\n team_url=self.url(),\n inviter=inviter.username,\n )\n payload = dict(invitee=invitee.id, notification_id=n_id)\n self.add_event(c, 'invite', payload, inviter.id)\n\n def add_member(self, member, cursor=None):\n \"\"\"Add a member to this team.\n \"\"\"\n if self.nmembers >= 149:\n raise MemberLimitReached\n if member.status != 'active':\n raise InactiveParticipantAdded\n self.set_take_for(member, ZERO[self.main_currency], self, cursor=cursor)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes\n (ctime, member, team, amount, actual_amount, recorder)\n SELECT ctime, member, %(id)s, NULL, NULL, %(id)s\n FROM current_takes\n WHERE team=%(id)s\n \"\"\", dict(id=self.id))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.kind == 'group'\n return self.db.one(\"\"\"\n SELECT true\n FROM current_takes\n WHERE team=%s AND member=%s\n \"\"\", (team.id, self.id), default=False)\n\n def get_takes_last_week(self):\n \"\"\"Get the users' nominal takes last week. 
Used in throttling.\n \"\"\"\n assert self.kind == 'group'\n takes = {t.member: t.amount for t in self.db.all(\"\"\"\n\n SELECT DISTINCT ON (member) member, amount, mtime\n FROM takes\n WHERE team=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY member, mtime DESC\n\n \"\"\", (self.id,)) if t.amount}\n return takes\n\n def get_take_for(self, member):\n \"\"\"Return the nominal take for this member, or None.\n \"\"\"\n return self.db.one(\n \"SELECT amount FROM current_takes WHERE member = %s AND team = %s\",\n (member.id, self.id)\n )\n\n def compute_max_this_week(self, member_id, last_week):\n \"\"\"2x the member's take last week, or the member's take last week + the\n leftover, or last week's median take, or 1.00, or infinity if the takes\n were all zero last week or if throttling is disabled.\n \"\"\"\n if not self.throttle_takes:\n return\n sum_last_week = sum(last_week.values())\n if sum_last_week == 0:\n return\n sum_last_week = sum_last_week.convert(self.main_currency)\n initial_leftover = self.receiving - sum_last_week\n nonzero_last_week = [a.convert(self.main_currency).amount for a in last_week.values() if a]\n member_last_week = last_week.get(member_id, ZERO[self.main_currency]).convert(self.main_currency)\n return max(\n member_last_week * 2,\n member_last_week + initial_leftover,\n Money(median(nonzero_last_week or (0,)), self.main_currency),\n TAKE_THROTTLING_THRESHOLD[self.main_currency]\n )\n\n def set_take_for(self, member, take, recorder, check_max=True, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.kind == 'group'\n\n if recorder.id != self.id:\n cur_take = self.get_take_for(member)\n if cur_take is None:\n return None\n\n if not isinstance(take, (None.__class__, Money)):\n take = Money(take, self.main_currency) # TODO drop this\n\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Throttle the new take, if there is more than one member\n threshold = TAKE_THROTTLING_THRESHOLD[self.main_currency]\n if take and check_max and self.nmembers > 1 and take > threshold:\n last_week = self.get_takes_last_week()\n max_this_week = self.compute_max_this_week(member.id, last_week)\n if max_this_week is not None and take > max_this_week:\n take = max_this_week\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes\n (ctime, member, team, amount, actual_amount, recorder)\n SELECT COALESCE((\n SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), current_timestamp)\n , %(member)s\n , %(team)s\n , %(amount)s\n , CASE WHEN %(amount)s IS NULL THEN NULL ELSE\n COALESCE((\n SELECT actual_amount\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n ORDER BY mtime DESC\n LIMIT 1\n ), zero(%(amount)s::currency_amount))\n END\n , %(recorder)s\n\n \"\"\", dict(member=member.id, team=self.id, amount=take,\n recorder=recorder.id))\n # Recompute the actual takes and update the cached amounts\n self.recompute_actual_takes(cursor, member=member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n return take\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.kind == 'group'\n TAKES = \"\"\"\n SELECT p.id AS member_id, p.username AS member_name, p.avatar_url\n , (p.mangopay_user_id IS NOT NULL) AS is_identified, p.is_suspended\n , t.amount, t.actual_amount, t.ctime, 
t.mtime\n FROM current_takes t\n JOIN participants p ON p.id = member\n WHERE t.team=%(team)s\n ORDER BY p.username\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.id))\n return [r._asdict() for r in records]\n\n def recompute_actual_takes(self, cursor, member=None):\n \"\"\"Get the tips and takes for this team and recompute the actual amounts.\n\n To avoid deadlocks the given `cursor` should have already acquired an\n exclusive lock on the `takes` table.\n \"\"\"\n from liberapay.billing.payday import Payday\n tips = [NS(t._asdict()) for t in cursor.all(\"\"\"\n SELECT t.id, t.tipper, t.amount AS full_amount\n , coalesce_currency_amount((\n SELECT sum(tr.amount, t.amount::currency)\n FROM transfers tr\n WHERE tr.tipper = t.tipper\n AND tr.team = %(team_id)s\n AND tr.context = 'take'\n AND tr.status = 'succeeded'\n ), t.amount::currency) AS past_transfers_sum\n FROM current_tips t\n JOIN participants p ON p.id = t.tipper\n WHERE t.tippee = %(team_id)s\n AND t.is_funded\n AND p.is_suspended IS NOT true\n \"\"\", dict(team_id=self.id))]\n takes = [NS(r._asdict()) for r in (cursor or self.db).all(\"\"\"\n SELECT t.*\n FROM current_takes t\n JOIN participants p ON p.id = t.member\n WHERE t.team = %s\n AND p.is_suspended IS NOT true\n AND p.mangopay_user_id IS NOT NULL\n \"\"\", (self.id,))]\n # Recompute the takes\n takes_sum = {}\n tippers = {}\n transfers, new_leftover = Payday.resolve_takes(tips, takes, self.main_currency)\n for t in transfers:\n if t.member in takes_sum:\n takes_sum[t.member] += t.amount\n else:\n takes_sum[t.member] = t.amount\n if t.member in tippers:\n tippers[t.member].add(t.tipper)\n else:\n tippers[t.member] = set((t.tipper,))\n # Update the leftover\n cursor.run(\"UPDATE participants SET leftover = %s WHERE id = %s\",\n (new_leftover, self.id))\n self.set_attributes(leftover=new_leftover)\n # Update the cached amounts (actual_amount, taking, and receiving)\n zero = ZERO[self.main_currency]\n for take in takes:\n member_id = take.member\n old_amount = take.actual_amount or zero\n new_amount = takes_sum.get(take.member, zero)\n diff = new_amount - old_amount\n if diff != 0:\n take.actual_amount = new_amount\n cursor.run(\"\"\"\n UPDATE takes\n SET actual_amount = %(actual_amount)s\n WHERE id = %(id)s\n \"\"\", take.__dict__)\n ntippers = len(tippers.get(member_id, ()))\n member_currency, old_taking = cursor.one(\n \"SELECT main_currency, taking FROM participants WHERE id = %s\", (member_id,)\n )\n diff = diff.convert(member_currency)\n if old_taking + diff < 0:\n # Make sure currency fluctuation doesn't result in a negative number\n diff = -old_taking\n cursor.run(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n , nteampatrons = (\n CASE WHEN (receiving + %(diff)s) = 0 THEN 0\n WHEN nteampatrons < %(ntippers)s THEN %(ntippers)s\n ELSE nteampatrons\n END\n )\n WHERE id=%(member_id)s\n \"\"\", dict(member_id=member_id, diff=diff, ntippers=ntippers))\n if member and member.id == member_id:\n r = cursor.one(\n \"SELECT taking, receiving FROM participants WHERE id = %s\",\n (member_id,)\n )\n member.set_attributes(**r._asdict())\n return takes\n\n @property\n def nmembers(self):\n assert self.kind == 'group'\n return self.db.one(\"\"\"\n SELECT COUNT(*)\n FROM current_takes\n WHERE team=%s\n \"\"\", (self.id,))\n\n def get_members(self):\n \"\"\"Return an OrderedDict of member dicts.\n \"\"\"\n takes = self.get_current_takes()\n last_week = self.get_takes_last_week()\n members = OrderedDict()\n 
members.leftover = self.leftover\n zero = ZERO[self.main_currency]\n for take in takes:\n member = {}\n m_id = member['id'] = take['member_id']\n member['username'] = take['member_name']\n member['nominal_take'] = take['amount'].amount\n member['actual_amount'] = take['actual_amount']\n member['last_week'] = last_week.get(m_id, zero).amount\n x = self.compute_max_this_week(m_id, last_week)\n member['max_this_week'] = x\n members[member['id']] = member\n return members\n\n @property\n def closed_by(self):\n assert self.status == 'closed'\n return self.db.one(\"\"\"\n SELECT member\n FROM takes\n WHERE team = %s\n ORDER BY mtime DESC\n LIMIT 1\n \"\"\", (self.id,))\n", "path": "liberapay/models/_mixin_team.py"}]}
4080
144
gh_patches_debug_26403
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-2188
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set version for develop branch. According to [PEP 440](https://www.python.org/dev/peps/pep-0440/#developmental-releases), we should set version `X.YaN.devM`. </issue> <code> [start of setup_preprocessing.py] 1 # Copyright 2020 The ElasticDL Authors. All rights reserved. 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from setuptools import find_packages, setup 15 16 with open("elasticdl_preprocessing/requirements.txt") as f: 17 required_deps = f.read().splitlines() 18 19 extras = {} 20 with open("elasticdl_preprocessing/requirements-dev.txt") as f: 21 extras["develop"] = f.read().splitlines() 22 23 setup( 24 name="elasticdl_preprocessing", 25 version="develop", 26 description="A feature preprocessing library.", 27 long_description="This is an extension of the native Keras Preprocessing" 28 " Layers and Feature Column API from TensorFlow. We can develop our model" 29 " using the native high-level API from TensorFlow and our library." 30 " We can train this model using native TensorFlow or ElasticDL.", 31 long_description_content_type="text/markdown", 32 author="Ant Financial", 33 url="https://elasticdl.org", 34 install_requires=required_deps, 35 extras_require=extras, 36 python_requires=">=3.5", 37 packages=find_packages( 38 include=["elasticdl_preprocessing*"], exclude=["*test*"] 39 ), 40 package_data={"": ["requirements.txt"]}, 41 ) 42 [end of setup_preprocessing.py] [start of setup.py] 1 # Copyright 2020 The ElasticDL Authors. All rights reserved. 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 
13 14 from setuptools import find_packages, setup 15 16 with open("elasticdl/requirements.txt") as f: 17 required_deps = f.read().splitlines() 18 required_deps.append("elasticdl_client") 19 required_deps.append("elasticdl_preprocessing") 20 21 extras = {} 22 with open("elasticdl/requirements-dev.txt") as f: 23 extras["develop"] = f.read().splitlines() 24 25 setup( 26 name="elasticdl", 27 version="develop", 28 description="A Kubernetes-native Deep Learning Framework", 29 long_description="ElasticDL is a Kubernetes-native deep learning framework" 30 " built on top of TensorFlow 2.0 that supports" 31 " fault-tolerance and elastic scheduling.", 32 long_description_content_type="text/markdown", 33 author="Ant Financial", 34 url="https://elasticdl.org", 35 install_requires=required_deps, 36 extras_require=extras, 37 python_requires=">=3.5", 38 packages=find_packages( 39 exclude=[ 40 "*test*", 41 "elasticdl_client*", 42 "elasticdl_preprocessing*", 43 "model_zoo*", 44 ] 45 ), 46 package_data={ 47 "": [ 48 "proto/*.proto", 49 "docker/*", 50 "Makefile", 51 "requirements.txt", 52 "go/bin/elasticdl_ps", 53 "go/pkg/kernel/capi/*", 54 ] 55 }, 56 ) 57 [end of setup.py] [start of setup_client.py] 1 # Copyright 2020 The ElasticDL Authors. All rights reserved. 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from setuptools import find_packages, setup 15 16 with open("elasticdl_client/requirements.txt") as f: 17 required_deps = f.read().splitlines() 18 19 setup( 20 name="elasticdl_client", 21 version="develop", 22 description="The client command line tool for ElasticDL.", 23 long_description="ElasticDL Client is the client command line tool for" 24 " ElasticDL. Users can use it to submit distributed ElasticDL jobs to" 25 " a Kubernetes cluster. It also provides an easy way to build and push" 26 " Docker images for distributed ElasticDL jobs.", 27 long_description_content_type="text/markdown", 28 author="Ant Financial", 29 url="https://elasticdl.org", 30 install_requires=required_deps, 31 python_requires=">=3.5", 32 packages=find_packages(include=["elasticdl_client*"]), 33 package_data={"": ["requirements.txt"]}, 34 entry_points={"console_scripts": ["elasticdl=elasticdl_client.main:main"]}, 35 ) 36 [end of setup_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ setup( name="elasticdl", - version="develop", + version="0.2.0rc3.dev0", description="A Kubernetes-native Deep Learning Framework", long_description="ElasticDL is a Kubernetes-native deep learning framework" " built on top of TensorFlow 2.0 that supports" diff --git a/setup_client.py b/setup_client.py --- a/setup_client.py +++ b/setup_client.py @@ -18,7 +18,7 @@ setup( name="elasticdl_client", - version="develop", + version="0.2.0rc3.dev0", description="The client command line tool for ElasticDL.", long_description="ElasticDL Client is the client command line tool for" " ElasticDL. Users can use it to submit distributed ElasticDL jobs to" diff --git a/setup_preprocessing.py b/setup_preprocessing.py --- a/setup_preprocessing.py +++ b/setup_preprocessing.py @@ -22,7 +22,7 @@ setup( name="elasticdl_preprocessing", - version="develop", + version="0.2.0rc3.dev0", description="A feature preprocessing library.", long_description="This is an extension of the native Keras Preprocessing" " Layers and Feature Column API from TensorFlow. We can develop our model"
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n \n setup(\n name=\"elasticdl\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\ndiff --git a/setup_client.py b/setup_client.py\n--- a/setup_client.py\n+++ b/setup_client.py\n@@ -18,7 +18,7 @@\n \n setup(\n name=\"elasticdl_client\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"The client command line tool for ElasticDL.\",\n long_description=\"ElasticDL Client is the client command line tool for\"\n \" ElasticDL. Users can use it to submit distributed ElasticDL jobs to\"\ndiff --git a/setup_preprocessing.py b/setup_preprocessing.py\n--- a/setup_preprocessing.py\n+++ b/setup_preprocessing.py\n@@ -22,7 +22,7 @@\n \n setup(\n name=\"elasticdl_preprocessing\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"A feature preprocessing library.\",\n long_description=\"This is an extension of the native Keras Preprocessing\"\n \" Layers and Feature Column API from TensorFlow. We can develop our model\"\n", "issue": "Set version for develop branch.\nAccording to [PEP 440](https://www.python.org/dev/peps/pep-0440/#developmental-releases), we should set version `X.YaN.devM`.\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_preprocessing/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nextras = {}\nwith open(\"elasticdl_preprocessing/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_preprocessing\",\n version=\"develop\",\n description=\"A feature preprocessing library.\",\n long_description=\"This is an extension of the native Keras Preprocessing\"\n \" Layers and Feature Column API from TensorFlow. We can develop our model\"\n \" using the native high-level API from TensorFlow and our library.\"\n \" We can train this model using native TensorFlow or ElasticDL.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n include=[\"elasticdl_preprocessing*\"], exclude=[\"*test*\"]\n ),\n package_data={\"\": [\"requirements.txt\"]},\n)\n", "path": "setup_preprocessing.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl/requirements.txt\") as f:\n required_deps = f.read().splitlines()\nrequired_deps.append(\"elasticdl_client\")\nrequired_deps.append(\"elasticdl_preprocessing\")\n\nextras = {}\nwith open(\"elasticdl/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"develop\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\n \" fault-tolerance and elastic scheduling.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n exclude=[\n \"*test*\",\n \"elasticdl_client*\",\n \"elasticdl_preprocessing*\",\n \"model_zoo*\",\n ]\n ),\n package_data={\n \"\": [\n \"proto/*.proto\",\n \"docker/*\",\n \"Makefile\",\n \"requirements.txt\",\n \"go/bin/elasticdl_ps\",\n \"go/pkg/kernel/capi/*\",\n ]\n },\n)\n", "path": "setup.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_client/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_client\",\n version=\"develop\",\n description=\"The client command line tool for ElasticDL.\",\n long_description=\"ElasticDL Client is the client command line tool for\"\n \" ElasticDL. Users can use it to submit distributed ElasticDL jobs to\"\n \" a Kubernetes cluster. It also provides an easy way to build and push\"\n \" Docker images for distributed ElasticDL jobs.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n python_requires=\">=3.5\",\n packages=find_packages(include=[\"elasticdl_client*\"]),\n package_data={\"\": [\"requirements.txt\"]},\n entry_points={\"console_scripts\": [\"elasticdl=elasticdl_client.main:main\"]},\n)\n", "path": "setup_client.py"}]}
1,961
322
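The elasticdl record above replaces the placeholder `version="develop"` with `0.2.0rc3.dev0`, a release-candidate segment combined with a `.devN` developmental segment as PEP 440 describes. As a rough illustration, a version string can be checked against PEP 440 with the third-party `packaging` library, which the patched setup files themselves do not use:

```python
from packaging.version import Version, InvalidVersion

def is_pep440(version_string):
    """Return True if the string parses as a PEP 440 version."""
    try:
        Version(version_string)
    except InvalidVersion:
        return False
    return True

print(is_pep440("0.2.0rc3.dev0"))  # True: release candidate plus dev segment
print(is_pep440("develop"))        # False: the placeholder the patch removes
```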
gh_patches_debug_28187
rasdani/github-patches
git_diff
pymedusa__Medusa-6259
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [APP SUBMITTED]: TypeError: a bytes-like object is required, not 'str' ### INFO **Python Version**: `3.5.3 (default, Sep 27 2018, 17:25:39) [GCC 6.3.0 20170516]` **Operating System**: `Linux-4.9.0-4-amd64-x86_64-with-debian-9.6` **Locale**: `UTF-8` **Branch**: [feature/py3_2](../tree/feature/py3_2) **Database**: `44.12` **Commit**: pymedusa/Medusa@ac61f7ad628d70528cab9e8971071d79c0e90a95 **Link to Log**: https://gist.github.com/ee3a4c1fb32f4ed87dc90f4360ceba99 ### ERROR <pre> 2018-12-03 07:31:45 ERROR SNATCHQUEUE-SNATCH-114801 :: [ac61f7a] Snatch failed! For result: [HorribleSubs].Fairy.Tail.Final.Season.-.286.[1080p].mkv Traceback (most recent call last): File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/queue.py#L502">medusa/search/queue.py</a>", line 502, in run self.success = snatch_episode(result) File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/core.py#L167">medusa/search/core.py</a>", line 167, in snatch_episode result_downloaded = client.send_torrent(result) File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/generic.py#L246">medusa/clients/torrent/generic.py</a>", line 246, in send_torrent if not self._get_auth(): File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/rtorrent_client.py#L55">medusa/clients/torrent/rtorrent_client.py</a>", line 55, in _get_auth self.auth = RTorrent(self.host, None, None, True) File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L87">lib/rtorrent/__init__.py</a>", line 87, in __init__ self._verify_conn() File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L126">lib/rtorrent/__init__.py</a>", line 126, in _verify_conn assert 'system.client_version' in self._get_rpc_methods( File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L164">lib/rtorrent/__init__.py</a>", line 164, in _get_rpc_methods return(self._rpc_methods or self._update_rpc_methods()) File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L154">lib/rtorrent/__init__.py</a>", line 154, in _update_rpc_methods self._rpc_methods = self._get_conn().system.listMethods() File "/usr/lib/python3.5/xmlrpc/client.py", line 1092, in __call__ return self.__send(self.__name, args) File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L195">lib/rtorrent/lib/xmlrpc/scgi.py</a>", line 195, in __request verbose=self.__verbose File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L104">lib/rtorrent/lib/xmlrpc/scgi.py</a>", line 104, in request return self.single_request(host, handler, request_body, verbose) File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L135">lib/rtorrent/lib/xmlrpc/scgi.py</a>", line 135, in single_request sock.send(request_body) TypeError: a bytes-like object is required, not 'str' </pre> --- _STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators </issue> <code> [start of medusa/session/hooks.py] 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 5 import logging 6 7 from medusa.logger.adapters.style import BraceAdapter 8 9 from six import 
text_type 10 11 log = BraceAdapter(logging.getLogger(__name__)) 12 log.logger.addHandler(logging.NullHandler()) 13 14 15 def log_url(response, **kwargs): 16 """Response hook to log request URL.""" 17 request = response.request 18 log.debug( 19 '{method} URL: {url} [Status: {status}]', { 20 'method': request.method, 21 'url': request.url, 22 'status': response.status_code, 23 } 24 ) 25 log.debug('User-Agent: {}'.format(request.headers['User-Agent'])) 26 27 if request.method.upper() == 'POST': 28 if request.body: 29 if 'multipart/form-data' not in request.headers.get('content-type', ''): 30 body = request.body 31 else: 32 body = request.body[1:99].replace('\n', ' ') + '...' 33 else: 34 body = '' 35 36 # try to log post data using various codecs to decode 37 if isinstance(body, text_type): 38 log.debug('With post data: {0}', body) 39 return 40 41 codecs = ('utf-8', 'latin1', 'cp1252') 42 for codec in codecs: 43 try: 44 data = body.decode(codec) 45 except UnicodeError as error: 46 log.debug('Failed to decode post data as {codec}: {msg}', 47 {'codec': codec, 'msg': error}) 48 else: 49 log.debug('With post data: {0}', data) 50 break 51 else: 52 log.warning('Failed to decode post data with {codecs}', 53 {'codecs': codecs}) 54 [end of medusa/session/hooks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/medusa/session/hooks.py b/medusa/session/hooks.py --- a/medusa/session/hooks.py +++ b/medusa/session/hooks.py @@ -6,7 +6,7 @@ from medusa.logger.adapters.style import BraceAdapter -from six import text_type +from six import ensure_text log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) @@ -26,28 +26,12 @@ if request.method.upper() == 'POST': if request.body: + text_body = ensure_text(request.body) if 'multipart/form-data' not in request.headers.get('content-type', ''): - body = request.body + body = text_body + elif len(text_body) > 99: + body = text_body[0:99].replace('\n', ' ') + '...' else: - body = request.body[1:99].replace('\n', ' ') + '...' - else: - body = '' + body = text_body.replace('\n', ' ') - # try to log post data using various codecs to decode - if isinstance(body, text_type): log.debug('With post data: {0}', body) - return - - codecs = ('utf-8', 'latin1', 'cp1252') - for codec in codecs: - try: - data = body.decode(codec) - except UnicodeError as error: - log.debug('Failed to decode post data as {codec}: {msg}', - {'codec': codec, 'msg': error}) - else: - log.debug('With post data: {0}', data) - break - else: - log.warning('Failed to decode post data with {codecs}', - {'codecs': codecs})
{"golden_diff": "diff --git a/medusa/session/hooks.py b/medusa/session/hooks.py\n--- a/medusa/session/hooks.py\n+++ b/medusa/session/hooks.py\n@@ -6,7 +6,7 @@\n \n from medusa.logger.adapters.style import BraceAdapter\n \n-from six import text_type\n+from six import ensure_text\n \n log = BraceAdapter(logging.getLogger(__name__))\n log.logger.addHandler(logging.NullHandler())\n@@ -26,28 +26,12 @@\n \n if request.method.upper() == 'POST':\n if request.body:\n+ text_body = ensure_text(request.body)\n if 'multipart/form-data' not in request.headers.get('content-type', ''):\n- body = request.body\n+ body = text_body\n+ elif len(text_body) > 99:\n+ body = text_body[0:99].replace('\\n', ' ') + '...'\n else:\n- body = request.body[1:99].replace('\\n', ' ') + '...'\n- else:\n- body = ''\n+ body = text_body.replace('\\n', ' ')\n \n- # try to log post data using various codecs to decode\n- if isinstance(body, text_type):\n log.debug('With post data: {0}', body)\n- return\n-\n- codecs = ('utf-8', 'latin1', 'cp1252')\n- for codec in codecs:\n- try:\n- data = body.decode(codec)\n- except UnicodeError as error:\n- log.debug('Failed to decode post data as {codec}: {msg}',\n- {'codec': codec, 'msg': error})\n- else:\n- log.debug('With post data: {0}', data)\n- break\n- else:\n- log.warning('Failed to decode post data with {codecs}',\n- {'codecs': codecs})\n", "issue": "[APP SUBMITTED]: TypeError: a bytes-like object is required, not 'str'\n\n### INFO\n**Python Version**: `3.5.3 (default, Sep 27 2018, 17:25:39) [GCC 6.3.0 20170516]`\n**Operating System**: `Linux-4.9.0-4-amd64-x86_64-with-debian-9.6`\n**Locale**: `UTF-8`\n**Branch**: [feature/py3_2](../tree/feature/py3_2)\n**Database**: `44.12`\n**Commit**: pymedusa/Medusa@ac61f7ad628d70528cab9e8971071d79c0e90a95\n**Link to Log**: https://gist.github.com/ee3a4c1fb32f4ed87dc90f4360ceba99\n### ERROR\n<pre>\n2018-12-03 07:31:45 ERROR SNATCHQUEUE-SNATCH-114801 :: [ac61f7a] Snatch failed! 
For result: [HorribleSubs].Fairy.Tail.Final.Season.-.286.[1080p].mkv\nTraceback (most recent call last):\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/queue.py#L502\">medusa/search/queue.py</a>\", line 502, in run\n self.success = snatch_episode(result)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/core.py#L167\">medusa/search/core.py</a>\", line 167, in snatch_episode\n result_downloaded = client.send_torrent(result)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/generic.py#L246\">medusa/clients/torrent/generic.py</a>\", line 246, in send_torrent\n if not self._get_auth():\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/rtorrent_client.py#L55\">medusa/clients/torrent/rtorrent_client.py</a>\", line 55, in _get_auth\n self.auth = RTorrent(self.host, None, None, True)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L87\">lib/rtorrent/__init__.py</a>\", line 87, in __init__\n self._verify_conn()\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L126\">lib/rtorrent/__init__.py</a>\", line 126, in _verify_conn\n assert 'system.client_version' in self._get_rpc_methods(\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L164\">lib/rtorrent/__init__.py</a>\", line 164, in _get_rpc_methods\n return(self._rpc_methods or self._update_rpc_methods())\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L154\">lib/rtorrent/__init__.py</a>\", line 154, in _update_rpc_methods\n self._rpc_methods = self._get_conn().system.listMethods()\n File \"/usr/lib/python3.5/xmlrpc/client.py\", line 1092, in __call__\n return self.__send(self.__name, args)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L195\">lib/rtorrent/lib/xmlrpc/scgi.py</a>\", line 195, in __request\n verbose=self.__verbose\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L104\">lib/rtorrent/lib/xmlrpc/scgi.py</a>\", line 104, in request\n return self.single_request(host, handler, request_body, verbose)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L135\">lib/rtorrent/lib/xmlrpc/scgi.py</a>\", line 135, in single_request\n sock.send(request_body)\nTypeError: a bytes-like object is required, not 'str'\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\ndef log_url(response, **kwargs):\n \"\"\"Response hook to log request URL.\"\"\"\n request = response.request\n log.debug(\n '{method} URL: {url} [Status: {status}]', {\n 'method': request.method,\n 'url': request.url,\n 'status': response.status_code,\n }\n )\n log.debug('User-Agent: {}'.format(request.headers['User-Agent']))\n\n if request.method.upper() == 'POST':\n if request.body:\n if 'multipart/form-data' not in request.headers.get('content-type', ''):\n body = 
request.body\n else:\n body = request.body[1:99].replace('\\n', ' ') + '...'\n else:\n body = ''\n\n # try to log post data using various codecs to decode\n if isinstance(body, text_type):\n log.debug('With post data: {0}', body)\n return\n\n codecs = ('utf-8', 'latin1', 'cp1252')\n for codec in codecs:\n try:\n data = body.decode(codec)\n except UnicodeError as error:\n log.debug('Failed to decode post data as {codec}: {msg}',\n {'codec': codec, 'msg': error})\n else:\n log.debug('With post data: {0}', data)\n break\n else:\n log.warning('Failed to decode post data with {codecs}',\n {'codecs': codecs})\n", "path": "medusa/session/hooks.py"}]}
2,432
403
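The Medusa patch above leans on `six.ensure_text` so the logging hook never calls `.decode()` by hand and never trips over a bytes body. A small stand-alone sketch of the behaviour it relies on, with an invented request body, is:

```python
from six import ensure_text

assert ensure_text(b"name=Fairy+Tail") == "name=Fairy+Tail"   # bytes are decoded (UTF-8 by default)
assert ensure_text("already text") == "already text"          # text passes through unchanged

# Truncation as in the patched hook (99 chars plus an ellipsis); the threshold
# is copied from the diff above, the sample body is invented.
body = ensure_text(b"a" * 200)
preview = body[0:99].replace("\n", " ") + "..." if len(body) > 99 else body
print(len(preview))  # 102
```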
gh_patches_debug_1874
rasdani/github-patches
git_diff
cloudtools__troposphere-120
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update metadata to include property keys Within the CloudFormation metadata object for auto scaling launch configurations, it'd be nice to be able to rely on the validators within the template to sanity check the allowed keys within the 'config' dictionary: ``` "Resources": { "MyInstance": { "Type": "AWS::EC2::Instance", "Metadata" : { "AWS::CloudFormation::Init" : { "config" : { "packages" : { : }, "groups" : { : }, "users" : { : }, "sources" : { : }, "files" : { : }, "commands" : { : }, "services" : { : } } } }, "Properties": { : } } } ``` Currently the implementation of troposphere.cloudformation.InitConfig only contains one key which is 'files' </issue> <code> [start of troposphere/cloudformation.py] 1 # Copyright (c) 2013, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSHelperFn, AWSObject, AWSProperty, Ref 7 from .validators import integer 8 9 10 class Stack(AWSObject): 11 type = "AWS::CloudFormation::Stack" 12 13 props = { 14 'TemplateURL': (basestring, True), 15 'TimeoutInMinutes': (integer, False), 16 'Parameters': (dict, False), 17 } 18 19 20 class WaitCondition(AWSObject): 21 type = "AWS::CloudFormation::WaitCondition" 22 23 props = { 24 'Count': (integer, False), 25 'Handle': (Ref, True), 26 'Timeout': (integer, True), 27 } 28 29 30 class WaitConditionHandle(AWSObject): 31 type = "AWS::CloudFormation::WaitConditionHandle" 32 33 props = {} 34 35 36 class InitFileContext(AWSHelperFn): 37 def __init__(self, data): 38 self.data = data 39 40 def JSONrepr(self): 41 return self.data 42 43 44 class InitFile(AWSProperty): 45 props = { 46 'content': (basestring, True), 47 'mode': (basestring, False), 48 'owner': (basestring, False), 49 'group': (basestring, False), 50 'context': (InitFileContext, False) 51 } 52 53 54 class InitFiles(AWSHelperFn): 55 def __init__(self, data): 56 self.validate(data) 57 self.data = data 58 59 def validate(self, data): 60 for k in data: 61 if not isinstance(data[k], InitFile): 62 raise ValueError("File '" + k + "' must be of type InitFile") 63 64 def JSONrepr(self): 65 return self.data 66 67 68 class InitConfig(AWSProperty): 69 props = { 70 'files': (dict, False) 71 } 72 73 74 class Init(AWSHelperFn): 75 def __init__(self, data): 76 self.validate(data) 77 self.data = {"AWS::CloudFormation::Init": data} 78 79 def validate(self, data): 80 if 'config' not in data: 81 raise ValueError('config property is required') 82 if not isinstance(data['config'], InitConfig): 83 raise ValueError( 84 'config property must be of type autoscaling.InitConfig' 85 ) 86 87 def JSONrepr(self): 88 return self.data 89 [end of troposphere/cloudformation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py --- a/troposphere/cloudformation.py +++ b/troposphere/cloudformation.py @@ -67,7 +67,13 @@ class InitConfig(AWSProperty): props = { - 'files': (dict, False) + 'groups': (dict, False), + 'users': (dict, False), + 'sources': (dict, False), + 'packages': (dict, False), + 'files': (dict, False), + 'commands': (dict, False), + 'services': (dict, False) }
{"golden_diff": "diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py\n--- a/troposphere/cloudformation.py\n+++ b/troposphere/cloudformation.py\n@@ -67,7 +67,13 @@\n \n class InitConfig(AWSProperty):\n props = {\n- 'files': (dict, False)\n+ 'groups': (dict, False),\n+ 'users': (dict, False),\n+ 'sources': (dict, False),\n+ 'packages': (dict, False),\n+ 'files': (dict, False),\n+ 'commands': (dict, False),\n+ 'services': (dict, False)\n }\n", "issue": "Update metadata to include property keys\nWithin the CloudFormation metadata object for auto scaling launch configurations, it'd be nice to be able to rely on the validators within the template to sanity check the allowed keys within the 'config' dictionary: \n\n```\n\"Resources\": {\n \"MyInstance\": {\n \"Type\": \"AWS::EC2::Instance\",\n \"Metadata\" : {\n \"AWS::CloudFormation::Init\" : {\n \"config\" : {\n \"packages\" : {\n :\n },\n \"groups\" : {\n :\n },\n \"users\" : {\n :\n },\n \"sources\" : {\n :\n },\n \"files\" : {\n :\n },\n \"commands\" : {\n :\n },\n \"services\" : {\n :\n }\n }\n }\n },\n \"Properties\": {\n :\n }\n }\n}\n```\n\nCurrently the implementation of troposphere.cloudformation.InitConfig only contains one key which is 'files'\n\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n 'TemplateURL': (basestring, True),\n 'TimeoutInMinutes': (integer, False),\n 'Parameters': (dict, False),\n }\n\n\nclass WaitCondition(AWSObject):\n type = \"AWS::CloudFormation::WaitCondition\"\n\n props = {\n 'Count': (integer, False),\n 'Handle': (Ref, True),\n 'Timeout': (integer, True),\n }\n\n\nclass WaitConditionHandle(AWSObject):\n type = \"AWS::CloudFormation::WaitConditionHandle\"\n\n props = {}\n\n\nclass InitFileContext(AWSHelperFn):\n def __init__(self, data):\n self.data = data\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitFile(AWSProperty):\n props = {\n 'content': (basestring, True),\n 'mode': (basestring, False),\n 'owner': (basestring, False),\n 'group': (basestring, False),\n 'context': (InitFileContext, False)\n }\n\n\nclass InitFiles(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = data\n\n def validate(self, data):\n for k in data:\n if not isinstance(data[k], InitFile):\n raise ValueError(\"File '\" + k + \"' must be of type InitFile\")\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitConfig(AWSProperty):\n props = {\n 'files': (dict, False)\n }\n\n\nclass Init(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = {\"AWS::CloudFormation::Init\": data}\n\n def validate(self, data):\n if 'config' not in data:\n raise ValueError('config property is required')\n if not isinstance(data['config'], InitConfig):\n raise ValueError(\n 'config property must be of type autoscaling.InitConfig'\n )\n\n def JSONrepr(self):\n return self.data\n", "path": "troposphere/cloudformation.py"}]}
1,419
143
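With the widened `InitConfig` property list from the troposphere record above, all seven cfn-init sections become valid keyword arguments. A brief usage sketch follows; the package, file, and service names are made up, and with the pre-patch property list the extra keywords would be rejected:

```python
from troposphere.cloudformation import Init, InitConfig

boot_config = InitConfig(
    packages={"yum": {"httpd": []}},
    files={"/etc/motd": {"content": "hello", "mode": "000644"}},
    services={"sysvinit": {"httpd": {"enabled": "true"}}},
)
metadata = Init({"config": boot_config})  # wraps it under AWS::CloudFormation::Init
```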
gh_patches_debug_21385
rasdani/github-patches
git_diff
joke2k__faker-529
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Decrease binary dummy length `fake.binary(length=1048576)` generate too huge binary data but it is useless at docs build time. I tested two case, 1048576 and 1024. ``` console (faker) [item4@item4-mbp docs]$ echo "1048576" 1048576 (faker) [item4@item4-mbp docs]$ time make html sphinx-build -b html -d _build/doctrees . _build/html Running Sphinx v1.4.8 loading pickled environment... done building [mo]: targets for 0 po files that are out of date building [html]: targets for 61 source files that are out of date updating environment: 0 added, 61 changed, 0 removed reading sources... [100%] providers/faker.providers.user_agent looking for now-outdated files... none found pickling environment... done checking consistency... done preparing documents... done writing output... [100%] providers/faker.providers.user_agent generating indices... genindex writing additional pages... search copying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist done copying extra files... done dumping search index in English (code: en) ... done dumping object inventory... done build succeeded, 1 warning. Build finished. The HTML pages are in _build/html. real 3m24.875s user 3m3.222s sys 0m10.986s (faker) [item4@item4-mbp docs]$ echo "1024" 1024 (faker) [item4@item4-mbp docs]$ time make html sphinx-build -b html -d _build/doctrees . _build/html Running Sphinx v1.4.8 loading pickled environment... done building [mo]: targets for 0 po files that are out of date building [html]: targets for 61 source files that are out of date updating environment: 0 added, 61 changed, 0 removed reading sources... [100%] providers/faker.providers.user_agent looking for now-outdated files... none found pickling environment... done checking consistency... done preparing documents... done writing output... [100%] providers/faker.providers.user_agent generating indices... genindex writing additional pages... search copying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist done copying extra files... done dumping search index in English (code: en) ... done dumping object inventory... done build succeeded, 1 warning. Build finished. The HTML pages are in _build/html. real 0m46.086s user 0m43.772s sys 0m1.479s ``` Does docs really need 1048576 length binary? 
</issue> <code> [start of faker/documentor.py] 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 5 import inspect 6 7 from faker import utils 8 9 10 class Documentor(object): 11 12 def __init__(self, generator): 13 """ 14 :param generator: a localized Generator with providers filled, 15 for which to write the documentation 16 :type generator: faker.Generator() 17 """ 18 self.generator = generator 19 self.max_name_len = 0 20 self.already_generated = [] 21 22 def get_formatters(self, locale=None, excludes=None, **kwargs): 23 24 self.max_name_len = 0 25 self.already_generated = [] if excludes is None else excludes[:] 26 formatters = [] 27 providers = self.generator.get_providers() 28 for provider in providers[::-1]: # reverse 29 if locale and provider.__lang__ != locale: 30 continue 31 formatters.append( 32 (provider, self.get_provider_formatters(provider, **kwargs)) 33 ) 34 return formatters 35 36 def get_provider_formatters(self, provider, prefix='fake.', 37 with_args=True, with_defaults=True): 38 39 formatters = {} 40 41 for name, method in inspect.getmembers(provider, inspect.ismethod): 42 43 # skip 'private' method and inherited methods 44 if name.startswith('_') or name in self.already_generated: 45 continue 46 47 arguments = [] 48 49 if with_args: 50 # retrieve all parameter 51 argspec = inspect.getargspec(method) 52 53 lst = [x for x in argspec.args if x not in ['self', 'cls']] 54 for i, arg in enumerate(lst): 55 56 if argspec.defaults and with_defaults: 57 58 try: 59 default = argspec.defaults[i] 60 if utils.is_string(default): 61 default = utils.quote(default) 62 else: 63 # TODO check default type 64 default = "{0}".format(default) 65 66 arg = "{0}={1}".format(arg, default) 67 68 except IndexError: 69 pass 70 71 arguments.append(arg) 72 if with_args == 'first': 73 break 74 75 if with_args != 'first': 76 if argspec.varargs: 77 arguments.append('*' + argspec.varargs) 78 if argspec.keywords: 79 arguments.append('**' + argspec.keywords) 80 81 # build fake method signature 82 signature = "{0}{1}({2})".format(prefix, 83 name, 84 ", ".join(arguments)) 85 86 # make a fake example 87 example = self.generator.format(name) 88 89 formatters[signature] = example 90 91 self.max_name_len = max(self.max_name_len, len(signature)) 92 self.already_generated.append(name) 93 94 return formatters 95 96 @staticmethod 97 def get_provider_name(provider_class): 98 return provider_class.__provider__ 99 [end of faker/documentor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/documentor.py b/faker/documentor.py --- a/faker/documentor.py +++ b/faker/documentor.py @@ -39,12 +39,16 @@ formatters = {} for name, method in inspect.getmembers(provider, inspect.ismethod): - # skip 'private' method and inherited methods if name.startswith('_') or name in self.already_generated: continue arguments = [] + faker_args = [] + faker_kwargs = {} + + if name == 'binary': + faker_kwargs['length'] = 1024 if with_args: # retrieve all parameter @@ -84,7 +88,7 @@ ", ".join(arguments)) # make a fake example - example = self.generator.format(name) + example = self.generator.format(name, *faker_args, **faker_kwargs) formatters[signature] = example
{"golden_diff": "diff --git a/faker/documentor.py b/faker/documentor.py\n--- a/faker/documentor.py\n+++ b/faker/documentor.py\n@@ -39,12 +39,16 @@\n formatters = {}\n \n for name, method in inspect.getmembers(provider, inspect.ismethod):\n-\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n \n arguments = []\n+ faker_args = []\n+ faker_kwargs = {}\n+\n+ if name == 'binary':\n+ faker_kwargs['length'] = 1024\n \n if with_args:\n # retrieve all parameter\n@@ -84,7 +88,7 @@\n \", \".join(arguments))\n \n # make a fake example\n- example = self.generator.format(name)\n+ example = self.generator.format(name, *faker_args, **faker_kwargs)\n \n formatters[signature] = example\n", "issue": "Decrease binary dummy length\n`fake.binary(length=1048576)` generate too huge binary data but it is useless at docs build time.\n\nI tested two case, 1048576 and 1024.\n\n``` console\n(faker) [item4@item4-mbp docs]$ echo \"1048576\"\n1048576\n(faker) [item4@item4-mbp docs]$ time make html\nsphinx-build -b html -d _build/doctrees . _build/html\nRunning Sphinx v1.4.8\nloading pickled environment... done\nbuilding [mo]: targets for 0 po files that are out of date\nbuilding [html]: targets for 61 source files that are out of date\nupdating environment: 0 added, 61 changed, 0 removed\nreading sources... [100%] providers/faker.providers.user_agent\nlooking for now-outdated files... none found\npickling environment... done\nchecking consistency... done\npreparing documents... done\nwriting output... [100%] providers/faker.providers.user_agent\ngenerating indices... genindex\nwriting additional pages... search\ncopying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist\ndone\ncopying extra files... done\ndumping search index in English (code: en) ... done\ndumping object inventory... done\nbuild succeeded, 1 warning.\n\nBuild finished. The HTML pages are in _build/html.\n\nreal 3m24.875s\nuser 3m3.222s\nsys 0m10.986s\n(faker) [item4@item4-mbp docs]$ echo \"1024\"\n1024\n(faker) [item4@item4-mbp docs]$ time make html\nsphinx-build -b html -d _build/doctrees . _build/html\nRunning Sphinx v1.4.8\nloading pickled environment... done\nbuilding [mo]: targets for 0 po files that are out of date\nbuilding [html]: targets for 61 source files that are out of date\nupdating environment: 0 added, 61 changed, 0 removed\nreading sources... [100%] providers/faker.providers.user_agent\nlooking for now-outdated files... none found\npickling environment... done\nchecking consistency... done\npreparing documents... done\nwriting output... [100%] providers/faker.providers.user_agent\ngenerating indices... genindex\nwriting additional pages... search\ncopying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist\ndone\ncopying extra files... done\ndumping search index in English (code: en) ... done\ndumping object inventory... done\nbuild succeeded, 1 warning.\n\nBuild finished. 
The HTML pages are in _build/html.\n\nreal 0m46.086s\nuser 0m43.772s\nsys 0m1.479s\n```\n\nDoes docs really need 1048576 length binary?\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport inspect\n\nfrom faker import utils\n\n\nclass Documentor(object):\n\n def __init__(self, generator):\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len = 0\n self.already_generated = []\n\n def get_formatters(self, locale=None, excludes=None, **kwargs):\n\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs))\n )\n return formatters\n\n def get_provider_formatters(self, provider, prefix='fake.',\n with_args=True, with_defaults=True):\n\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n\n arguments = []\n\n if with_args:\n # retrieve all parameter\n argspec = inspect.getargspec(method)\n\n lst = [x for x in argspec.args if x not in ['self', 'cls']]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if utils.is_string(default):\n default = utils.quote(default)\n else:\n # TODO check default type\n default = \"{0}\".format(default)\n\n arg = \"{0}={1}\".format(arg, default)\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == 'first':\n break\n\n if with_args != 'first':\n if argspec.varargs:\n arguments.append('*' + argspec.varargs)\n if argspec.keywords:\n arguments.append('**' + argspec.keywords)\n\n # build fake method signature\n signature = \"{0}{1}({2})\".format(prefix,\n name,\n \", \".join(arguments))\n\n # make a fake example\n example = self.generator.format(name)\n\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class):\n return provider_class.__provider__\n", "path": "faker/documentor.py"}]}
2,007
208
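The faker record above fixes the documentation build time by having the documentor pass `length=1024` when it renders the `binary` example instead of leaning on the one-megabyte default. The size difference it avoids is easy to see directly through the public API:

```python
from faker import Faker

fake = Faker()
small = fake.binary(length=1024)       # the size the patched documentor asks for
large = fake.binary(length=1048576)    # the old default-sized docs example
print(len(small), len(large))          # 1024 1048576
```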
gh_patches_debug_18357
rasdani/github-patches
git_diff
pyca__cryptography-414
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Binding.is_avaialble can print to stderr Specifically it'll print an error message, you can see this if you run `tests/hazmat/bindings/test_bindings.py` </issue> <code> [start of cryptography/hazmat/bindings/openssl/binding.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from __future__ import absolute_import, division, print_function 15 16 from cryptography.hazmat.bindings.utils import ( 17 build_ffi, binding_available 18 ) 19 20 _OSX_PRE_INCLUDE = """ 21 #ifdef __APPLE__ 22 #include <AvailabilityMacros.h> 23 #define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \ 24 DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 25 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 26 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 27 #endif 28 """ 29 30 _OSX_POST_INCLUDE = """ 31 #ifdef __APPLE__ 32 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 33 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \ 34 __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 35 #endif 36 """ 37 38 39 class Binding(object): 40 """ 41 OpenSSL API wrapper. 42 """ 43 _module_prefix = "cryptography.hazmat.bindings.openssl." 44 _modules = [ 45 "asn1", 46 "bignum", 47 "bio", 48 "conf", 49 "crypto", 50 "dh", 51 "dsa", 52 "engine", 53 "err", 54 "evp", 55 "hmac", 56 "nid", 57 "objects", 58 "opensslv", 59 "pem", 60 "pkcs7", 61 "pkcs12", 62 "rand", 63 "rsa", 64 "ssl", 65 "x509", 66 "x509name", 67 "x509v3", 68 ] 69 70 ffi = None 71 lib = None 72 73 def __init__(self): 74 self._ensure_ffi_initialized() 75 76 @classmethod 77 def _ensure_ffi_initialized(cls): 78 if cls.ffi is not None and cls.lib is not None: 79 return 80 81 cls.ffi, cls.lib = build_ffi(cls._module_prefix, cls._modules, 82 _OSX_PRE_INCLUDE, _OSX_POST_INCLUDE, 83 ["crypto", "ssl"]) 84 85 @classmethod 86 def is_available(cls): 87 return binding_available(cls._ensure_ffi_initialized) 88 [end of cryptography/hazmat/bindings/openssl/binding.py] [start of cryptography/hazmat/bindings/utils.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from __future__ import absolute_import, division, print_function 15 16 import sys 17 18 import cffi 19 20 21 def build_ffi(module_prefix, modules, pre_include, post_include, libraries): 22 """ 23 Modules listed in ``modules`` should have the following attributes: 24 25 * ``INCLUDES``: A string containing C includes. 
26 * ``TYPES``: A string containing C declarations for types. 27 * ``FUNCTIONS``: A string containing C declarations for functions. 28 * ``MACROS``: A string containing C declarations for any macros. 29 * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this 30 can be used to do things like test for a define and provide an 31 alternate implementation based on that. 32 * ``CONDITIONAL_NAMES``: A dict mapping strings of condition names from the 33 library to a list of names which will not be present without the 34 condition. 35 """ 36 ffi = cffi.FFI() 37 includes = [] 38 functions = [] 39 macros = [] 40 customizations = [] 41 for name in modules: 42 module_name = module_prefix + name 43 __import__(module_name) 44 module = sys.modules[module_name] 45 46 ffi.cdef(module.TYPES) 47 48 macros.append(module.MACROS) 49 functions.append(module.FUNCTIONS) 50 includes.append(module.INCLUDES) 51 customizations.append(module.CUSTOMIZATIONS) 52 53 # loop over the functions & macros after declaring all the types 54 # so we can set interdependent types in different files and still 55 # have them all defined before we parse the funcs & macros 56 for func in functions: 57 ffi.cdef(func) 58 for macro in macros: 59 ffi.cdef(macro) 60 61 # We include functions here so that if we got any of their definitions 62 # wrong, the underlying C compiler will explode. In C you are allowed 63 # to re-declare a function if it has the same signature. That is: 64 # int foo(int); 65 # int foo(int); 66 # is legal, but the following will fail to compile: 67 # int foo(int); 68 # int foo(short); 69 lib = ffi.verify( 70 source="\n".join( 71 [pre_include] + 72 includes + 73 [post_include] + 74 functions + 75 customizations 76 ), 77 libraries=libraries, 78 ext_package="cryptography", 79 ) 80 81 for name in modules: 82 module_name = module_prefix + name 83 module = sys.modules[module_name] 84 for condition, names in module.CONDITIONAL_NAMES.items(): 85 if not getattr(lib, condition): 86 for name in names: 87 delattr(lib, name) 88 89 return ffi, lib 90 91 92 def binding_available(initializer): 93 try: 94 initializer() 95 return True 96 except cffi.VerificationError: 97 return False 98 [end of cryptography/hazmat/bindings/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py --- a/cryptography/hazmat/bindings/openssl/binding.py +++ b/cryptography/hazmat/bindings/openssl/binding.py @@ -13,9 +13,8 @@ from __future__ import absolute_import, division, print_function -from cryptography.hazmat.bindings.utils import ( - build_ffi, binding_available -) +from cryptography.hazmat.bindings.utils import build_ffi + _OSX_PRE_INCLUDE = """ #ifdef __APPLE__ @@ -84,4 +83,5 @@ @classmethod def is_available(cls): - return binding_available(cls._ensure_ffi_initialized) + # OpenSSL is the only binding so for now it must always be available + return True diff --git a/cryptography/hazmat/bindings/utils.py b/cryptography/hazmat/bindings/utils.py --- a/cryptography/hazmat/bindings/utils.py +++ b/cryptography/hazmat/bindings/utils.py @@ -87,11 +87,3 @@ delattr(lib, name) return ffi, lib - - -def binding_available(initializer): - try: - initializer() - return True - except cffi.VerificationError: - return False
{"golden_diff": "diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py\n--- a/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/cryptography/hazmat/bindings/openssl/binding.py\n@@ -13,9 +13,8 @@\n \n from __future__ import absolute_import, division, print_function\n \n-from cryptography.hazmat.bindings.utils import (\n- build_ffi, binding_available\n-)\n+from cryptography.hazmat.bindings.utils import build_ffi\n+\n \n _OSX_PRE_INCLUDE = \"\"\"\n #ifdef __APPLE__\n@@ -84,4 +83,5 @@\n \n @classmethod\n def is_available(cls):\n- return binding_available(cls._ensure_ffi_initialized)\n+ # OpenSSL is the only binding so for now it must always be available\n+ return True\ndiff --git a/cryptography/hazmat/bindings/utils.py b/cryptography/hazmat/bindings/utils.py\n--- a/cryptography/hazmat/bindings/utils.py\n+++ b/cryptography/hazmat/bindings/utils.py\n@@ -87,11 +87,3 @@\n delattr(lib, name)\n \n return ffi, lib\n-\n-\n-def binding_available(initializer):\n- try:\n- initializer()\n- return True\n- except cffi.VerificationError:\n- return False\n", "issue": "Binding.is_avaialble can print to stderr\nSpecifically it'll print an error message, you can see this if you run `tests/hazmat/bindings/test_bindings.py`\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography.hazmat.bindings.utils import (\n build_ffi, binding_available\n)\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n ]\n\n ffi = None\n lib = None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.ffi is not None and cls.lib is not None:\n return\n\n cls.ffi, cls.lib = build_ffi(cls._module_prefix, cls._modules,\n _OSX_PRE_INCLUDE, _OSX_POST_INCLUDE,\n [\"crypto\", \"ssl\"])\n\n @classmethod\n def is_available(cls):\n return binding_available(cls._ensure_ffi_initialized)\n", "path": "cryptography/hazmat/bindings/openssl/binding.py"}, {"content": "# Licensed under the Apache License, 
Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\nimport cffi\n\n\ndef build_ffi(module_prefix, modules, pre_include, post_include, libraries):\n \"\"\"\n Modules listed in ``modules`` should have the following attributes:\n\n * ``INCLUDES``: A string containing C includes.\n * ``TYPES``: A string containing C declarations for types.\n * ``FUNCTIONS``: A string containing C declarations for functions.\n * ``MACROS``: A string containing C declarations for any macros.\n * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this\n can be used to do things like test for a define and provide an\n alternate implementation based on that.\n * ``CONDITIONAL_NAMES``: A dict mapping strings of condition names from the\n library to a list of names which will not be present without the\n condition.\n \"\"\"\n ffi = cffi.FFI()\n includes = []\n functions = []\n macros = []\n customizations = []\n for name in modules:\n module_name = module_prefix + name\n __import__(module_name)\n module = sys.modules[module_name]\n\n ffi.cdef(module.TYPES)\n\n macros.append(module.MACROS)\n functions.append(module.FUNCTIONS)\n includes.append(module.INCLUDES)\n customizations.append(module.CUSTOMIZATIONS)\n\n # loop over the functions & macros after declaring all the types\n # so we can set interdependent types in different files and still\n # have them all defined before we parse the funcs & macros\n for func in functions:\n ffi.cdef(func)\n for macro in macros:\n ffi.cdef(macro)\n\n # We include functions here so that if we got any of their definitions\n # wrong, the underlying C compiler will explode. In C you are allowed\n # to re-declare a function if it has the same signature. That is:\n # int foo(int);\n # int foo(int);\n # is legal, but the following will fail to compile:\n # int foo(int);\n # int foo(short);\n lib = ffi.verify(\n source=\"\\n\".join(\n [pre_include] +\n includes +\n [post_include] +\n functions +\n customizations\n ),\n libraries=libraries,\n ext_package=\"cryptography\",\n )\n\n for name in modules:\n module_name = module_prefix + name\n module = sys.modules[module_name]\n for condition, names in module.CONDITIONAL_NAMES.items():\n if not getattr(lib, condition):\n for name in names:\n delattr(lib, name)\n\n return ffi, lib\n\n\ndef binding_available(initializer):\n try:\n initializer()\n return True\n except cffi.VerificationError:\n return False\n", "path": "cryptography/hazmat/bindings/utils.py"}]}
2,289
302
gh_patches_debug_20421
rasdani/github-patches
git_diff
psychopy__psychopy-1123
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Builder 'Aperture' component cannot have indefinite duration If a Builder 'Aperture' component is set to have an indefinite duration, the actual result is that it is enabled and then immediately disabled. Hence, it has no effect on other stimuli. Below is example code that gets generated when an indefinite duration is specified: ``` # *aperture* updates if t >= 0.0 and aperture.status == NOT_STARTED: # keep track of start time/frame for later aperture.tStart = t # underestimates by a little under one frame aperture.frameNStart = frameN # exact frame index aperture.enabled = True if aperture.status == STARTED and t >= (0.0 + (-win.monitorFramePeriod*0.75)): #most of one frame period left aperture.enabled = False if aperture.status == STARTED: # only update if being drawn aperture.setPos(mouse.getPos(), log=False) ``` The error is in the second if/then above, which should not occur. It is included as if there was a maximum duration in effect, but the value is omitted in the `(-win.monitorFramePeriod*0.75)` expression and hence the duration is effectively zero. The fix here should be simple but I'm not familiar with playing with the Builder component generation code. So recording the bug here until I can figure out how to fix it (and test). A work-around for users in the mean time would be to simply put an unfeasibly large number in the duration field (e.g. 9999). </issue> <code> [start of psychopy/app/builder/components/aperture.py] 1 # Part of the PsychoPy library 2 # Copyright (C) 2015 Jonathan Peirce 3 # Distributed under the terms of the GNU General Public License (GPL). 4 5 from os import path 6 from ._base import BaseVisualComponent, getInitVals 7 8 __author__ = 'Jeremy Gray, Jon Peirce' 9 # March 2011; builder-component for Yuri Spitsyn's visual.Aperture class 10 # July 2011: jwp added the code for it to be enabled only when needed 11 12 # the absolute path to the folder containing this path 13 thisFolder = path.abspath(path.dirname(__file__)) 14 iconFile = path.join(thisFolder, 'aperture.png') 15 tooltip = _translate('Aperture: restrict the drawing of stimuli to a given ' 16 'region') 17 18 19 class ApertureComponent(BaseVisualComponent): 20 """An event class for using GL stencil to restrict the viewing area to a 21 circle or square of a given size and position""" 22 23 def __init__(self, exp, parentName, name='aperture', units='norm', 24 size=1, pos=(0, 0), 25 startType='time (s)', startVal=0.0, 26 stopType='duration (s)', stopVal=1.0, 27 startEstim='', durationEstim=''): 28 # initialise main parameters 29 super(ApertureComponent, self).__init__( 30 exp, parentName, name=name, units=units, 31 pos=pos, size=size, 32 startType=startType, startVal=startVal, 33 stopType=stopType, stopVal=stopVal, 34 startEstim=startEstim, durationEstim=durationEstim) 35 36 self.type = 'Aperture' 37 self.url = "http://www.psychopy.org/builder/components/aperture.html" 38 # params: 39 # NB make some adjustments on the params defined by _visual component 40 self.order = ['name', 'size', 'pos'] # make sure this is at top 41 42 msg = _translate("How big is the aperture? 
(a single number for diameter)") 43 self.params['size'].hint = msg 44 # only localize hints and labels 45 self.params['size'].label = _translate("Size") 46 self.params['pos'].hint = _translate("Where is the aperture centred?") 47 48 # inherited from _visual component but not needed 49 del self.params['ori'] 50 del self.params['color'] 51 del self.params['colorSpace'] 52 del self.params['opacity'] 53 54 def writeInitCode(self, buff): 55 # do we need units code? 56 if self.params['units'].val == 'from exp settings': 57 unitsStr = "" 58 else: 59 unitsStr = "units=%(units)s, " % self.params 60 61 # do writing of init 62 inits = getInitVals(self.params) 63 64 code = ("%(name)s = visual.Aperture(\n" 65 " win=win, name='%(name)s',\n" 66 " " + unitsStr + "size=%(size)s, pos=%(pos)s)\n" 67 "%(name)s.disable() # disable until its actually used\n") 68 buff.writeIndentedLines(code % inits) 69 70 def writeFrameCode(self, buff): 71 """Only activate the aperture for the required frames 72 """ 73 74 buff.writeIndented("\n") 75 buff.writeIndented("# *%s* updates\n" % (self.params['name'])) 76 # writes an if statement to determine whether to draw etc 77 self.writeStartTestCode(buff) 78 buff.writeIndented("%(name)s.enabled = True\n" % self.params) 79 # to get out of the if statement 80 buff.setIndentLevel(-1, relative=True) 81 # writes an if statement to determine whether to draw etc 82 self.writeStopTestCode(buff) 83 buff.writeIndented("%(name)s.enabled = False\n" % self.params) 84 # to get out of the if statement 85 buff.setIndentLevel(-1, relative=True) 86 # set parameters that need updating every frame 87 # do any params need updating? (this method inherited from _base) 88 if self.checkNeedToUpdate('set every frame'): 89 code = ("if %(name)s.status == STARTED: # only update if being drawn\n") 90 buff.writeIndented(code % self.params) 91 92 buff.setIndentLevel(+1, relative=True) # to enter the if block 93 self.writeParamUpdates(buff, 'set every frame') 94 buff.setIndentLevel(-1, relative=True) # to exit the if block 95 96 def writeRoutineEndCode(self, buff): 97 msg = "%(name)s.enabled = False # just in case it was left enabled\n" 98 buff.writeIndented(msg % self.params) 99 [end of psychopy/app/builder/components/aperture.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/psychopy/app/builder/components/aperture.py b/psychopy/app/builder/components/aperture.py
--- a/psychopy/app/builder/components/aperture.py
+++ b/psychopy/app/builder/components/aperture.py
@@ -78,11 +78,12 @@
         buff.writeIndented("%(name)s.enabled = True\n" % self.params)
         # to get out of the if statement
         buff.setIndentLevel(-1, relative=True)
-        # writes an if statement to determine whether to draw etc
-        self.writeStopTestCode(buff)
-        buff.writeIndented("%(name)s.enabled = False\n" % self.params)
-        # to get out of the if statement
-        buff.setIndentLevel(-1, relative=True)
+        if self.params['stopVal'].val not in ['', None, -1, 'None']:
+            # writes an if statement to determine whether to draw etc
+            self.writeStopTestCode(buff)
+            buff.writeIndented("%(name)s.enabled = False\n" % self.params)
+            # to get out of the if statement
+            buff.setIndentLevel(-1, relative=True)
         # set parameters that need updating every frame
         # do any params need updating? (this method inherited from _base)
         if self.checkNeedToUpdate('set every frame'):
{"golden_diff": "diff --git a/psychopy/app/builder/components/aperture.py b/psychopy/app/builder/components/aperture.py\n--- a/psychopy/app/builder/components/aperture.py\n+++ b/psychopy/app/builder/components/aperture.py\n@@ -78,11 +78,12 @@\n buff.writeIndented(\"%(name)s.enabled = True\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n- # writes an if statement to determine whether to draw etc\n- self.writeStopTestCode(buff)\n- buff.writeIndented(\"%(name)s.enabled = False\\n\" % self.params)\n- # to get out of the if statement\n- buff.setIndentLevel(-1, relative=True)\n+ if self.params['stopVal'].val not in ['', None, -1, 'None']:\n+ # writes an if statement to determine whether to draw etc\n+ self.writeStopTestCode(buff)\n+ buff.writeIndented(\"%(name)s.enabled = False\\n\" % self.params)\n+ # to get out of the if statement\n+ buff.setIndentLevel(-1, relative=True)\n # set parameters that need updating every frame\n # do any params need updating? (this method inherited from _base)\n if self.checkNeedToUpdate('set every frame'):\n", "issue": "Builder 'Aperture' component cannot have indefinite duration\nIf a Builder 'Aperture' component is set to have an indefinite duration, the actual result is that it is enabled and then immediately disabled. Hence, it has no effect on other stimuli. Below is example code that gets generated when an indefinite duration is specified:\n\n```\n# *aperture* updates\nif t >= 0.0 and aperture.status == NOT_STARTED:\n # keep track of start time/frame for later\n aperture.tStart = t # underestimates by a little under one frame\n aperture.frameNStart = frameN # exact frame index\n aperture.enabled = True\nif aperture.status == STARTED and t >= (0.0 + (-win.monitorFramePeriod*0.75)): #most of one frame period left\n aperture.enabled = False\nif aperture.status == STARTED: # only update if being drawn\n aperture.setPos(mouse.getPos(), log=False)\n```\n\nThe error is in the second if/then above, which should not occur. It is included as if there was a maximum duration in effect, but the value is omitted in the `(-win.monitorFramePeriod*0.75)` expression and hence the duration is effectively zero.\n\nThe fix here should be simple but I'm not familiar with playing with the Builder component generation code. So recording the bug here until I can figure out how to fix it (and test).\n\nA work-around for users in the mean time would be to simply put an unfeasibly large number in the duration field (e.g. 
9999).\n\n", "before_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom os import path\nfrom ._base import BaseVisualComponent, getInitVals\n\n__author__ = 'Jeremy Gray, Jon Peirce'\n# March 2011; builder-component for Yuri Spitsyn's visual.Aperture class\n# July 2011: jwp added the code for it to be enabled only when needed\n\n# the absolute path to the folder containing this path\nthisFolder = path.abspath(path.dirname(__file__))\niconFile = path.join(thisFolder, 'aperture.png')\ntooltip = _translate('Aperture: restrict the drawing of stimuli to a given '\n 'region')\n\n\nclass ApertureComponent(BaseVisualComponent):\n \"\"\"An event class for using GL stencil to restrict the viewing area to a\n circle or square of a given size and position\"\"\"\n\n def __init__(self, exp, parentName, name='aperture', units='norm',\n size=1, pos=(0, 0),\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=1.0,\n startEstim='', durationEstim=''):\n # initialise main parameters\n super(ApertureComponent, self).__init__(\n exp, parentName, name=name, units=units,\n pos=pos, size=size,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n\n self.type = 'Aperture'\n self.url = \"http://www.psychopy.org/builder/components/aperture.html\"\n # params:\n # NB make some adjustments on the params defined by _visual component\n self.order = ['name', 'size', 'pos'] # make sure this is at top\n\n msg = _translate(\"How big is the aperture? (a single number for diameter)\")\n self.params['size'].hint = msg\n # only localize hints and labels\n self.params['size'].label = _translate(\"Size\")\n self.params['pos'].hint = _translate(\"Where is the aperture centred?\")\n\n # inherited from _visual component but not needed\n del self.params['ori']\n del self.params['color']\n del self.params['colorSpace']\n del self.params['opacity']\n\n def writeInitCode(self, buff):\n # do we need units code?\n if self.params['units'].val == 'from exp settings':\n unitsStr = \"\"\n else:\n unitsStr = \"units=%(units)s, \" % self.params\n\n # do writing of init\n inits = getInitVals(self.params)\n\n code = (\"%(name)s = visual.Aperture(\\n\"\n \" win=win, name='%(name)s',\\n\"\n \" \" + unitsStr + \"size=%(size)s, pos=%(pos)s)\\n\"\n \"%(name)s.disable() # disable until its actually used\\n\")\n buff.writeIndentedLines(code % inits)\n\n def writeFrameCode(self, buff):\n \"\"\"Only activate the aperture for the required frames\n \"\"\"\n\n buff.writeIndented(\"\\n\")\n buff.writeIndented(\"# *%s* updates\\n\" % (self.params['name']))\n # writes an if statement to determine whether to draw etc\n self.writeStartTestCode(buff)\n buff.writeIndented(\"%(name)s.enabled = True\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n # writes an if statement to determine whether to draw etc\n self.writeStopTestCode(buff)\n buff.writeIndented(\"%(name)s.enabled = False\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n # set parameters that need updating every frame\n # do any params need updating? 
(this method inherited from _base)\n if self.checkNeedToUpdate('set every frame'):\n code = (\"if %(name)s.status == STARTED: # only update if being drawn\\n\")\n buff.writeIndented(code % self.params)\n\n buff.setIndentLevel(+1, relative=True) # to enter the if block\n self.writeParamUpdates(buff, 'set every frame')\n buff.setIndentLevel(-1, relative=True) # to exit the if block\n\n def writeRoutineEndCode(self, buff):\n msg = \"%(name)s.enabled = False # just in case it was left enabled\\n\"\n buff.writeIndented(msg % self.params)\n", "path": "psychopy/app/builder/components/aperture.py"}]}
2,082
290
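A quick way to see what the Aperture fix above changes is to pull the stop-value guard out on its own. The sketch below is not PsychoPy code; the helper name and the standalone asserts are illustrative, but the membership test is the same one the patched `writeFrameCode` uses to decide whether to emit a stop test at all.

```python
def needs_stop_test(stop_val):
    """Return True only when the component has an explicit stop value.

    Blank, None, -1 and the string 'None' all mean an indefinite duration,
    so the generator should skip the block that sets `enabled = False`.
    """
    return stop_val not in ['', None, -1, 'None']


assert needs_stop_test(1.0)         # fixed duration: emit the stop test
assert not needs_stop_test('')      # indefinite: no stop test is written
assert not needs_stop_test('None')  # the guard also treats the string 'None' as indefinite
```

With that guard in place, an indefinite-duration aperture is only disabled by `writeRoutineEndCode` when the routine finishes, which is the behaviour the issue asks for.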
gh_patches_debug_443
rasdani/github-patches
git_diff
doccano__doccano-1209
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ModuleNotFoundError: No module named 'auto_labeling_pipeline' How to reproduce the behaviour --------- <!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions --> <!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. --> I just installed `Doccano==1.2.0` (released just now..) with `pip install doccano` on my Linux machine to check out the `auto_labeling` feature. However, I got the following error running `doccano` in the shell. ``` >>> doccano ``` ``` Setup Database. Traceback (most recent call last): File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module> execute_from_command_line(sys.argv) File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute django.setup() File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup apps.populate(settings.INSTALLED_APPS) File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate app_config.import_models() File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models self.models_module = import_module(models_module_name) File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 665, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module> from auto_labeling_pipeline.models import RequestModelFactory ModuleNotFoundError: No module named 'auto_labeling_pipeline' Traceback (most recent call last): File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module> execute_from_command_line(sys.argv) File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute django.setup() File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup apps.populate(settings.INSTALLED_APPS) File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate app_config.import_models() File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models self.models_module = import_module(models_module_name) File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 665, in 
_load_unlocked File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module> from auto_labeling_pipeline.models import RequestModelFactory ModuleNotFoundError: No module named 'auto_labeling_pipeline' Traceback (most recent call last): File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module> execute_from_command_line(sys.argv) File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute django.setup() File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup apps.populate(settings.INSTALLED_APPS) File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate app_config.import_models() File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models self.models_module = import_module(models_module_name) File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 665, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module> from auto_labeling_pipeline.models import RequestModelFactory ModuleNotFoundError: No module named 'auto_labeling_pipeline' Create admin user. 
Traceback (most recent call last): File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module> execute_from_command_line(sys.argv) File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute django.setup() File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup apps.populate(settings.INSTALLED_APPS) File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate app_config.import_models() File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models self.models_module = import_module(models_module_name) File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 665, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module> from auto_labeling_pipeline.models import RequestModelFactory ModuleNotFoundError: No module named 'auto_labeling_pipeline' Starting server with port 8000. Exception in thread django-main-thread: Traceback (most recent call last): File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/usr/lib/python3.6/threading.py", line 864, in run self._target(*self._args, **self._kwargs) File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper fn(*args, **kwargs) File "/usr/local/lib/python3.6/dist-packages/django/core/management/commands/runserver.py", line 110, in inner_run autoreload.raise_last_exception() File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 76, in raise_last_exception raise _exception[1] File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 357, in execute autoreload.check_errors(django.setup)() File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper fn(*args, **kwargs) File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup apps.populate(settings.INSTALLED_APPS) File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate app_config.import_models() File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models self.models_module = import_module(models_module_name) File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 665, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module> from 
auto_labeling_pipeline.models import RequestModelFactory ModuleNotFoundError: No module named 'auto_labeling_pipeline' ``` Your Environment --------- <!-- Include details of your environment.--> * Operating System: Ubuntu 18.04.5 LTS * Python Version Used: 3.6.9 * When you install doccano: 02/19/21 7:40 AM GMT * How did you install doccano (Heroku button etc): `pip install doccano` * Doccano version: 1.2.0 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 import io 4 import os 5 6 from setuptools import find_packages, setup 7 8 NAME = 'doccano' 9 DESCRIPTION = 'doccano, text annotation tool for machine learning practitioners' 10 URL = 'https://github.com/doccano/doccano' 11 EMAIL = '[email protected]' 12 AUTHOR = 'Hironsan' 13 LICENSE = 'MIT' 14 15 here = os.path.abspath(os.path.dirname(__file__)) 16 with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f: 17 long_description = '\n' + f.read() 18 19 required = [ 20 'apache-libcloud>=3.2.0', 21 'colour>=0.1.5', 22 'conllu>=4.2.2', 23 'dj-database-url>=0.5.0', 24 'django-cors-headers>=3.5.0', 25 'django-filter>=2.4.0', 26 'django-rest-polymorphic>=0.1.9', 27 'djangorestframework-csv>=2.1.0', 28 'djangorestframework-xml>=2.0.0', 29 'drf-yasg>=1.20.0', 30 'environs>=9.2.0', 31 'furl>=2.1.0', 32 'pyexcel>=0.6.6', 33 'pyexcel-xlsx>=0.6.0', 34 'python-jose>=3.2.0', 35 'seqeval>=1.2.2', 36 'social-auth-app-django>=4.0.0', 37 'whitenoise>=5.2.0' 38 ] 39 40 setup( 41 name=NAME, 42 use_scm_version=True, 43 setup_requires=['setuptools_scm'], 44 description=DESCRIPTION, 45 long_description=long_description, 46 long_description_content_type='text/markdown', 47 author=AUTHOR, 48 author_email=EMAIL, 49 url=URL, 50 packages=find_packages(exclude=('*.tests',)), 51 entry_points={ 52 'console_scripts': [ 53 'doccano = app.doccano.doccano:main' 54 ] 55 }, 56 install_requires=required, 57 extras_require={ 58 'postgresql': ['psycopg2-binary>=2.8.6'], 59 'mssql': ['django-mssql-backend>=2.8.1'], 60 }, 61 include_package_data=True, 62 license=LICENSE, 63 classifiers=[ 64 'License :: OSI Approved :: MIT License', 65 'Programming Language :: Python', 66 'Programming Language :: Python :: 3.6', 67 'Programming Language :: Python :: 3.7', 68 'Programming Language :: Python :: 3.8', 69 'Programming Language :: Python :: Implementation :: CPython', 70 'Programming Language :: Python :: Implementation :: PyPy' 71 ], 72 ) 73 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,8 @@
     'python-jose>=3.2.0',
     'seqeval>=1.2.2',
     'social-auth-app-django>=4.0.0',
-    'whitenoise>=5.2.0'
+    'whitenoise>=5.2.0',
+    'auto-labeling-pipeline>=0.1.12'
 ]
 
 setup(
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,7 +34,8 @@\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n- 'whitenoise>=5.2.0'\n+ 'whitenoise>=5.2.0',\n+ 'auto-labeling-pipeline>=0.1.12'\n ]\n \n setup(\n", "issue": "ModuleNotFoundError: No module named 'auto_labeling_pipeline'\nHow to reproduce the behaviour\r\n---------\r\n<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->\r\n\r\n<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->\r\n\r\nI just installed `Doccano==1.2.0` (released just now..) with `pip install doccano` on my Linux machine to check out the `auto_labeling` feature. However, I got the following error running `doccano` in the shell.\r\n\r\n```\r\n>>> doccano\r\n```\r\n\r\n```\r\nSetup Database.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = 
import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nCreate admin user.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return 
_bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nStarting server with port 8000.\r\nException in thread django-main-thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py\", line 53, in wrapper\r\n fn(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/commands/runserver.py\", line 110, in inner_run\r\n autoreload.raise_last_exception()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py\", line 76, in raise_last_exception\r\n raise _exception[1]\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 357, in execute\r\n autoreload.check_errors(django.setup)()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py\", line 53, in wrapper\r\n fn(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\n```\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: Ubuntu 18.04.5 LTS\r\n* Python Version Used: 3.6.9\r\n* When you install doccano: 02/19/21 7:40 AM GMT\r\n* How did you install doccano (Heroku button etc): `pip install doccano`\r\n* Doccano version: 1.2.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning 
practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = '[email protected]'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0'\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = app.doccano.doccano:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n", "path": "setup.py"}]}
3,932
117
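Since the doccano fix above is simply a missing runtime dependency, a small sanity check like the sketch below can confirm the package is importable after upgrading. The error message and exit behaviour are illustrative choices, not part of doccano.

```python
import importlib.util

# `auto_labeling_pipeline` is the import name from the traceback; the PyPI
# project added to install_requires in the patch above is `auto-labeling-pipeline`.
if importlib.util.find_spec("auto_labeling_pipeline") is None:
    raise SystemExit(
        "auto_labeling_pipeline is not installed - "
        "run `pip install auto-labeling-pipeline` or reinstall doccano"
    )
print("auto_labeling_pipeline found, doccano can start")
```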
gh_patches_debug_64631
rasdani/github-patches
git_diff
quantumlib__Cirq-2370
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Escape unicode characters when producing latex Currently, the qcircuit to latex code passes through unicode characters like ↺ ↻. LaTeX may not be happy about this. We should replace the unicode characters with some that LaTeX is happy with. Or perhaps throw an error? </issue> <code> [start of cirq/contrib/qcircuit/qcircuit_pdf.py] 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # coverage: ignore 16 17 import errno 18 import os 19 20 from pylatex import Document, NoEscape, Package 21 22 from cirq import circuits 23 from cirq.contrib.qcircuit.qcircuit_diagram import ( 24 circuit_to_latex_using_qcircuit) 25 26 27 def circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit, 28 filepath: str, 29 pdf_kwargs=None, 30 qcircuit_kwargs=None, 31 clean_ext=('dvi', 'ps'), 32 documentclass='article'): 33 """Compiles the QCircuit-based latex diagram of the given circuit. 34 35 Args: 36 circuit: The circuit to produce a pdf of. 37 filepath: Where to output the pdf. 38 pdf_kwargs: The arguments to pass to generate_pdf. 39 qcircuit_kwargs: The arguments to pass to 40 circuit_to_latex_using_qcircuit. 41 clean_ext: The file extensions to clean up after compilation. By 42 default, latexmk is used with the '-pdfps' flag, which produces 43 intermediary dvi and ps files. 44 documentclass: The documentclass of the latex file. 45 """ 46 pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'], 47 **({} if pdf_kwargs is None else pdf_kwargs)} 48 qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs 49 tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs) 50 doc = Document(documentclass=documentclass, document_options='dvips') 51 doc.packages.append(Package('amsmath')) 52 doc.packages.append(Package('qcircuit')) 53 doc.append(NoEscape(tex)) 54 doc.generate_pdf(filepath, **pdf_kwargs) 55 for ext in clean_ext: 56 try: 57 os.remove(filepath + '.' + ext) 58 except (OSError, IOError) as e: 59 if e.errno != errno.ENOENT: 60 raise 61 [end of cirq/contrib/qcircuit/qcircuit_pdf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cirq/contrib/qcircuit/qcircuit_pdf.py b/cirq/contrib/qcircuit/qcircuit_pdf.py
--- a/cirq/contrib/qcircuit/qcircuit_pdf.py
+++ b/cirq/contrib/qcircuit/qcircuit_pdf.py
@@ -50,6 +50,7 @@
     doc = Document(documentclass=documentclass, document_options='dvips')
     doc.packages.append(Package('amsmath'))
     doc.packages.append(Package('qcircuit'))
+    doc.preamble.append(Package('inputenc', options=['utf8']))
     doc.append(NoEscape(tex))
     doc.generate_pdf(filepath, **pdf_kwargs)
    for ext in clean_ext:
{"golden_diff": "diff --git a/cirq/contrib/qcircuit/qcircuit_pdf.py b/cirq/contrib/qcircuit/qcircuit_pdf.py\n--- a/cirq/contrib/qcircuit/qcircuit_pdf.py\n+++ b/cirq/contrib/qcircuit/qcircuit_pdf.py\n@@ -50,6 +50,7 @@\n doc = Document(documentclass=documentclass, document_options='dvips')\n doc.packages.append(Package('amsmath'))\n doc.packages.append(Package('qcircuit'))\n+ doc.preamble.append(Package('inputenc', options=['utf8']))\n doc.append(NoEscape(tex))\n doc.generate_pdf(filepath, **pdf_kwargs)\n for ext in clean_ext:\n", "issue": "Escape unicode characters when producing latex\nCurrently, the qcircuit to latex code passes through unicode characters like \u21ba \u21bb. LaTeX may not be happy about this. We should replace the unicode characters with some that LaTeX is happy with. Or perhaps throw an error?\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coverage: ignore\n\nimport errno\nimport os\n\nfrom pylatex import Document, NoEscape, Package\n\nfrom cirq import circuits\nfrom cirq.contrib.qcircuit.qcircuit_diagram import (\n circuit_to_latex_using_qcircuit)\n\n\ndef circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit,\n filepath: str,\n pdf_kwargs=None,\n qcircuit_kwargs=None,\n clean_ext=('dvi', 'ps'),\n documentclass='article'):\n \"\"\"Compiles the QCircuit-based latex diagram of the given circuit.\n\n Args:\n circuit: The circuit to produce a pdf of.\n filepath: Where to output the pdf.\n pdf_kwargs: The arguments to pass to generate_pdf.\n qcircuit_kwargs: The arguments to pass to\n circuit_to_latex_using_qcircuit.\n clean_ext: The file extensions to clean up after compilation. By\n default, latexmk is used with the '-pdfps' flag, which produces\n intermediary dvi and ps files.\n documentclass: The documentclass of the latex file.\n \"\"\"\n pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'],\n **({} if pdf_kwargs is None else pdf_kwargs)}\n qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs\n tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs)\n doc = Document(documentclass=documentclass, document_options='dvips')\n doc.packages.append(Package('amsmath'))\n doc.packages.append(Package('qcircuit'))\n doc.append(NoEscape(tex))\n doc.generate_pdf(filepath, **pdf_kwargs)\n for ext in clean_ext:\n try:\n os.remove(filepath + '.' + ext)\n except (OSError, IOError) as e:\n if e.errno != errno.ENOENT:\n raise\n", "path": "cirq/contrib/qcircuit/qcircuit_pdf.py"}]}
1,250
152
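The Cirq patch above addresses the symptom by declaring UTF-8 input encoding in the preamble. The issue's other suggestion, replacing the unicode arrows before the TeX ever reaches the compiler, would look roughly like the sketch below. The mapping and function name are assumptions for illustration, not part of Cirq's API, and the `\circlearrowleft` / `\circlearrowright` macros need `amssymb` to be loaded.

```python
# Hypothetical pre-processing step, not part of cirq.contrib.qcircuit:
# swap the rotation glyphs for ASCII-safe LaTeX macros before compilation.
UNICODE_TO_LATEX = {
    "\u21ba": r"\circlearrowleft{}",   # the anticlockwise arrow
    "\u21bb": r"\circlearrowright{}",  # the clockwise arrow
}


def escape_unicode_for_latex(tex: str) -> str:
    for char, macro in UNICODE_TO_LATEX.items():
        tex = tex.replace(char, macro)
    return tex


print(escape_unicode_for_latex("R\u21ba"))  # prints R\circlearrowleft{}
```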
gh_patches_debug_38801
rasdani/github-patches
git_diff
TheAlgorithms__Python-9083
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Returning index instead of boolean when knuth_morris_pratt matches. In [knuth morris pratt algorithm](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py), I think it's better to return the "index" of the matched substring in case of success, and `-1` in case of failure, so that it would be consistent with the `str.find()` in Python. `True` and `False` aren't that helpful. </issue> <code> [start of strings/knuth_morris_pratt.py] 1 from __future__ import annotations 2 3 4 def kmp(pattern: str, text: str) -> bool: 5 """ 6 The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text 7 with complexity O(n + m) 8 9 1) Preprocess pattern to identify any suffixes that are identical to prefixes 10 11 This tells us where to continue from if we get a mismatch between a character 12 in our pattern and the text. 13 14 2) Step through the text one character at a time and compare it to a character in 15 the pattern updating our location within the pattern if necessary 16 17 """ 18 19 # 1) Construct the failure array 20 failure = get_failure_array(pattern) 21 22 # 2) Step through text searching for pattern 23 i, j = 0, 0 # index into text, pattern 24 while i < len(text): 25 if pattern[j] == text[i]: 26 if j == (len(pattern) - 1): 27 return True 28 j += 1 29 30 # if this is a prefix in our pattern 31 # just go back far enough to continue 32 elif j > 0: 33 j = failure[j - 1] 34 continue 35 i += 1 36 return False 37 38 39 def get_failure_array(pattern: str) -> list[int]: 40 """ 41 Calculates the new index we should go to if we fail a comparison 42 :param pattern: 43 :return: 44 """ 45 failure = [0] 46 i = 0 47 j = 1 48 while j < len(pattern): 49 if pattern[i] == pattern[j]: 50 i += 1 51 elif i > 0: 52 i = failure[i - 1] 53 continue 54 j += 1 55 failure.append(i) 56 return failure 57 58 59 if __name__ == "__main__": 60 # Test 1) 61 pattern = "abc1abc12" 62 text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" 63 text2 = "alskfjaldsk23adsfabcabc" 64 assert kmp(pattern, text1) and not kmp(pattern, text2) 65 66 # Test 2) 67 pattern = "ABABX" 68 text = "ABABZABABYABABX" 69 assert kmp(pattern, text) 70 71 # Test 3) 72 pattern = "AAAB" 73 text = "ABAAAAAB" 74 assert kmp(pattern, text) 75 76 # Test 4) 77 pattern = "abcdabcy" 78 text = "abcxabcdabxabcdabcdabcy" 79 assert kmp(pattern, text) 80 81 # Test 5) 82 pattern = "aabaabaaa" 83 assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2] 84 [end of strings/knuth_morris_pratt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py
--- a/strings/knuth_morris_pratt.py
+++ b/strings/knuth_morris_pratt.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 
-def kmp(pattern: str, text: str) -> bool:
+def knuth_morris_pratt(text: str, pattern: str) -> int:
     """
     The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text
     with complexity O(n + m)
@@ -14,6 +14,12 @@
     2) Step through the text one character at a time and compare it to a character in
     the pattern updating our location within the pattern if necessary
 
+    >>> kmp = "knuth_morris_pratt"
+    >>> all(
+    ...    knuth_morris_pratt(kmp, s) == kmp.find(s)
+    ...    for s in ("kn", "h_m", "rr", "tt", "not there")
+    ... )
+    True
     """
 
     # 1) Construct the failure array
@@ -24,7 +30,7 @@
     while i < len(text):
         if pattern[j] == text[i]:
             if j == (len(pattern) - 1):
-                return True
+                return i - j
             j += 1
 
         # if this is a prefix in our pattern
@@ -33,7 +39,7 @@
             j = failure[j - 1]
             continue
         i += 1
-    return False
+    return -1
 
 
 def get_failure_array(pattern: str) -> list[int]:
@@ -57,27 +63,38 @@
 
 
 if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
+
     # Test 1)
     pattern = "abc1abc12"
     text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
     text2 = "alskfjaldsk23adsfabcabc"
-    assert kmp(pattern, text1) and not kmp(pattern, text2)
+    assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern)
 
     # Test 2)
     pattern = "ABABX"
     text = "ABABZABABYABABX"
-    assert kmp(pattern, text)
+    assert knuth_morris_pratt(text, pattern)
 
     # Test 3)
     pattern = "AAAB"
     text = "ABAAAAAB"
-    assert kmp(pattern, text)
+    assert knuth_morris_pratt(text, pattern)
 
     # Test 4)
     pattern = "abcdabcy"
     text = "abcxabcdabxabcdabcdabcy"
-    assert kmp(pattern, text)
+    assert knuth_morris_pratt(text, pattern)
+
+    # Test 5) -> Doctests
+    kmp = "knuth_morris_pratt"
+    assert all(
+        knuth_morris_pratt(kmp, s) == kmp.find(s)
+        for s in ("kn", "h_m", "rr", "tt", "not there")
+    )
 
-    # Test 5)
+    # Test 6)
     pattern = "aabaabaaa"
     assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
{"golden_diff": "diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py\n--- a/strings/knuth_morris_pratt.py\n+++ b/strings/knuth_morris_pratt.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n \n-def kmp(pattern: str, text: str) -> bool:\n+def knuth_morris_pratt(text: str, pattern: str) -> int:\n \"\"\"\n The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text\n with complexity O(n + m)\n@@ -14,6 +14,12 @@\n 2) Step through the text one character at a time and compare it to a character in\n the pattern updating our location within the pattern if necessary\n \n+ >>> kmp = \"knuth_morris_pratt\"\n+ >>> all(\n+ ... knuth_morris_pratt(kmp, s) == kmp.find(s)\n+ ... for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n+ ... )\n+ True\n \"\"\"\n \n # 1) Construct the failure array\n@@ -24,7 +30,7 @@\n while i < len(text):\n if pattern[j] == text[i]:\n if j == (len(pattern) - 1):\n- return True\n+ return i - j\n j += 1\n \n # if this is a prefix in our pattern\n@@ -33,7 +39,7 @@\n j = failure[j - 1]\n continue\n i += 1\n- return False\n+ return -1\n \n \n def get_failure_array(pattern: str) -> list[int]:\n@@ -57,27 +63,38 @@\n \n \n if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n+\n # Test 1)\n pattern = \"abc1abc12\"\n text1 = \"alskfjaldsabc1abc1abc12k23adsfabcabc\"\n text2 = \"alskfjaldsk23adsfabcabc\"\n- assert kmp(pattern, text1) and not kmp(pattern, text2)\n+ assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern)\n \n # Test 2)\n pattern = \"ABABX\"\n text = \"ABABZABABYABABX\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n \n # Test 3)\n pattern = \"AAAB\"\n text = \"ABAAAAAB\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n \n # Test 4)\n pattern = \"abcdabcy\"\n text = \"abcxabcdabxabcdabcdabcy\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n+\n+ # Test 5) -> Doctests\n+ kmp = \"knuth_morris_pratt\"\n+ assert all(\n+ knuth_morris_pratt(kmp, s) == kmp.find(s)\n+ for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n+ )\n \n- # Test 5)\n+ # Test 6)\n pattern = \"aabaabaaa\"\n assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]\n", "issue": "Returning index instead of boolean when knuth_morris_pratt matches.\nIn [knuth morris pratt algorithm](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py), I think it's better to return the \"index\" of the matched substring in case of success, and `-1` in case of failure, so that it would be consistent with the `str.find()` in Python. 
`True` and `False` aren't that helpful.\n", "before_files": [{"content": "from __future__ import annotations\n\n\ndef kmp(pattern: str, text: str) -> bool:\n \"\"\"\n The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text\n with complexity O(n + m)\n\n 1) Preprocess pattern to identify any suffixes that are identical to prefixes\n\n This tells us where to continue from if we get a mismatch between a character\n in our pattern and the text.\n\n 2) Step through the text one character at a time and compare it to a character in\n the pattern updating our location within the pattern if necessary\n\n \"\"\"\n\n # 1) Construct the failure array\n failure = get_failure_array(pattern)\n\n # 2) Step through text searching for pattern\n i, j = 0, 0 # index into text, pattern\n while i < len(text):\n if pattern[j] == text[i]:\n if j == (len(pattern) - 1):\n return True\n j += 1\n\n # if this is a prefix in our pattern\n # just go back far enough to continue\n elif j > 0:\n j = failure[j - 1]\n continue\n i += 1\n return False\n\n\ndef get_failure_array(pattern: str) -> list[int]:\n \"\"\"\n Calculates the new index we should go to if we fail a comparison\n :param pattern:\n :return:\n \"\"\"\n failure = [0]\n i = 0\n j = 1\n while j < len(pattern):\n if pattern[i] == pattern[j]:\n i += 1\n elif i > 0:\n i = failure[i - 1]\n continue\n j += 1\n failure.append(i)\n return failure\n\n\nif __name__ == \"__main__\":\n # Test 1)\n pattern = \"abc1abc12\"\n text1 = \"alskfjaldsabc1abc1abc12k23adsfabcabc\"\n text2 = \"alskfjaldsk23adsfabcabc\"\n assert kmp(pattern, text1) and not kmp(pattern, text2)\n\n # Test 2)\n pattern = \"ABABX\"\n text = \"ABABZABABYABABX\"\n assert kmp(pattern, text)\n\n # Test 3)\n pattern = \"AAAB\"\n text = \"ABAAAAAB\"\n assert kmp(pattern, text)\n\n # Test 4)\n pattern = \"abcdabcy\"\n text = \"abcxabcdabxabcdabcdabcy\"\n assert kmp(pattern, text)\n\n # Test 5)\n pattern = \"aabaabaaa\"\n assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]\n", "path": "strings/knuth_morris_pratt.py"}]}
1,435
808
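The golden diff for this record replaces the boolean `kmp(pattern, text)` with `knuth_morris_pratt(text, pattern) -> int`, returning the match's start index (or `-1`) so it lines up with `str.find`. Below is a self-contained sketch of that variant, reusing the record's `get_failure_array` logic and repeating the diff's doctest as an assertion; it is illustrative, not the repository file itself.

```python
from __future__ import annotations


def get_failure_array(pattern: str) -> list[int]:
    """failure[j]: length of the longest proper prefix of pattern[: j + 1] that is also its suffix."""
    failure = [0]
    i, j = 0, 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


def knuth_morris_pratt(text: str, pattern: str) -> int:
    """Return the index of the first occurrence of pattern in text, or -1 if absent."""
    failure = get_failure_array(pattern)
    i = j = 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                return i - j  # start of the match, mirroring str.find
            j += 1
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return -1


if __name__ == "__main__":
    kmp = "knuth_morris_pratt"
    assert all(
        knuth_morris_pratt(kmp, s) == kmp.find(s)
        for s in ("kn", "h_m", "rr", "tt", "not there")
    )
```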
gh_patches_debug_19094
rasdani/github-patches
git_diff
bridgecrewio__checkov-4875
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_300 S3 MPU lifecycle alert **Issue** Alert ID - CKV_AWS_300 Checkov sends false alert for MPU "aws_s3_bucket_lifecycle_configuration" despite having the correct rule configuration. In my scenario I'm having an empty filter in my terraform code. But as per logic in checkov code base, (checkov/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py) under scan_resource_conf method, the 'if' condition doesn't take the 'filter' condition into account. This in-turn throws a false alert. **Examples** ```hcl resource "aws_s3_bucket_lifecycle_configuration" "lifecycle_rule" { bucket = aws_s3_bucket.s3.id rule { abort_incomplete_multipart_upload { days_after_initiation = 3 } id = "MPU 3days" filter { } status = "Enabled" } } ``` **Version (please complete the following information):** - Checkov 2.3.155 (Latest) </issue> <code> [start of checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py] 1 from __future__ import annotations 2 3 from typing import Any 4 5 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 6 from checkov.common.models.enums import CheckCategories, CheckResult 7 8 9 class S3AbortIncompleteUploads(BaseResourceCheck): 10 def __init__(self) -> None: 11 """ 12 If you don't set this value in a lifecycle configuration you'll end up paying for s3 13 resources you never could use 14 """ 15 name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads" 16 id = "CKV_AWS_300" 17 supported_resources = ("aws_s3_bucket_lifecycle_configuration",) 18 categories = (CheckCategories.GENERAL_SECURITY,) 19 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 20 21 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult: 22 self.evaluated_keys = ["rule"] 23 rules = conf.get("rule") 24 if rules and isinstance(rules, list): 25 for idx_rule, rule in enumerate(rules): 26 if ( 27 rule.get("abort_incomplete_multipart_upload") 28 and rule.get("status") == ["Enabled"] 29 and not rule.get("filter") 30 ): 31 self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"] 32 return CheckResult.PASSED 33 34 return CheckResult.FAILED 35 36 37 check = S3AbortIncompleteUploads() 38 [end of checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py --- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py +++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py @@ -23,12 +23,13 @@ rules = conf.get("rule") if rules and isinstance(rules, list): for idx_rule, rule in enumerate(rules): - if ( - rule.get("abort_incomplete_multipart_upload") - and rule.get("status") == ["Enabled"] - and not rule.get("filter") - ): + if rule.get("abort_incomplete_multipart_upload") and rule.get("status") == ["Enabled"]: self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"] + filter = rule.get("filter") + if filter and isinstance(filter, list) and filter[0]: + # it is possible to set an empty filter, which applies then to all objects + continue + return CheckResult.PASSED return CheckResult.FAILED
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n@@ -23,12 +23,13 @@\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n- if (\n- rule.get(\"abort_incomplete_multipart_upload\")\n- and rule.get(\"status\") == [\"Enabled\"]\n- and not rule.get(\"filter\")\n- ):\n+ if rule.get(\"abort_incomplete_multipart_upload\") and rule.get(\"status\") == [\"Enabled\"]:\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n+ filter = rule.get(\"filter\")\n+ if filter and isinstance(filter, list) and filter[0]:\n+ # it is possible to set an empty filter, which applies then to all objects\n+ continue\n+\n return CheckResult.PASSED\n \n return CheckResult.FAILED\n", "issue": "CKV_AWS_300 S3 MPU lifecycle alert\n**Issue**\r\nAlert ID - CKV_AWS_300 \r\nCheckov sends false alert for MPU \"aws_s3_bucket_lifecycle_configuration\" despite having the correct rule configuration. In my scenario I'm having an empty filter in my terraform code. But as per logic in checkov code base, (checkov/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py) under scan_resource_conf method, the 'if' condition doesn't take the 'filter' condition into account. This in-turn throws a false alert.\r\n\r\n**Examples**\r\n```hcl\r\nresource \"aws_s3_bucket_lifecycle_configuration\" \"lifecycle_rule\" {\r\n bucket = aws_s3_bucket.s3.id\r\n rule {\r\n abort_incomplete_multipart_upload {\r\n days_after_initiation = 3\r\n }\r\n id = \"MPU 3days\"\r\n filter {\r\n }\r\n status = \"Enabled\"\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov 2.3.155 (Latest)\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if (\n rule.get(\"abort_incomplete_multipart_upload\")\n and rule.get(\"status\") == [\"Enabled\"]\n and not rule.get(\"filter\")\n ):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}]}
1,172
263
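The golden diff for this record stops treating any `filter` block as disqualifying: only a non-empty filter makes the rule skip, so the reporter's empty `filter {}` now passes. The sketch below shows the adjusted logic on a hand-built dict, without the `BaseResourceCheck` plumbing; the exact shape Checkov's HCL parser produces (values wrapped in single-element lists) is an assumption here.

```python
from enum import Enum


class CheckResult(Enum):
    PASSED = "PASSED"
    FAILED = "FAILED"


def scan_lifecycle_conf(conf: dict) -> CheckResult:
    rules = conf.get("rule")
    if rules and isinstance(rules, list):
        for rule in rules:
            if rule.get("abort_incomplete_multipart_upload") and rule.get("status") == ["Enabled"]:
                rule_filter = rule.get("filter")
                if rule_filter and isinstance(rule_filter, list) and rule_filter[0]:
                    # A populated filter narrows the rule to a subset of objects,
                    # so it cannot satisfy the bucket-wide check on its own.
                    continue
                return CheckResult.PASSED
    return CheckResult.FAILED


# The reporter's HCL: an empty `filter {}` parses to a falsy entry and now passes.
conf = {
    "rule": [{
        "abort_incomplete_multipart_upload": [{"days_after_initiation": [3]}],
        "id": ["MPU 3days"],
        "status": ["Enabled"],
        "filter": [{}],
    }]
}
assert scan_lifecycle_conf(conf) is CheckResult.PASSED
```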
gh_patches_debug_53987
rasdani/github-patches
git_diff
getredash__redash-784
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'datetime.timedelta' object has no attribute 'isoformat' On the latest 0.9.2-rc: ``` [2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError("'datetime.timedelta' object has no attribute 'isoformat'",) Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task R = retval = fun(*args, **kwargs) File "/opt/redash/redash/tasks.py", line 31, in __call__ return super(BaseTask, self).__call__(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 437, in __protected_call__ return self.run(*args, **kwargs) File "/opt/redash/redash/tasks.py", line 286, in execute_query data, error = query_runner.run_query(annotated_query) File "/opt/redash/redash/query_runner/pg.py", line 132, in run_query json_data = json.dumps(data, cls=JSONEncoder) File "/usr/lib/python2.7/json/__init__.py", line 250, in dumps sort_keys=sort_keys, **kw).encode(obj) File "/usr/lib/python2.7/json/encoder.py", line 207, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode return _iterencode(o, 0) File "/opt/redash/redash/utils.py", line 57, in default return o.isoformat() AttributeError: 'datetime.timedelta' object has no attribute 'isoformat' ``` </issue> <code> [start of redash/utils.py] 1 import cStringIO 2 import csv 3 import codecs 4 import decimal 5 import datetime 6 import json 7 import random 8 import re 9 import hashlib 10 import pytz 11 12 COMMENTS_REGEX = re.compile("/\*.*?\*/") 13 14 15 def utcnow(): 16 """Return datetime.now value with timezone specified. 17 18 Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server, 19 which leads to errors in calculations. 20 """ 21 return datetime.datetime.now(pytz.utc) 22 23 24 def slugify(s): 25 return re.sub('[^a-z0-9_\-]+', '-', s.lower()) 26 27 28 def gen_query_hash(sql): 29 """Returns hash of the given query after stripping all comments, line breaks and multiple 30 spaces, and lower casing all text. 31 32 TODO: possible issue - the following queries will get the same id: 33 1. SELECT 1 FROM table WHERE column='Value'; 34 2. SELECT 1 FROM table where column='value'; 35 """ 36 sql = COMMENTS_REGEX.sub("", sql) 37 sql = "".join(sql.split()).lower() 38 return hashlib.md5(sql.encode('utf-8')).hexdigest() 39 40 41 def generate_token(length): 42 chars = ('abcdefghijklmnopqrstuvwxyz' 43 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 44 '0123456789') 45 46 rand = random.SystemRandom() 47 return ''.join(rand.choice(chars) for x in range(length)) 48 49 class JSONEncoder(json.JSONEncoder): 50 """Custom JSON encoding class, to handle Decimal and datetime.date instances. 
51 """ 52 def default(self, o): 53 if isinstance(o, decimal.Decimal): 54 return float(o) 55 56 if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)): 57 return o.isoformat() 58 59 super(JSONEncoder, self).default(o) 60 61 62 def json_dumps(data): 63 return json.dumps(data, cls=JSONEncoder) 64 65 66 def build_url(request, host, path): 67 parts = request.host.split(':') 68 if len(parts) > 1: 69 port = parts[1] 70 if (port, request.scheme) not in (('80', 'http'), ('443', 'https')): 71 host = '{}:{}'.format(host, port) 72 73 return "{}://{}{}".format(request.scheme, host, path) 74 75 76 class UnicodeWriter: 77 """ 78 A CSV writer which will write rows to CSV file "f", 79 which is encoded in the given encoding. 80 """ 81 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): 82 # Redirect output to a queue 83 self.queue = cStringIO.StringIO() 84 self.writer = csv.writer(self.queue, dialect=dialect, **kwds) 85 self.stream = f 86 self.encoder = codecs.getincrementalencoder(encoding)() 87 88 def _encode_utf8(self, val): 89 if isinstance(val, (unicode, str)): 90 return val.encode('utf-8') 91 92 return val 93 94 def writerow(self, row): 95 self.writer.writerow([self._encode_utf8(s) for s in row]) 96 # Fetch UTF-8 output from the queue ... 97 data = self.queue.getvalue() 98 data = data.decode("utf-8") 99 # ... and reencode it into the target encoding 100 data = self.encoder.encode(data) 101 # write to the target stream 102 self.stream.write(data) 103 # empty queue 104 self.queue.truncate(0) 105 106 def writerows(self, rows): 107 for row in rows: 108 self.writerow(row) 109 [end of redash/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/utils.py b/redash/utils.py --- a/redash/utils.py +++ b/redash/utils.py @@ -53,9 +53,12 @@ if isinstance(o, decimal.Decimal): return float(o) - if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)): + if isinstance(o, (datetime.date, datetime.time)): return o.isoformat() - + + if isinstance(o, datetime.timedelta): + return str(o) + super(JSONEncoder, self).default(o)
{"golden_diff": "diff --git a/redash/utils.py b/redash/utils.py\n--- a/redash/utils.py\n+++ b/redash/utils.py\n@@ -53,9 +53,12 @@\n if isinstance(o, decimal.Decimal):\n return float(o)\n \n- if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n+ if isinstance(o, (datetime.date, datetime.time)):\n return o.isoformat()\n- \n+\n+ if isinstance(o, datetime.timedelta):\n+ return str(o)\n+\n super(JSONEncoder, self).default(o)\n", "issue": "AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\nOn the latest 0.9.2-rc:\n\n```\n[2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError(\"'datetime.timedelta' object has no attribute 'isoformat'\",)\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 240, in trace_task\n R = retval = fun(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 31, in __call__\n return super(BaseTask, self).__call__(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 437, in __protected_call__\n return self.run(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 286, in execute_query\n data, error = query_runner.run_query(annotated_query)\n File \"/opt/redash/redash/query_runner/pg.py\", line 132, in run_query\n json_data = json.dumps(data, cls=JSONEncoder)\n File \"/usr/lib/python2.7/json/__init__.py\", line 250, in dumps\n sort_keys=sort_keys, **kw).encode(obj)\n File \"/usr/lib/python2.7/json/encoder.py\", line 207, in encode\n chunks = self.iterencode(o, _one_shot=True)\n File \"/usr/lib/python2.7/json/encoder.py\", line 270, in iterencode\n return _iterencode(o, 0)\n File \"/opt/redash/redash/utils.py\", line 57, in default\n return o.isoformat()\nAttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\n```\n\n", "before_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport random\nimport re\nimport hashlib\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\ndef generate_token(length):\n chars = ('abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '0123456789')\n\n rand = random.SystemRandom()\n return ''.join(rand.choice(chars) for x in range(length))\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\ndef build_url(request, host, path):\n parts = request.host.split(':')\n if len(parts) > 1:\n port = parts[1]\n if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):\n host = '{}:{}'.format(host, port)\n\n return \"{}://{}{}\".format(request.scheme, host, path)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}]}
1,960
119
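The golden diff for this record splits the `isinstance` check: `date`/`time` keep `isoformat()`, while `timedelta`, which has no such method, is serialised with `str()`. A minimal standalone sketch of the patched encoder follows (it also returns the `super()` fallback, which the original omits):

```python
import datetime
import decimal
import json


class JSONEncoder(json.JSONEncoder):
    """Handle Decimal, date/time and timedelta values."""

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return float(o)
        if isinstance(o, (datetime.date, datetime.time)):
            return o.isoformat()
        if isinstance(o, datetime.timedelta):
            return str(o)  # e.g. "1:30:00"; timedelta has no isoformat()
        return super().default(o)


row = {
    "created_at": datetime.date(2016, 1, 21),
    "runtime": datetime.timedelta(hours=1, minutes=30),
    "cost": decimal.Decimal("1.25"),
}
print(json.dumps(row, cls=JSONEncoder))
# {"created_at": "2016-01-21", "runtime": "1:30:00", "cost": 1.25}
```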
gh_patches_debug_56069
rasdani/github-patches
git_diff
pypa__pip-9636
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The --target option clashes with other command line flags and config files The ``--target`` option clashes with several other command like flags and config files like ``--user`` and distutils setup. Ideally we should handle this far more gracefully. See also #3826, #4106, #562, #4139 </issue> <code> [start of src/pip/_internal/utils/setuptools_build.py] 1 import sys 2 from typing import List, Optional, Sequence 3 4 # Shim to wrap setup.py invocation with setuptools 5 # 6 # We set sys.argv[0] to the path to the underlying setup.py file so 7 # setuptools / distutils don't take the path to the setup.py to be "-c" when 8 # invoking via the shim. This avoids e.g. the following manifest_maker 9 # warning: "warning: manifest_maker: standard file '-c' not found". 10 _SETUPTOOLS_SHIM = ( 11 "import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};" 12 "f=getattr(tokenize, 'open', open)(__file__);" 13 "code=f.read().replace('\\r\\n', '\\n');" 14 "f.close();" 15 "exec(compile(code, __file__, 'exec'))" 16 ) 17 18 19 def make_setuptools_shim_args( 20 setup_py_path, # type: str 21 global_options=None, # type: Sequence[str] 22 no_user_config=False, # type: bool 23 unbuffered_output=False # type: bool 24 ): 25 # type: (...) -> List[str] 26 """ 27 Get setuptools command arguments with shim wrapped setup file invocation. 28 29 :param setup_py_path: The path to setup.py to be wrapped. 30 :param global_options: Additional global options. 31 :param no_user_config: If True, disables personal user configuration. 32 :param unbuffered_output: If True, adds the unbuffered switch to the 33 argument list. 34 """ 35 args = [sys.executable] 36 if unbuffered_output: 37 args += ["-u"] 38 args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)] 39 if global_options: 40 args += global_options 41 if no_user_config: 42 args += ["--no-user-cfg"] 43 return args 44 45 46 def make_setuptools_bdist_wheel_args( 47 setup_py_path, # type: str 48 global_options, # type: Sequence[str] 49 build_options, # type: Sequence[str] 50 destination_dir, # type: str 51 ): 52 # type: (...) -> List[str] 53 # NOTE: Eventually, we'd want to also -S to the flags here, when we're 54 # isolating. Currently, it breaks Python in virtualenvs, because it 55 # relies on site.py to find parts of the standard library outside the 56 # virtualenv. 57 args = make_setuptools_shim_args( 58 setup_py_path, 59 global_options=global_options, 60 unbuffered_output=True 61 ) 62 args += ["bdist_wheel", "-d", destination_dir] 63 args += build_options 64 return args 65 66 67 def make_setuptools_clean_args( 68 setup_py_path, # type: str 69 global_options, # type: Sequence[str] 70 ): 71 # type: (...) -> List[str] 72 args = make_setuptools_shim_args( 73 setup_py_path, 74 global_options=global_options, 75 unbuffered_output=True 76 ) 77 args += ["clean", "--all"] 78 return args 79 80 81 def make_setuptools_develop_args( 82 setup_py_path, # type: str 83 global_options, # type: Sequence[str] 84 install_options, # type: Sequence[str] 85 no_user_config, # type: bool 86 prefix, # type: Optional[str] 87 home, # type: Optional[str] 88 use_user_site, # type: bool 89 ): 90 # type: (...) 
-> List[str] 91 assert not (use_user_site and prefix) 92 93 args = make_setuptools_shim_args( 94 setup_py_path, 95 global_options=global_options, 96 no_user_config=no_user_config, 97 ) 98 99 args += ["develop", "--no-deps"] 100 101 args += install_options 102 103 if prefix: 104 args += ["--prefix", prefix] 105 if home is not None: 106 args += ["--home", home] 107 108 if use_user_site: 109 args += ["--user", "--prefix="] 110 111 return args 112 113 114 def make_setuptools_egg_info_args( 115 setup_py_path, # type: str 116 egg_info_dir, # type: Optional[str] 117 no_user_config, # type: bool 118 ): 119 # type: (...) -> List[str] 120 args = make_setuptools_shim_args( 121 setup_py_path, no_user_config=no_user_config 122 ) 123 124 args += ["egg_info"] 125 126 if egg_info_dir: 127 args += ["--egg-base", egg_info_dir] 128 129 return args 130 131 132 def make_setuptools_install_args( 133 setup_py_path, # type: str 134 global_options, # type: Sequence[str] 135 install_options, # type: Sequence[str] 136 record_filename, # type: str 137 root, # type: Optional[str] 138 prefix, # type: Optional[str] 139 header_dir, # type: Optional[str] 140 home, # type: Optional[str] 141 use_user_site, # type: bool 142 no_user_config, # type: bool 143 pycompile # type: bool 144 ): 145 # type: (...) -> List[str] 146 assert not (use_user_site and prefix) 147 assert not (use_user_site and root) 148 149 args = make_setuptools_shim_args( 150 setup_py_path, 151 global_options=global_options, 152 no_user_config=no_user_config, 153 unbuffered_output=True 154 ) 155 args += ["install", "--record", record_filename] 156 args += ["--single-version-externally-managed"] 157 158 if root is not None: 159 args += ["--root", root] 160 if prefix is not None: 161 args += ["--prefix", prefix] 162 if home is not None: 163 args += ["--home", home] 164 if use_user_site: 165 args += ["--user", "--prefix="] 166 167 if pycompile: 168 args += ["--compile"] 169 else: 170 args += ["--no-compile"] 171 172 if header_dir: 173 args += ["--install-headers", header_dir] 174 175 args += install_options 176 177 return args 178 [end of src/pip/_internal/utils/setuptools_build.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pip/_internal/utils/setuptools_build.py b/src/pip/_internal/utils/setuptools_build.py --- a/src/pip/_internal/utils/setuptools_build.py +++ b/src/pip/_internal/utils/setuptools_build.py @@ -103,7 +103,7 @@ if prefix: args += ["--prefix", prefix] if home is not None: - args += ["--home", home] + args += ["--install-dir", home] if use_user_site: args += ["--user", "--prefix="]
{"golden_diff": "diff --git a/src/pip/_internal/utils/setuptools_build.py b/src/pip/_internal/utils/setuptools_build.py\n--- a/src/pip/_internal/utils/setuptools_build.py\n+++ b/src/pip/_internal/utils/setuptools_build.py\n@@ -103,7 +103,7 @@\n if prefix:\n args += [\"--prefix\", prefix]\n if home is not None:\n- args += [\"--home\", home]\n+ args += [\"--install-dir\", home]\n \n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n", "issue": "The --target option clashes with other command line flags and config files\nThe ``--target`` option clashes with several other command like flags and config files like ``--user`` and distutils setup. Ideally we should handle this far more gracefully.\r\n\r\nSee also #3826, #4106, #562, #4139\n", "before_files": [{"content": "import sys\nfrom typing import List, Optional, Sequence\n\n# Shim to wrap setup.py invocation with setuptools\n#\n# We set sys.argv[0] to the path to the underlying setup.py file so\n# setuptools / distutils don't take the path to the setup.py to be \"-c\" when\n# invoking via the shim. This avoids e.g. the following manifest_maker\n# warning: \"warning: manifest_maker: standard file '-c' not found\".\n_SETUPTOOLS_SHIM = (\n \"import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};\"\n \"f=getattr(tokenize, 'open', open)(__file__);\"\n \"code=f.read().replace('\\\\r\\\\n', '\\\\n');\"\n \"f.close();\"\n \"exec(compile(code, __file__, 'exec'))\"\n)\n\n\ndef make_setuptools_shim_args(\n setup_py_path, # type: str\n global_options=None, # type: Sequence[str]\n no_user_config=False, # type: bool\n unbuffered_output=False # type: bool\n):\n # type: (...) -> List[str]\n \"\"\"\n Get setuptools command arguments with shim wrapped setup file invocation.\n\n :param setup_py_path: The path to setup.py to be wrapped.\n :param global_options: Additional global options.\n :param no_user_config: If True, disables personal user configuration.\n :param unbuffered_output: If True, adds the unbuffered switch to the\n argument list.\n \"\"\"\n args = [sys.executable]\n if unbuffered_output:\n args += [\"-u\"]\n args += [\"-c\", _SETUPTOOLS_SHIM.format(setup_py_path)]\n if global_options:\n args += global_options\n if no_user_config:\n args += [\"--no-user-cfg\"]\n return args\n\n\ndef make_setuptools_bdist_wheel_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n build_options, # type: Sequence[str]\n destination_dir, # type: str\n):\n # type: (...) -> List[str]\n # NOTE: Eventually, we'd want to also -S to the flags here, when we're\n # isolating. Currently, it breaks Python in virtualenvs, because it\n # relies on site.py to find parts of the standard library outside the\n # virtualenv.\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n unbuffered_output=True\n )\n args += [\"bdist_wheel\", \"-d\", destination_dir]\n args += build_options\n return args\n\n\ndef make_setuptools_clean_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n):\n # type: (...) -> List[str]\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n unbuffered_output=True\n )\n args += [\"clean\", \"--all\"]\n return args\n\n\ndef make_setuptools_develop_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n install_options, # type: Sequence[str]\n no_user_config, # type: bool\n prefix, # type: Optional[str]\n home, # type: Optional[str]\n use_user_site, # type: bool\n):\n # type: (...) 
-> List[str]\n assert not (use_user_site and prefix)\n\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n no_user_config=no_user_config,\n )\n\n args += [\"develop\", \"--no-deps\"]\n\n args += install_options\n\n if prefix:\n args += [\"--prefix\", prefix]\n if home is not None:\n args += [\"--home\", home]\n\n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n\n return args\n\n\ndef make_setuptools_egg_info_args(\n setup_py_path, # type: str\n egg_info_dir, # type: Optional[str]\n no_user_config, # type: bool\n):\n # type: (...) -> List[str]\n args = make_setuptools_shim_args(\n setup_py_path, no_user_config=no_user_config\n )\n\n args += [\"egg_info\"]\n\n if egg_info_dir:\n args += [\"--egg-base\", egg_info_dir]\n\n return args\n\n\ndef make_setuptools_install_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n install_options, # type: Sequence[str]\n record_filename, # type: str\n root, # type: Optional[str]\n prefix, # type: Optional[str]\n header_dir, # type: Optional[str]\n home, # type: Optional[str]\n use_user_site, # type: bool\n no_user_config, # type: bool\n pycompile # type: bool\n):\n # type: (...) -> List[str]\n assert not (use_user_site and prefix)\n assert not (use_user_site and root)\n\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n no_user_config=no_user_config,\n unbuffered_output=True\n )\n args += [\"install\", \"--record\", record_filename]\n args += [\"--single-version-externally-managed\"]\n\n if root is not None:\n args += [\"--root\", root]\n if prefix is not None:\n args += [\"--prefix\", prefix]\n if home is not None:\n args += [\"--home\", home]\n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n\n if pycompile:\n args += [\"--compile\"]\n else:\n args += [\"--no-compile\"]\n\n if header_dir:\n args += [\"--install-headers\", header_dir]\n\n args += install_options\n\n return args\n", "path": "src/pip/_internal/utils/setuptools_build.py"}]}
2,384
125
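The golden diff for this record touches only the `develop` path: when a `home` directory is requested, `setup.py develop` now receives `--install-dir` instead of `--home`. The sketch below is a heavily trimmed stand-in for pip's real helper (the setuptools shim, global options and user-config flags are omitted, and the example path is a placeholder), showing only how the argument list is assembled after the change.

```python
import sys
from typing import List, Optional, Sequence


def make_develop_args(
    setup_py_path: str,
    install_options: Sequence[str] = (),
    prefix: Optional[str] = None,
    home: Optional[str] = None,
    use_user_site: bool = False,
) -> List[str]:
    assert not (use_user_site and prefix)
    args = [sys.executable, setup_py_path, "develop", "--no-deps"]
    args += list(install_options)
    if prefix:
        args += ["--prefix", prefix]
    if home is not None:
        args += ["--install-dir", home]  # was ["--home", home] before the fix
    if use_user_site:
        args += ["--user", "--prefix="]
    return args


print(make_develop_args("setup.py", home="/tmp/pip-target"))
# e.g. ['/usr/bin/python3', 'setup.py', 'develop', '--no-deps', '--install-dir', '/tmp/pip-target']
```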
gh_patches_debug_9891
rasdani/github-patches
git_diff
spack__spack-4489
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> metis 4.0.3 doesn't install on macOS 10.11.6 with appleclang 7.0.3 A gist of the installation log is at https://gist.github.com/goxberry/b9ca39f4d7c5cd6b0905aaaa42f53746 If I had to guess, the error at the end of the log re: the RPATH option `-rpath=$STUFF` is the problem; IIRC, that syntax works on GCC, but not with LLVM/clang. Replacing `-rpath=` with `-rpath,` (that is, replace the equals sign with a comma) should work for both GCC and LLVM/clang compilers. </issue> <code> [start of var/spack/repos/builtin/packages/metis/package.py] 1 ############################################################################## 2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the LICENSE file for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 26 from spack import * 27 import glob 28 import sys 29 import os 30 31 32 class Metis(Package): 33 """METIS is a set of serial programs for partitioning graphs, partitioning 34 finite element meshes, and producing fill reducing orderings for sparse 35 matrices. 
The algorithms implemented in METIS are based on the 36 multilevel recursive-bisection, multilevel k-way, and multi-constraint 37 partitioning schemes.""" 38 39 homepage = "http://glaros.dtc.umn.edu/gkhome/metis/metis/overview" 40 url = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz" 41 list_url = "http://glaros.dtc.umn.edu/gkhome/fsroot/sw/metis/OLD" 42 43 version('5.1.0', '5465e67079419a69e0116de24fce58fe') 44 version('5.0.2', 'acb521a4e8c2e6dd559a7f9abd0468c5') 45 version('4.0.3', 'd3848b454532ef18dc83e4fb160d1e10') 46 47 variant('shared', default=True, description='Enables the build of shared libraries.') 48 variant('debug', default=False, description='Builds the library in debug mode.') 49 variant('gdb', default=False, description='Enables gdb support.') 50 51 variant('int64', default=False, description='Sets the bit width of METIS\'s index type to 64.') 52 variant('real64', default=False, description='Sets the bit width of METIS\'s real type to 64.') 53 54 depends_on('[email protected]:', when='@5:', type='build') 55 56 patch('install_gklib_defs_rename.patch', when='@5:') 57 58 def url_for_version(self, version): 59 url = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis" 60 if version < Version('4.0.3'): 61 url += "/OLD" 62 url += "/metis-{0}.tar.gz".format(version) 63 return url 64 65 @when('@5:') 66 def patch(self): 67 source_path = self.stage.source_path 68 metis_header = FileFilter(join_path(source_path, 'include', 'metis.h')) 69 70 metis_header.filter( 71 r'(\b)(IDXTYPEWIDTH )(\d+)(\b)', 72 r'\1\2{0}\4'.format('64' if '+int64' in self.spec else '32'), 73 ) 74 metis_header.filter( 75 r'(\b)(REALTYPEWIDTH )(\d+)(\b)', 76 r'\1\2{0}\4'.format('64' if '+real64' in self.spec else '32'), 77 ) 78 79 # Make clang 7.3 happy. 80 # Prevents "ld: section __DATA/__thread_bss extends beyond end of file" 81 # See upstream LLVM issue https://llvm.org/bugs/show_bug.cgi?id=27059 82 # and https://github.com/Homebrew/homebrew-science/blob/master/metis.rb 83 if self.spec.satisfies('%[email protected]'): 84 filter_file('#define MAX_JBUFS 128', '#define MAX_JBUFS 24', 85 join_path(source_path, 'GKlib', 'error.c')) 86 87 @when('@:4') 88 def install(self, spec, prefix): 89 # Process library spec and options 90 if any('+{0}'.format(v) in spec for v in ['gdb', 'int64', 'real64']): 91 raise InstallError('METIS@:4 does not support the following ' 92 'variants: gdb, int64, real64.') 93 94 options = ['COPTIONS=-fPIC'] 95 if '+debug' in spec: 96 options.append('OPTFLAGS=-g -O0') 97 make(*options) 98 99 # Compile and install library files 100 ccompile = Executable(self.compiler.cc) 101 102 mkdir(prefix.bin) 103 binfiles = ('pmetis', 'kmetis', 'oemetis', 'onmetis', 'partnmesh', 104 'partdmesh', 'mesh2nodal', 'mesh2dual', 'graphchk') 105 for binfile in binfiles: 106 install(binfile, prefix.bin) 107 108 mkdir(prefix.lib) 109 install('libmetis.a', prefix.lib) 110 111 mkdir(prefix.include) 112 for h in glob.glob(join_path('Lib', '*.h')): 113 install(h, prefix.include) 114 115 mkdir(prefix.share) 116 sharefiles = (('Graphs', '4elt.graph'), ('Graphs', 'metis.mesh'), 117 ('Graphs', 'test.mgraph')) 118 for sharefile in tuple(join_path(*sf) for sf in sharefiles): 119 install(sharefile, prefix.share) 120 121 if '+shared' in spec: 122 shared_flags = ['-fPIC', '-shared'] 123 if sys.platform == 'darwin': 124 shared_suffix = 'dylib' 125 shared_flags.extend(['-Wl,-all_load', 'libmetis.a']) 126 else: 127 shared_suffix = 'so' 128 shared_flags.extend(['-Wl,-whole-archive', 'libmetis.a', 129 
'-Wl,-no-whole-archive']) 130 131 shared_out = '%s/libmetis.%s' % (prefix.lib, shared_suffix) 132 shared_flags.extend(['-o', shared_out]) 133 134 ccompile(*shared_flags) 135 136 # Set up and run tests on installation 137 ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib, 138 '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''), 139 join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'), 140 '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm') 141 142 if self.run_tests: 143 test_bin = lambda testname: join_path(prefix.bin, testname) 144 test_graph = lambda graphname: join_path(prefix.share, graphname) 145 146 graph = test_graph('4elt.graph') 147 os.system('%s %s' % (test_bin('mtest'), graph)) 148 os.system('%s %s 40' % (test_bin('kmetis'), graph)) 149 os.system('%s %s' % (test_bin('onmetis'), graph)) 150 graph = test_graph('test.mgraph') 151 os.system('%s %s 2' % (test_bin('pmetis'), graph)) 152 os.system('%s %s 2' % (test_bin('kmetis'), graph)) 153 os.system('%s %s 5' % (test_bin('kmetis'), graph)) 154 graph = test_graph('metis.mesh') 155 os.system('%s %s 10' % (test_bin('partnmesh'), graph)) 156 os.system('%s %s 10' % (test_bin('partdmesh'), graph)) 157 os.system('%s %s' % (test_bin('mesh2dual'), graph)) 158 159 # FIXME: The following code should replace the testing code in the 160 # block above since it causes installs to fail when one or more of 161 # the Metis tests fail, but it currently doesn't work because the 162 # 'mtest', 'onmetis', and 'partnmesh' tests return error codes that 163 # trigger false positives for failure. 164 """ 165 Executable(test_bin('mtest'))(test_graph('4elt.graph')) 166 Executable(test_bin('kmetis'))(test_graph('4elt.graph'), '40') 167 Executable(test_bin('onmetis'))(test_graph('4elt.graph')) 168 169 Executable(test_bin('pmetis'))(test_graph('test.mgraph'), '2') 170 Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '2') 171 Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '5') 172 173 Executable(test_bin('partnmesh'))(test_graph('metis.mesh'), '10') 174 Executable(test_bin('partdmesh'))(test_graph('metis.mesh'), '10') 175 Executable(test_bin('mesh2dual'))(test_graph('metis.mesh')) 176 """ 177 178 @when('@5:') 179 def install(self, spec, prefix): 180 source_directory = self.stage.source_path 181 build_directory = join_path(source_directory, 'build') 182 183 options = std_cmake_args[:] 184 options.append('-DGKLIB_PATH:PATH=%s/GKlib' % source_directory) 185 options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % prefix) 186 187 if '+shared' in spec: 188 options.append('-DSHARED:BOOL=ON') 189 else: 190 # Remove all RPATH options 191 # (RPATHxxx options somehow trigger cmake to link dynamically) 192 rpath_options = [] 193 for o in options: 194 if o.find('RPATH') >= 0: 195 rpath_options.append(o) 196 for o in rpath_options: 197 options.remove(o) 198 if '+debug' in spec: 199 options.extend(['-DDEBUG:BOOL=ON', 200 '-DCMAKE_BUILD_TYPE:STRING=Debug']) 201 if '+gdb' in spec: 202 options.append('-DGDB:BOOL=ON') 203 204 with working_dir(build_directory, create=True): 205 cmake(source_directory, *options) 206 make() 207 make('install') 208 209 # install GKlib headers, which will be needed for ParMETIS 210 GKlib_dist = join_path(prefix.include, 'GKlib') 211 mkdirp(GKlib_dist) 212 hfiles = glob.glob(join_path(source_directory, 'GKlib', '*.h')) 213 for hfile in hfiles: 214 install(hfile, GKlib_dist) 215 216 if self.run_tests: 217 # FIXME: On some systems, the installed binaries for METIS cannot 218 # be executed without first being read. 
219 ls = which('ls') 220 ls('-a', '-l', prefix.bin) 221 222 for f in ['4elt', 'copter2', 'mdual']: 223 graph = join_path(source_directory, 'graphs', '%s.graph' % f) 224 Executable(join_path(prefix.bin, 'graphchk'))(graph) 225 Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2') 226 Executable(join_path(prefix.bin, 'ndmetis'))(graph) 227 228 graph = join_path(source_directory, 'graphs', 'test.mgraph') 229 Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2') 230 graph = join_path(source_directory, 'graphs', 'metis.mesh') 231 Executable(join_path(prefix.bin, 'mpmetis'))(graph, '2') 232 [end of var/spack/repos/builtin/packages/metis/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/metis/package.py b/var/spack/repos/builtin/packages/metis/package.py --- a/var/spack/repos/builtin/packages/metis/package.py +++ b/var/spack/repos/builtin/packages/metis/package.py @@ -135,7 +135,8 @@ # Set up and run tests on installation ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib, - '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''), + self.compiler.cc_rpath_arg + + '%s' % (prefix.lib if '+shared' in spec else ''), join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'), '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/metis/package.py b/var/spack/repos/builtin/packages/metis/package.py\n--- a/var/spack/repos/builtin/packages/metis/package.py\n+++ b/var/spack/repos/builtin/packages/metis/package.py\n@@ -135,7 +135,8 @@\n \n # Set up and run tests on installation\n ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,\n- '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''),\n+ self.compiler.cc_rpath_arg +\n+ '%s' % (prefix.lib if '+shared' in spec else ''),\n join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),\n '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')\n", "issue": "metis 4.0.3 doesn't install on macOS 10.11.6 with appleclang 7.0.3\nA gist of the installation log is at https://gist.github.com/goxberry/b9ca39f4d7c5cd6b0905aaaa42f53746\r\n\r\nIf I had to guess, the error at the end of the log re: the RPATH option `-rpath=$STUFF` is the problem; IIRC, that syntax works on GCC, but not with LLVM/clang. Replacing `-rpath=` with `-rpath,` (that is, replace the equals sign with a comma) should work for both GCC and LLVM/clang compilers.\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\nimport glob\nimport sys\nimport os\n\n\nclass Metis(Package):\n \"\"\"METIS is a set of serial programs for partitioning graphs, partitioning\n finite element meshes, and producing fill reducing orderings for sparse\n matrices. 
The algorithms implemented in METIS are based on the\n multilevel recursive-bisection, multilevel k-way, and multi-constraint\n partitioning schemes.\"\"\"\n\n homepage = \"http://glaros.dtc.umn.edu/gkhome/metis/metis/overview\"\n url = \"http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz\"\n list_url = \"http://glaros.dtc.umn.edu/gkhome/fsroot/sw/metis/OLD\"\n\n version('5.1.0', '5465e67079419a69e0116de24fce58fe')\n version('5.0.2', 'acb521a4e8c2e6dd559a7f9abd0468c5')\n version('4.0.3', 'd3848b454532ef18dc83e4fb160d1e10')\n\n variant('shared', default=True, description='Enables the build of shared libraries.')\n variant('debug', default=False, description='Builds the library in debug mode.')\n variant('gdb', default=False, description='Enables gdb support.')\n\n variant('int64', default=False, description='Sets the bit width of METIS\\'s index type to 64.')\n variant('real64', default=False, description='Sets the bit width of METIS\\'s real type to 64.')\n\n depends_on('[email protected]:', when='@5:', type='build')\n\n patch('install_gklib_defs_rename.patch', when='@5:')\n\n def url_for_version(self, version):\n url = \"http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis\"\n if version < Version('4.0.3'):\n url += \"/OLD\"\n url += \"/metis-{0}.tar.gz\".format(version)\n return url\n\n @when('@5:')\n def patch(self):\n source_path = self.stage.source_path\n metis_header = FileFilter(join_path(source_path, 'include', 'metis.h'))\n\n metis_header.filter(\n r'(\\b)(IDXTYPEWIDTH )(\\d+)(\\b)',\n r'\\1\\2{0}\\4'.format('64' if '+int64' in self.spec else '32'),\n )\n metis_header.filter(\n r'(\\b)(REALTYPEWIDTH )(\\d+)(\\b)',\n r'\\1\\2{0}\\4'.format('64' if '+real64' in self.spec else '32'),\n )\n\n # Make clang 7.3 happy.\n # Prevents \"ld: section __DATA/__thread_bss extends beyond end of file\"\n # See upstream LLVM issue https://llvm.org/bugs/show_bug.cgi?id=27059\n # and https://github.com/Homebrew/homebrew-science/blob/master/metis.rb\n if self.spec.satisfies('%[email protected]'):\n filter_file('#define MAX_JBUFS 128', '#define MAX_JBUFS 24',\n join_path(source_path, 'GKlib', 'error.c'))\n\n @when('@:4')\n def install(self, spec, prefix):\n # Process library spec and options\n if any('+{0}'.format(v) in spec for v in ['gdb', 'int64', 'real64']):\n raise InstallError('METIS@:4 does not support the following '\n 'variants: gdb, int64, real64.')\n\n options = ['COPTIONS=-fPIC']\n if '+debug' in spec:\n options.append('OPTFLAGS=-g -O0')\n make(*options)\n\n # Compile and install library files\n ccompile = Executable(self.compiler.cc)\n\n mkdir(prefix.bin)\n binfiles = ('pmetis', 'kmetis', 'oemetis', 'onmetis', 'partnmesh',\n 'partdmesh', 'mesh2nodal', 'mesh2dual', 'graphchk')\n for binfile in binfiles:\n install(binfile, prefix.bin)\n\n mkdir(prefix.lib)\n install('libmetis.a', prefix.lib)\n\n mkdir(prefix.include)\n for h in glob.glob(join_path('Lib', '*.h')):\n install(h, prefix.include)\n\n mkdir(prefix.share)\n sharefiles = (('Graphs', '4elt.graph'), ('Graphs', 'metis.mesh'),\n ('Graphs', 'test.mgraph'))\n for sharefile in tuple(join_path(*sf) for sf in sharefiles):\n install(sharefile, prefix.share)\n\n if '+shared' in spec:\n shared_flags = ['-fPIC', '-shared']\n if sys.platform == 'darwin':\n shared_suffix = 'dylib'\n shared_flags.extend(['-Wl,-all_load', 'libmetis.a'])\n else:\n shared_suffix = 'so'\n shared_flags.extend(['-Wl,-whole-archive', 'libmetis.a',\n '-Wl,-no-whole-archive'])\n\n shared_out = '%s/libmetis.%s' % (prefix.lib, shared_suffix)\n 
shared_flags.extend(['-o', shared_out])\n\n ccompile(*shared_flags)\n\n # Set up and run tests on installation\n ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,\n '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''),\n join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),\n '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')\n\n if self.run_tests:\n test_bin = lambda testname: join_path(prefix.bin, testname)\n test_graph = lambda graphname: join_path(prefix.share, graphname)\n\n graph = test_graph('4elt.graph')\n os.system('%s %s' % (test_bin('mtest'), graph))\n os.system('%s %s 40' % (test_bin('kmetis'), graph))\n os.system('%s %s' % (test_bin('onmetis'), graph))\n graph = test_graph('test.mgraph')\n os.system('%s %s 2' % (test_bin('pmetis'), graph))\n os.system('%s %s 2' % (test_bin('kmetis'), graph))\n os.system('%s %s 5' % (test_bin('kmetis'), graph))\n graph = test_graph('metis.mesh')\n os.system('%s %s 10' % (test_bin('partnmesh'), graph))\n os.system('%s %s 10' % (test_bin('partdmesh'), graph))\n os.system('%s %s' % (test_bin('mesh2dual'), graph))\n\n # FIXME: The following code should replace the testing code in the\n # block above since it causes installs to fail when one or more of\n # the Metis tests fail, but it currently doesn't work because the\n # 'mtest', 'onmetis', and 'partnmesh' tests return error codes that\n # trigger false positives for failure.\n \"\"\"\n Executable(test_bin('mtest'))(test_graph('4elt.graph'))\n Executable(test_bin('kmetis'))(test_graph('4elt.graph'), '40')\n Executable(test_bin('onmetis'))(test_graph('4elt.graph'))\n\n Executable(test_bin('pmetis'))(test_graph('test.mgraph'), '2')\n Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '2')\n Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '5')\n\n Executable(test_bin('partnmesh'))(test_graph('metis.mesh'), '10')\n Executable(test_bin('partdmesh'))(test_graph('metis.mesh'), '10')\n Executable(test_bin('mesh2dual'))(test_graph('metis.mesh'))\n \"\"\"\n\n @when('@5:')\n def install(self, spec, prefix):\n source_directory = self.stage.source_path\n build_directory = join_path(source_directory, 'build')\n\n options = std_cmake_args[:]\n options.append('-DGKLIB_PATH:PATH=%s/GKlib' % source_directory)\n options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % prefix)\n\n if '+shared' in spec:\n options.append('-DSHARED:BOOL=ON')\n else:\n # Remove all RPATH options\n # (RPATHxxx options somehow trigger cmake to link dynamically)\n rpath_options = []\n for o in options:\n if o.find('RPATH') >= 0:\n rpath_options.append(o)\n for o in rpath_options:\n options.remove(o)\n if '+debug' in spec:\n options.extend(['-DDEBUG:BOOL=ON',\n '-DCMAKE_BUILD_TYPE:STRING=Debug'])\n if '+gdb' in spec:\n options.append('-DGDB:BOOL=ON')\n\n with working_dir(build_directory, create=True):\n cmake(source_directory, *options)\n make()\n make('install')\n\n # install GKlib headers, which will be needed for ParMETIS\n GKlib_dist = join_path(prefix.include, 'GKlib')\n mkdirp(GKlib_dist)\n hfiles = glob.glob(join_path(source_directory, 'GKlib', '*.h'))\n for hfile in hfiles:\n install(hfile, GKlib_dist)\n\n if self.run_tests:\n # FIXME: On some systems, the installed binaries for METIS cannot\n # be executed without first being read.\n ls = which('ls')\n ls('-a', '-l', prefix.bin)\n\n for f in ['4elt', 'copter2', 'mdual']:\n graph = join_path(source_directory, 'graphs', '%s.graph' % f)\n Executable(join_path(prefix.bin, 'graphchk'))(graph)\n Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')\n 
Executable(join_path(prefix.bin, 'ndmetis'))(graph)\n\n graph = join_path(source_directory, 'graphs', 'test.mgraph')\n Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')\n graph = join_path(source_directory, 'graphs', 'metis.mesh')\n Executable(join_path(prefix.bin, 'mpmetis'))(graph, '2')\n", "path": "var/spack/repos/builtin/packages/metis/package.py"}]}
3,947
190
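The golden diff for this record drops the hard-coded `-Wl,-rpath=%s` in favour of `self.compiler.cc_rpath_arg`, since, per the issue, the `=` spelling works with GCC but is rejected on macOS with appleclang, while the comma spelling works for both. The `cc_rpath_arg` name comes straight from the diff; the constants and paths in the sketch below are illustrative placeholders, not Spack internals.

```python
GNU_ONLY = "-Wl,-rpath={}"   # accepted by GNU toolchains, breaks with appleclang (per the issue)
PORTABLE = "-Wl,-rpath,{}"   # comma form, accepted by GCC and clang alike


def mtest_link_command(prefix_lib: str, rpath_template: str = PORTABLE) -> list:
    # Mirrors the shape of the test-build link line from the package's install() step.
    return [
        "cc", "-I/opt/metis/include", f"-L{prefix_lib}",
        rpath_template.format(prefix_lib),
        "Programs/io.o", "Test/mtest.c",
        "-o", "mtest", "-lmetis", "-lm",
    ]


print(" ".join(mtest_link_command("/opt/metis/lib")))
# cc -I/opt/metis/include -L/opt/metis/lib -Wl,-rpath,/opt/metis/lib Programs/io.o Test/mtest.c -o mtest -lmetis -lm
```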
gh_patches_debug_43914
rasdani/github-patches
git_diff
google__TensorNetwork-580
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ncon should take a BaseBackend object Since we are now supporting passing `BaseBackend` to `Node` constructors, we should also allow it in `ncon` </issue> <code> [start of tensornetwork/ncon_interface.py] 1 # Copyright 2019 The TensorNetwork Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """NCON interface to TensorNetwork.""" 15 16 import warnings 17 from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict 18 from tensornetwork import network_components 19 from tensornetwork.backend_contextmanager import get_default_backend 20 from tensornetwork.backends import backend_factory 21 Tensor = Any 22 23 24 def ncon( 25 tensors: Sequence[Union[network_components.BaseNode, Tensor]], 26 network_structure: Sequence[Sequence], 27 con_order: Optional[Sequence] = None, 28 out_order: Optional[Sequence] = None, 29 backend: Optional[Text] = None 30 ) -> Union[network_components.BaseNode, Tensor]: 31 r"""Contracts a list of tensors or nodes according to a tensor network 32 specification. 33 34 The network is provided as a list of lists, one for each 35 tensor, specifying labels for the edges connected to that tensor. 36 37 If a contraction order `con_order` and an output order `out_order` 38 are both provided, the edge labels can be anything. 39 Otherwise (`con_order == None or out_order == None`), the edge labels 40 must be nonzero integers and edges will be contracted in ascending order. 41 Negative integers denote the (dangling) indices of the output tensor, 42 which will be in descending order, e.g. `[-1,-2,-3,...]`. 43 44 For example, matrix multiplication: 45 46 .. code-block:: python 47 48 A = np.array([[1.0, 2.0], [3.0, 4.0]]) 49 B = np.array([[1.0, 1.0], [0.0, 1.0]]) 50 ncon([A,B], [(-1, 1), (1, -2)]) 51 52 Matrix trace: 53 54 .. code-block:: python 55 56 A = np.array([[1.0, 2.0], [3.0, 4.0]]) 57 ncon([A], [(1, 1)]) # 5.0 58 59 Note: 60 The reason `0` is not allowed as an edge label without manually 61 specifying the contraction order is to maintain compatibility with the 62 `original NCON implementation`_. However, the use of `0` in `con_order` 63 to denote outer products is not (currently) 64 supported in this implementation. 65 66 .. _original NCON implementation: 67 https://arxiv.org/abs/1402.0939 68 69 Args: 70 tensors: List of `Tensors` or `BaseNodes`. 71 network_structure: List of lists specifying the tensor network structure. 72 con_order: List of edge labels specifying the contraction order. 73 out_order: List of edge labels specifying the output order. 74 backend: String specifying the backend to use. Defaults to 75 `tensornetwork.backend_contextmanager.get_default_backend`. 76 77 Returns: 78 The result of the contraction. The result is returned as a `Node` 79 if all elements of `tensors` are `BaseNode` objects, else 80 it is returned as a `Tensor` object. 
81 """ 82 if backend and (backend not in backend_factory._BACKENDS): 83 raise ValueError("Backend '{}' does not exist".format(backend)) 84 if backend is None: 85 backend = get_default_backend() 86 87 are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors] 88 nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)} 89 if not all([n.backend.name == backend for n in nodes]): 90 raise ValueError( 91 "Some nodes have backends different from '{}'".format(backend)) 92 93 _tensors = [] 94 for t in tensors: 95 if isinstance(t, network_components.BaseNode): 96 _tensors.append(t.tensor) 97 else: 98 _tensors.append(t) 99 100 nodes, con_edges, out_edges = ncon_network( 101 _tensors, 102 network_structure, 103 con_order=con_order, 104 out_order=out_order, 105 backend=backend) 106 107 nodes = set(nodes) # we don't need the ordering here 108 109 # Reverse the list so we can pop from the end: O(1). 110 con_edges = con_edges[::-1] 111 while con_edges: 112 nodes_to_contract = con_edges[-1].get_nodes() 113 edges_to_contract = network_components.get_shared_edges(*nodes_to_contract) 114 115 # Eat up all parallel edges that are adjacent in the ordering. 116 adjacent_parallel_edges = set() 117 for edge in reversed(con_edges): 118 if edge in edges_to_contract: 119 adjacent_parallel_edges.add(edge) 120 else: 121 break 122 con_edges = con_edges[:-len(adjacent_parallel_edges)] 123 124 # In an optimal ordering, all edges connecting a given pair of nodes are 125 # adjacent in con_order. If this is not the case, warn the user. 126 leftovers = edges_to_contract - adjacent_parallel_edges 127 if leftovers: 128 warnings.warn( 129 "Suboptimal ordering detected. Edges {} are not adjacent in the " 130 "contraction order to edges {}, connecting nodes {}. Deviating from " 131 "the specified ordering!".format( 132 list(map(str, leftovers)), 133 list(map(str, adjacent_parallel_edges)), 134 list(map(str, nodes_to_contract)))) 135 con_edges = [e for e in con_edges if e not in edges_to_contract] 136 137 if set(nodes_to_contract) == nodes: 138 # This contraction produces the final output, so order the edges 139 # here to avoid transposes in some cases. 140 contraction_output_order = out_edges 141 else: 142 contraction_output_order = None 143 144 nodes = nodes - set(nodes_to_contract) 145 nodes.add( 146 network_components.contract_between( 147 *nodes_to_contract, 148 name="con({},{})".format(*nodes_to_contract), 149 output_edge_order=contraction_output_order)) 150 151 # TODO: More efficient ordering of products based on out_edges 152 res_node = network_components.outer_product_final_nodes(nodes, out_edges) 153 if all(are_nodes): 154 return res_node 155 return res_node.tensor 156 157 158 def ncon_network( 159 tensors: Sequence[Tensor], 160 network_structure: Sequence[Sequence], 161 con_order: Optional[Sequence] = None, 162 out_order: Optional[Sequence] = None, 163 backend: Optional[Text] = None 164 ) -> Tuple[List[network_components.BaseNode], List[network_components.Edge], 165 List[network_components.Edge]]: 166 r"""Creates a network from a list of tensors according to `tensors`. 167 168 The network is provided as a list of lists, one for each 169 tensor, specifying labels for the edges connected to that tensor. 170 171 If a contraction order `con_order` and an output order `out_order` 172 are both provided, the edge labels can be anything. 173 Otherwise (`con_order == None or out_order == None`), the edge labels 174 must be integers and edges will be contracted in ascending order. 
175 Negative integers denote the (dangling) indices of the output tensor, 176 which will be in descending order, e.g. `[-1,-2,-3,...]`. 177 178 This is used internally by `ncon()`. 179 180 Args: 181 tensors: List of `Tensor`s. 182 network_structure: List of lists specifying the tensor network. 183 con_order: List of edge labels specifying the contraction order. 184 out_order: List of edge labels specifying the output order. 185 backend: String specifying the backend to use. Defaults to the default 186 TensorNetwork backend. 187 188 Returns: 189 nodes: List of constructed nodes in the same order as given in `tensors`. 190 con_edges: List of internal `Edge` objects in contraction order. 191 out_edges: List of dangling `Edge` objects in output order. 192 """ 193 if len(tensors) != len(network_structure): 194 raise ValueError('len(tensors) != len(network_structure)') 195 196 nodes, edges = _build_network(tensors, network_structure, backend) 197 198 if con_order is None: 199 try: 200 con_order = sorted((k for k in edges if k >= 0)) 201 if con_order and con_order[0] == 0: 202 raise ValueError("'0' is not a valid edge label when the " 203 "contraction order is not specified separately.") 204 except TypeError: 205 raise ValueError("Non-integer edge label(s): {}".format( 206 list(edges.keys()))) 207 else: 208 if len(con_order) != len(set(con_order)): 209 raise ValueError("Duplicate labels in con_order: {}".format(con_order)) 210 211 if out_order is None: 212 try: 213 out_order = sorted((k for k in edges if k < 0), reverse=True) 214 except TypeError: 215 raise ValueError("Non-integer edge label(s): {}".format( 216 list(edges.keys()))) 217 else: 218 if len(out_order) != len(set(out_order)): 219 raise ValueError("Duplicate labels in out_order: {}".format(out_order)) 220 221 try: 222 con_edges = [edges[k] for k in con_order] 223 out_edges = [edges[k] for k in out_order] 224 except KeyError as err: 225 raise ValueError("Order contained an unknown edge label: {}".format( 226 err.args[0])) 227 228 if len(con_edges) + len(out_edges) != len(edges): 229 raise ValueError( 230 "Edges {} were not included in the contraction and output " 231 "ordering.".format( 232 list(set(edges.keys()) - set(con_order) - set(out_order)))) 233 234 for e in con_edges: 235 if e.is_dangling(): 236 raise ValueError( 237 "Contraction edge {} appears only once in the network.".format( 238 str(e))) 239 240 for e in out_edges: 241 if not e.is_dangling(): 242 raise ValueError( 243 "Output edge {} appears more than once in the network.".format( 244 str(e))) 245 246 return nodes, con_edges, out_edges 247 248 249 def _build_network( 250 tensors: Sequence[Tensor], network_structure: Sequence[Sequence], 251 backend: Text 252 ) -> Tuple[List[network_components.BaseNode], Dict[Any, 253 network_components.Edge]]: 254 nodes = [] 255 edges = {} 256 for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)): 257 if len(tensor.shape) != len(edge_lbls): 258 raise ValueError( 259 "Incorrect number of edge labels specified tensor {}".format(i)) 260 if isinstance(tensor, network_components.BaseNode): 261 node = tensor 262 else: 263 node = network_components.Node( 264 tensor, name="tensor_{}".format(i), backend=backend) 265 266 nodes.append(node) 267 268 for (axis_num, edge_lbl) in enumerate(edge_lbls): 269 if edge_lbl not in edges: 270 e = node[axis_num] 271 e.set_name(str(edge_lbl)) 272 edges[edge_lbl] = e 273 else: 274 # This will raise an error if the edges are not dangling. 
275 e = network_components.connect( 276 edges[edge_lbl], node[axis_num], name=str(edge_lbl)) 277 edges[edge_lbl] = e 278 return nodes, edges 279 [end of tensornetwork/ncon_interface.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py --- a/tensornetwork/ncon_interface.py +++ b/tensornetwork/ncon_interface.py @@ -18,6 +18,8 @@ from tensornetwork import network_components from tensornetwork.backend_contextmanager import get_default_backend from tensornetwork.backends import backend_factory +from tensornetwork.backends.base_backend import BaseBackend + Tensor = Any @@ -26,7 +28,7 @@ network_structure: Sequence[Sequence], con_order: Optional[Sequence] = None, out_order: Optional[Sequence] = None, - backend: Optional[Text] = None + backend: Optional[Union[Text, BaseBackend]] = None ) -> Union[network_components.BaseNode, Tensor]: r"""Contracts a list of tensors or nodes according to a tensor network specification. @@ -79,16 +81,18 @@ if all elements of `tensors` are `BaseNode` objects, else it is returned as a `Tensor` object. """ - if backend and (backend not in backend_factory._BACKENDS): - raise ValueError("Backend '{}' does not exist".format(backend)) if backend is None: backend = get_default_backend() + if isinstance(backend, BaseBackend): + backend_obj = backend + else: + backend_obj = backend_factory.get_backend(backend) are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors] nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)} - if not all([n.backend.name == backend for n in nodes]): - raise ValueError( - "Some nodes have backends different from '{}'".format(backend)) + if not all([n.backend.name == backend_obj.name for n in nodes]): + raise ValueError("Some nodes have backends different from '{}'".format( + backend_obj.name)) _tensors = [] for t in tensors: @@ -102,7 +106,7 @@ network_structure, con_order=con_order, out_order=out_order, - backend=backend) + backend=backend_obj) nodes = set(nodes) # we don't need the ordering here @@ -160,7 +164,7 @@ network_structure: Sequence[Sequence], con_order: Optional[Sequence] = None, out_order: Optional[Sequence] = None, - backend: Optional[Text] = None + backend: Optional[Union[Text, BaseBackend]] = None ) -> Tuple[List[network_components.BaseNode], List[network_components.Edge], List[network_components.Edge]]: r"""Creates a network from a list of tensors according to `tensors`. @@ -182,9 +186,8 @@ network_structure: List of lists specifying the tensor network. con_order: List of edge labels specifying the contraction order. out_order: List of edge labels specifying the output order. - backend: String specifying the backend to use. Defaults to the default - TensorNetwork backend. - + backend: String or BaseBackend object specifying the backend to use. + Defaults to the default TensorNetwork backend. Returns: nodes: List of constructed nodes in the same order as given in `tensors`. con_edges: List of internal `Edge` objects in contraction order. @@ -247,8 +250,9 @@ def _build_network( - tensors: Sequence[Tensor], network_structure: Sequence[Sequence], - backend: Text + tensors: Sequence[Tensor], + network_structure: Sequence[Sequence], + backend: Optional[Union[BaseBackend, Text]] = None, ) -> Tuple[List[network_components.BaseNode], Dict[Any, network_components.Edge]]: nodes = []
{"golden_diff": "diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py\n--- a/tensornetwork/ncon_interface.py\n+++ b/tensornetwork/ncon_interface.py\n@@ -18,6 +18,8 @@\n from tensornetwork import network_components\n from tensornetwork.backend_contextmanager import get_default_backend\n from tensornetwork.backends import backend_factory\n+from tensornetwork.backends.base_backend import BaseBackend\n+\n Tensor = Any\n \n \n@@ -26,7 +28,7 @@\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n- backend: Optional[Text] = None\n+ backend: Optional[Union[Text, BaseBackend]] = None\n ) -> Union[network_components.BaseNode, Tensor]:\n r\"\"\"Contracts a list of tensors or nodes according to a tensor network \n specification.\n@@ -79,16 +81,18 @@\n if all elements of `tensors` are `BaseNode` objects, else\n it is returned as a `Tensor` object.\n \"\"\"\n- if backend and (backend not in backend_factory._BACKENDS):\n- raise ValueError(\"Backend '{}' does not exist\".format(backend))\n if backend is None:\n backend = get_default_backend()\n+ if isinstance(backend, BaseBackend):\n+ backend_obj = backend\n+ else:\n+ backend_obj = backend_factory.get_backend(backend)\n \n are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors]\n nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)}\n- if not all([n.backend.name == backend for n in nodes]):\n- raise ValueError(\n- \"Some nodes have backends different from '{}'\".format(backend))\n+ if not all([n.backend.name == backend_obj.name for n in nodes]):\n+ raise ValueError(\"Some nodes have backends different from '{}'\".format(\n+ backend_obj.name))\n \n _tensors = []\n for t in tensors:\n@@ -102,7 +106,7 @@\n network_structure,\n con_order=con_order,\n out_order=out_order,\n- backend=backend)\n+ backend=backend_obj)\n \n nodes = set(nodes) # we don't need the ordering here\n \n@@ -160,7 +164,7 @@\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n- backend: Optional[Text] = None\n+ backend: Optional[Union[Text, BaseBackend]] = None\n ) -> Tuple[List[network_components.BaseNode], List[network_components.Edge],\n List[network_components.Edge]]:\n r\"\"\"Creates a network from a list of tensors according to `tensors`.\n@@ -182,9 +186,8 @@\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n- backend: String specifying the backend to use. Defaults to the default\n- TensorNetwork backend.\n-\n+ backend: String or BaseBackend object specifying the backend to use. 
\n+ Defaults to the default TensorNetwork backend.\n Returns:\n nodes: List of constructed nodes in the same order as given in `tensors`.\n con_edges: List of internal `Edge` objects in contraction order.\n@@ -247,8 +250,9 @@\n \n \n def _build_network(\n- tensors: Sequence[Tensor], network_structure: Sequence[Sequence],\n- backend: Text\n+ tensors: Sequence[Tensor],\n+ network_structure: Sequence[Sequence],\n+ backend: Optional[Union[BaseBackend, Text]] = None,\n ) -> Tuple[List[network_components.BaseNode], Dict[Any,\n network_components.Edge]]:\n nodes = []\n", "issue": "ncon should take a BaseBackend object\nSince we are now supporting passing `BaseBackend` to `Node` constructors, we should also\r\nallow it in `ncon`\n", "before_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"NCON interface to TensorNetwork.\"\"\"\n\nimport warnings\nfrom typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict\nfrom tensornetwork import network_components\nfrom tensornetwork.backend_contextmanager import get_default_backend\nfrom tensornetwork.backends import backend_factory\nTensor = Any\n\n\ndef ncon(\n tensors: Sequence[Union[network_components.BaseNode, Tensor]],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n backend: Optional[Text] = None\n) -> Union[network_components.BaseNode, Tensor]:\n r\"\"\"Contracts a list of tensors or nodes according to a tensor network \n specification.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be nonzero integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. `[-1,-2,-3,...]`.\n\n For example, matrix multiplication:\n\n .. code-block:: python\n\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n B = np.array([[1.0, 1.0], [0.0, 1.0]])\n ncon([A,B], [(-1, 1), (1, -2)])\n\n Matrix trace:\n\n .. code-block:: python\n\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n ncon([A], [(1, 1)]) # 5.0\n\n Note: \n The reason `0` is not allowed as an edge label without manually\n specifying the contraction order is to maintain compatibility with the\n `original NCON implementation`_. However, the use of `0` in `con_order` \n to denote outer products is not (currently) \n supported in this implementation.\n \n .. 
_original NCON implementation:\n https://arxiv.org/abs/1402.0939\n\n Args:\n tensors: List of `Tensors` or `BaseNodes`.\n network_structure: List of lists specifying the tensor network structure.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n backend: String specifying the backend to use. Defaults to\n `tensornetwork.backend_contextmanager.get_default_backend`.\n\n Returns:\n The result of the contraction. The result is returned as a `Node`\n if all elements of `tensors` are `BaseNode` objects, else\n it is returned as a `Tensor` object.\n \"\"\"\n if backend and (backend not in backend_factory._BACKENDS):\n raise ValueError(\"Backend '{}' does not exist\".format(backend))\n if backend is None:\n backend = get_default_backend()\n\n are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors]\n nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)}\n if not all([n.backend.name == backend for n in nodes]):\n raise ValueError(\n \"Some nodes have backends different from '{}'\".format(backend))\n\n _tensors = []\n for t in tensors:\n if isinstance(t, network_components.BaseNode):\n _tensors.append(t.tensor)\n else:\n _tensors.append(t)\n\n nodes, con_edges, out_edges = ncon_network(\n _tensors,\n network_structure,\n con_order=con_order,\n out_order=out_order,\n backend=backend)\n\n nodes = set(nodes) # we don't need the ordering here\n\n # Reverse the list so we can pop from the end: O(1).\n con_edges = con_edges[::-1]\n while con_edges:\n nodes_to_contract = con_edges[-1].get_nodes()\n edges_to_contract = network_components.get_shared_edges(*nodes_to_contract)\n\n # Eat up all parallel edges that are adjacent in the ordering.\n adjacent_parallel_edges = set()\n for edge in reversed(con_edges):\n if edge in edges_to_contract:\n adjacent_parallel_edges.add(edge)\n else:\n break\n con_edges = con_edges[:-len(adjacent_parallel_edges)]\n\n # In an optimal ordering, all edges connecting a given pair of nodes are\n # adjacent in con_order. If this is not the case, warn the user.\n leftovers = edges_to_contract - adjacent_parallel_edges\n if leftovers:\n warnings.warn(\n \"Suboptimal ordering detected. Edges {} are not adjacent in the \"\n \"contraction order to edges {}, connecting nodes {}. 
Deviating from \"\n \"the specified ordering!\".format(\n list(map(str, leftovers)),\n list(map(str, adjacent_parallel_edges)),\n list(map(str, nodes_to_contract))))\n con_edges = [e for e in con_edges if e not in edges_to_contract]\n\n if set(nodes_to_contract) == nodes:\n # This contraction produces the final output, so order the edges\n # here to avoid transposes in some cases.\n contraction_output_order = out_edges\n else:\n contraction_output_order = None\n\n nodes = nodes - set(nodes_to_contract)\n nodes.add(\n network_components.contract_between(\n *nodes_to_contract,\n name=\"con({},{})\".format(*nodes_to_contract),\n output_edge_order=contraction_output_order))\n\n # TODO: More efficient ordering of products based on out_edges\n res_node = network_components.outer_product_final_nodes(nodes, out_edges)\n if all(are_nodes):\n return res_node\n return res_node.tensor\n\n\ndef ncon_network(\n tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n backend: Optional[Text] = None\n) -> Tuple[List[network_components.BaseNode], List[network_components.Edge],\n List[network_components.Edge]]:\n r\"\"\"Creates a network from a list of tensors according to `tensors`.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. `[-1,-2,-3,...]`.\n\n This is used internally by `ncon()`.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n backend: String specifying the backend to use. 
Defaults to the default\n TensorNetwork backend.\n\n Returns:\n nodes: List of constructed nodes in the same order as given in `tensors`.\n con_edges: List of internal `Edge` objects in contraction order.\n out_edges: List of dangling `Edge` objects in output order.\n \"\"\"\n if len(tensors) != len(network_structure):\n raise ValueError('len(tensors) != len(network_structure)')\n\n nodes, edges = _build_network(tensors, network_structure, backend)\n\n if con_order is None:\n try:\n con_order = sorted((k for k in edges if k >= 0))\n if con_order and con_order[0] == 0:\n raise ValueError(\"'0' is not a valid edge label when the \"\n \"contraction order is not specified separately.\")\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(con_order) != len(set(con_order)):\n raise ValueError(\"Duplicate labels in con_order: {}\".format(con_order))\n\n if out_order is None:\n try:\n out_order = sorted((k for k in edges if k < 0), reverse=True)\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(out_order) != len(set(out_order)):\n raise ValueError(\"Duplicate labels in out_order: {}\".format(out_order))\n\n try:\n con_edges = [edges[k] for k in con_order]\n out_edges = [edges[k] for k in out_order]\n except KeyError as err:\n raise ValueError(\"Order contained an unknown edge label: {}\".format(\n err.args[0]))\n\n if len(con_edges) + len(out_edges) != len(edges):\n raise ValueError(\n \"Edges {} were not included in the contraction and output \"\n \"ordering.\".format(\n list(set(edges.keys()) - set(con_order) - set(out_order))))\n\n for e in con_edges:\n if e.is_dangling():\n raise ValueError(\n \"Contraction edge {} appears only once in the network.\".format(\n str(e)))\n\n for e in out_edges:\n if not e.is_dangling():\n raise ValueError(\n \"Output edge {} appears more than once in the network.\".format(\n str(e)))\n\n return nodes, con_edges, out_edges\n\n\ndef _build_network(\n tensors: Sequence[Tensor], network_structure: Sequence[Sequence],\n backend: Text\n) -> Tuple[List[network_components.BaseNode], Dict[Any,\n network_components.Edge]]:\n nodes = []\n edges = {}\n for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):\n if len(tensor.shape) != len(edge_lbls):\n raise ValueError(\n \"Incorrect number of edge labels specified tensor {}\".format(i))\n if isinstance(tensor, network_components.BaseNode):\n node = tensor\n else:\n node = network_components.Node(\n tensor, name=\"tensor_{}\".format(i), backend=backend)\n\n nodes.append(node)\n\n for (axis_num, edge_lbl) in enumerate(edge_lbls):\n if edge_lbl not in edges:\n e = node[axis_num]\n e.set_name(str(edge_lbl))\n edges[edge_lbl] = e\n else:\n # This will raise an error if the edges are not dangling.\n e = network_components.connect(\n edges[edge_lbl], node[axis_num], name=str(edge_lbl))\n edges[edge_lbl] = e\n return nodes, edges\n", "path": "tensornetwork/ncon_interface.py"}]}
3,776
860
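A minimal usage sketch of what the TensorNetwork patch above enables: passing a concrete BaseBackend instance to `ncon` instead of a backend name string. Only `backend_factory.get_backend` and the widened `backend` parameter come from the golden diff; the `"numpy"` backend name and the import paths are assumptions for illustration, and the tensors reuse the matrix-multiplication example from the docstring in the prompt.

import numpy as np
from tensornetwork.backends import backend_factory
from tensornetwork.ncon_interface import ncon

# Build a BaseBackend object explicitly (assumes a "numpy" backend is registered).
backend_obj = backend_factory.get_backend("numpy")

A = np.array([[1.0, 2.0], [3.0, 4.0]])
B = np.array([[1.0, 1.0], [0.0, 1.0]])

# Before the patch only a string was accepted here; after it, the object works too.
result = ncon([A, B], [(-1, 1), (1, -2)], backend=backend_obj)  # matrix product of A and B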
gh_patches_debug_13901
rasdani/github-patches
git_diff
fossasia__open-event-server-1396
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Category/type hashtags are not clickable Make the category/top hashtags under each event element in home page and search page clickable. </issue> <code> [start of open_event/views/admin/home.py] 1 """Copyright 2015 Rafal Kowalski""" 2 import logging 3 import os 4 from urllib2 import urlopen 5 6 from flask import url_for, redirect, request, session, send_from_directory 7 from flask.ext import login 8 from flask_admin import expose 9 from flask_admin.base import AdminIndexView 10 from flask.ext.scrypt import generate_password_hash 11 from wtforms import ValidationError 12 13 from open_event.helpers.flask_helpers import get_real_ip, slugify 14 from ...helpers.data import DataManager, save_to_db, get_google_auth, get_facebook_auth, create_user_password, \ 15 user_logged_in, record_activity 16 from ...helpers.data_getter import DataGetter 17 from ...helpers.helpers import send_email_with_reset_password_hash, send_email_confirmation, \ 18 get_serializer, get_request_stats 19 from open_event.helpers.oauth import OAuth, FbOAuth 20 from open_event.models.user import User 21 import geoip2.database 22 23 def intended_url(): 24 return request.args.get('next') or url_for('.index') 25 26 def record_user_login_logout(template, user): 27 req_stats = get_request_stats() 28 record_activity( 29 template, 30 user=user, 31 **req_stats 32 ) 33 34 class MyHomeView(AdminIndexView): 35 36 @expose('/') 37 def index(self): 38 call_for_speakers_events = DataGetter.get_call_for_speakers_events().limit(12).all() 39 upcoming_events = DataGetter.get_all_published_events().limit(12).all() 40 return self.render('gentelella/index.html', 41 call_for_speakers_events=call_for_speakers_events, 42 upcoming_events=upcoming_events) 43 44 @expose('/login/', methods=('GET', 'POST')) 45 def login_view(self): 46 if request.method == 'GET': 47 google = get_google_auth() 48 auth_url, state = google.authorization_url(OAuth.get_auth_uri(), access_type='offline') 49 session['oauth_state'] = state 50 51 # Add Facebook Oauth 2.0 login 52 facebook = get_facebook_auth() 53 fb_auth_url, state = facebook.authorization_url(FbOAuth.get_auth_uri(), access_type='offline') 54 session['fb_oauth_state'] = state 55 return self.render('/gentelella/admin/login/login.html', auth_url=auth_url, fb_auth_url=fb_auth_url) 56 if request.method == 'POST': 57 email = request.form['email'] 58 user = DataGetter.get_user_by_email(email) 59 if user is None: 60 logging.info('No such user') 61 return redirect(url_for('admin.login_view')) 62 if user.password != generate_password_hash(request.form['password'], user.salt): 63 logging.info('Password Incorrect') 64 return redirect(url_for('admin.login_view')) 65 login.login_user(user) 66 record_user_login_logout('user_login', user) 67 logging.info('logged successfully') 68 user_logged_in(user) 69 return redirect(intended_url()) 70 71 @expose('/register/', methods=('GET', 'POST')) 72 def register_view(self): 73 """Register view page""" 74 if request.method == 'GET': 75 return self.render('/gentelella/admin/login/register.html') 76 if request.method == 'POST': 77 users = DataGetter.get_all_users() 78 for user in users: 79 if user.email == request.form['email']: 80 raise ValidationError('Email already exists') 81 logging.info("Registration under process") 82 s = get_serializer() 83 data = [request.form['email'], request.form['password']] 84 user = DataManager.create_user(data) 85 form_hash = s.dumps(data) 86 link = 
url_for('.create_account_after_confirmation_view', hash=form_hash, _external=True) 87 send_email_confirmation(request.form, link) 88 login.login_user(user) 89 record_user_login_logout('user_login', user) 90 logging.info('logged successfully') 91 user_logged_in(user) 92 return redirect(intended_url()) 93 94 @expose('/account/create/<hash>', methods=('GET',)) 95 def create_account_after_confirmation_view(self, hash): 96 s = get_serializer() 97 data = s.loads(hash) 98 user = User.query.filter_by(email=data[0]).first() 99 user.is_verified = True 100 save_to_db(user, 'User updated') 101 login.login_user(user) 102 record_user_login_logout('user_login', user) 103 user_logged_in(user) 104 return redirect(intended_url()) 105 106 @expose('/password/new/<email>', methods=('GET', 'POST')) 107 def create_password_after_oauth_login(self, email): 108 s = get_serializer() 109 email = s.loads(email) 110 user = DataGetter.get_user_by_email(email) 111 if request.method == 'GET': 112 return self.render('/gentelella/admin/login/create_password.html') 113 if request.method == 'POST': 114 user = create_user_password(request.form, user) 115 if user is not None: 116 login.login_user(user) 117 record_user_login_logout('user_login', user) 118 user_logged_in(user) 119 return redirect(intended_url()) 120 121 @expose('/password/reset', methods=('GET', 'POST')) 122 def password_reset_view(self): 123 """Password reset view""" 124 if request.method == 'GET': 125 return self.render('/gentelella/admin/login/password_reminder.html') 126 if request.method == 'POST': 127 email = request.form['email'] 128 user = DataGetter.get_user_by_email(email) 129 if user: 130 link = request.host + url_for(".change_password_view", hash=user.reset_password) 131 send_email_with_reset_password_hash(email, link) 132 return redirect(intended_url()) 133 134 @expose('/reset_password/<hash>', methods=('GET', 'POST')) 135 def change_password_view(self, hash): 136 """Change password view""" 137 if request.method == 'GET': 138 return self.render('/gentelella/admin/login/change_password.html') 139 if request.method == 'POST': 140 DataManager.reset_password(request.form, hash) 141 return redirect(url_for('.index')) 142 143 @expose('/logout/') 144 def logout_view(self): 145 """Logout method which redirect to index""" 146 record_user_login_logout('user_logout', login.current_user) 147 login.logout_user() 148 return redirect(url_for('.index')) 149 150 @expose('/set_role', methods=('GET', 'POST')) 151 def set_role(self): 152 """Set user role method""" 153 id = request.args['id'] 154 role = request.args['roles'] 155 user = DataGetter.get_user(id) 156 user.role = role 157 save_to_db(user, "User Role updated") 158 return redirect(url_for('.roles_manager')) 159 160 @expose('/manage_roles') 161 def roles_manager(self): 162 """Roles manager view""" 163 users = DataGetter.get_all_users() 164 events = DataGetter.get_all_events() 165 return self.render('admin/role_manager.html', 166 users=users, 167 events=events) 168 169 @expose('/sessions/', methods=('GET',)) 170 def view_user_sessions(self): 171 sessions = DataGetter.get_user_sessions() 172 return self.render('/gentelella/admin/session/user_sessions.html', 173 sessions=sessions) 174 175 @expose('/forbidden/', methods=('GET',)) 176 def forbidden_view(self): 177 return self.render('/gentelella/admin/forbidden.html') 178 179 @expose('/browse/', methods=('GET',)) 180 def browse_view(self): 181 try: 182 reader = geoip2.database.Reader(os.path.realpath('.') + '/static/data/GeoLite2-Country.mmdb') 183 ip = get_real_ip() 
184 if ip == '127.0.0.1' or ip == '0.0.0.0': 185 ip = urlopen('http://ip.42.pl/raw').read() # On local test environments 186 response = reader.country(ip) 187 country = response.country.name 188 except: 189 country = "United States" 190 return redirect(url_for('explore.explore_view', location=slugify(country))) 191 192 193 194 195 [end of open_event/views/admin/home.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/open_event/views/admin/home.py b/open_event/views/admin/home.py --- a/open_event/views/admin/home.py +++ b/open_event/views/admin/home.py @@ -1,6 +1,7 @@ """Copyright 2015 Rafal Kowalski""" import logging import os +import urllib from urllib2 import urlopen from flask import url_for, redirect, request, session, send_from_directory @@ -187,8 +188,5 @@ country = response.country.name except: country = "United States" - return redirect(url_for('explore.explore_view', location=slugify(country))) - - - - + return redirect(url_for('explore.explore_view', location=slugify(country)) + '?' + + urllib.urlencode(request.args))
{"golden_diff": "diff --git a/open_event/views/admin/home.py b/open_event/views/admin/home.py\n--- a/open_event/views/admin/home.py\n+++ b/open_event/views/admin/home.py\n@@ -1,6 +1,7 @@\n \"\"\"Copyright 2015 Rafal Kowalski\"\"\"\n import logging\n import os\n+import urllib\n from urllib2 import urlopen\n \n from flask import url_for, redirect, request, session, send_from_directory\n@@ -187,8 +188,5 @@\n country = response.country.name\n except:\n country = \"United States\"\n- return redirect(url_for('explore.explore_view', location=slugify(country)))\n-\n-\n-\n-\n+ return redirect(url_for('explore.explore_view', location=slugify(country)) + '?' +\n+ urllib.urlencode(request.args))\n", "issue": "Category/type hashtags are not clickable \nMake the category/top hashtags under each event element in home page and search page clickable.\n\n", "before_files": [{"content": "\"\"\"Copyright 2015 Rafal Kowalski\"\"\"\nimport logging\nimport os\nfrom urllib2 import urlopen\n\nfrom flask import url_for, redirect, request, session, send_from_directory\nfrom flask.ext import login\nfrom flask_admin import expose\nfrom flask_admin.base import AdminIndexView\nfrom flask.ext.scrypt import generate_password_hash\nfrom wtforms import ValidationError\n\nfrom open_event.helpers.flask_helpers import get_real_ip, slugify\nfrom ...helpers.data import DataManager, save_to_db, get_google_auth, get_facebook_auth, create_user_password, \\\n user_logged_in, record_activity\nfrom ...helpers.data_getter import DataGetter\nfrom ...helpers.helpers import send_email_with_reset_password_hash, send_email_confirmation, \\\n get_serializer, get_request_stats\nfrom open_event.helpers.oauth import OAuth, FbOAuth\nfrom open_event.models.user import User\nimport geoip2.database\n\ndef intended_url():\n return request.args.get('next') or url_for('.index')\n\ndef record_user_login_logout(template, user):\n req_stats = get_request_stats()\n record_activity(\n template,\n user=user,\n **req_stats\n )\n\nclass MyHomeView(AdminIndexView):\n\n @expose('/')\n def index(self):\n call_for_speakers_events = DataGetter.get_call_for_speakers_events().limit(12).all()\n upcoming_events = DataGetter.get_all_published_events().limit(12).all()\n return self.render('gentelella/index.html',\n call_for_speakers_events=call_for_speakers_events,\n upcoming_events=upcoming_events)\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n if request.method == 'GET':\n google = get_google_auth()\n auth_url, state = google.authorization_url(OAuth.get_auth_uri(), access_type='offline')\n session['oauth_state'] = state\n\n # Add Facebook Oauth 2.0 login\n facebook = get_facebook_auth()\n fb_auth_url, state = facebook.authorization_url(FbOAuth.get_auth_uri(), access_type='offline')\n session['fb_oauth_state'] = state\n return self.render('/gentelella/admin/login/login.html', auth_url=auth_url, fb_auth_url=fb_auth_url)\n if request.method == 'POST':\n email = request.form['email']\n user = DataGetter.get_user_by_email(email)\n if user is None:\n logging.info('No such user')\n return redirect(url_for('admin.login_view'))\n if user.password != generate_password_hash(request.form['password'], user.salt):\n logging.info('Password Incorrect')\n return redirect(url_for('admin.login_view'))\n login.login_user(user)\n record_user_login_logout('user_login', user)\n logging.info('logged successfully')\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/register/', methods=('GET', 'POST'))\n def register_view(self):\n \"\"\"Register 
view page\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/register.html')\n if request.method == 'POST':\n users = DataGetter.get_all_users()\n for user in users:\n if user.email == request.form['email']:\n raise ValidationError('Email already exists')\n logging.info(\"Registration under process\")\n s = get_serializer()\n data = [request.form['email'], request.form['password']]\n user = DataManager.create_user(data)\n form_hash = s.dumps(data)\n link = url_for('.create_account_after_confirmation_view', hash=form_hash, _external=True)\n send_email_confirmation(request.form, link)\n login.login_user(user)\n record_user_login_logout('user_login', user)\n logging.info('logged successfully')\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/account/create/<hash>', methods=('GET',))\n def create_account_after_confirmation_view(self, hash):\n s = get_serializer()\n data = s.loads(hash)\n user = User.query.filter_by(email=data[0]).first()\n user.is_verified = True\n save_to_db(user, 'User updated')\n login.login_user(user)\n record_user_login_logout('user_login', user)\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/password/new/<email>', methods=('GET', 'POST'))\n def create_password_after_oauth_login(self, email):\n s = get_serializer()\n email = s.loads(email)\n user = DataGetter.get_user_by_email(email)\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/create_password.html')\n if request.method == 'POST':\n user = create_user_password(request.form, user)\n if user is not None:\n login.login_user(user)\n record_user_login_logout('user_login', user)\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/password/reset', methods=('GET', 'POST'))\n def password_reset_view(self):\n \"\"\"Password reset view\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/password_reminder.html')\n if request.method == 'POST':\n email = request.form['email']\n user = DataGetter.get_user_by_email(email)\n if user:\n link = request.host + url_for(\".change_password_view\", hash=user.reset_password)\n send_email_with_reset_password_hash(email, link)\n return redirect(intended_url())\n\n @expose('/reset_password/<hash>', methods=('GET', 'POST'))\n def change_password_view(self, hash):\n \"\"\"Change password view\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/change_password.html')\n if request.method == 'POST':\n DataManager.reset_password(request.form, hash)\n return redirect(url_for('.index'))\n\n @expose('/logout/')\n def logout_view(self):\n \"\"\"Logout method which redirect to index\"\"\"\n record_user_login_logout('user_logout', login.current_user)\n login.logout_user()\n return redirect(url_for('.index'))\n\n @expose('/set_role', methods=('GET', 'POST'))\n def set_role(self):\n \"\"\"Set user role method\"\"\"\n id = request.args['id']\n role = request.args['roles']\n user = DataGetter.get_user(id)\n user.role = role\n save_to_db(user, \"User Role updated\")\n return redirect(url_for('.roles_manager'))\n\n @expose('/manage_roles')\n def roles_manager(self):\n \"\"\"Roles manager view\"\"\"\n users = DataGetter.get_all_users()\n events = DataGetter.get_all_events()\n return self.render('admin/role_manager.html',\n users=users,\n events=events)\n\n @expose('/sessions/', methods=('GET',))\n def view_user_sessions(self):\n sessions = DataGetter.get_user_sessions()\n return 
self.render('/gentelella/admin/session/user_sessions.html',\n sessions=sessions)\n\n @expose('/forbidden/', methods=('GET',))\n def forbidden_view(self):\n return self.render('/gentelella/admin/forbidden.html')\n\n @expose('/browse/', methods=('GET',))\n def browse_view(self):\n try:\n reader = geoip2.database.Reader(os.path.realpath('.') + '/static/data/GeoLite2-Country.mmdb')\n ip = get_real_ip()\n if ip == '127.0.0.1' or ip == '0.0.0.0':\n ip = urlopen('http://ip.42.pl/raw').read() # On local test environments\n response = reader.country(ip)\n country = response.country.name\n except:\n country = \"United States\"\n return redirect(url_for('explore.explore_view', location=slugify(country)))\n\n\n\n\n", "path": "open_event/views/admin/home.py"}]}
2,707
177
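A small sketch of the behaviour the open-event-server patch introduces: the incoming query parameters (for example a category hashtag filter) are re-encoded and appended to the explore redirect, which is what makes the hashtags usable as links. Only `urllib.urlencode` and the `'?'` concatenation come from the diff; the `category` key and the example values are hypothetical.

# Python 2 style, matching the repository's use of urllib2 / urllib.urlencode.
import urllib

request_args = {"category": "Science & Technology"}    # hypothetical request.args contents
base = "/explore/united-states"                        # what url_for('explore.explore_view', ...) might return
target = base + "?" + urllib.urlencode(request_args)
# target == "/explore/united-states?category=Science+%26+Technology"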
gh_patches_debug_23933
rasdani/github-patches
git_diff
ietf-tools__datatracker-5109
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> secr.middleware.dbquery.QueryCountDebugMiddleware is unused. Is this still needed in the source tree? Can it be removed? cc @rpcross </issue> <code> [start of ietf/secr/middleware/dbquery.py] 1 # Copyright The IETF Trust 2014-2019, All Rights Reserved 2 #import logging 3 4 from django.db import connection 5 from django.utils.log import getLogger # type: ignore 6 7 8 logger = getLogger(__name__) 9 #logger.setLevel(logging.DEBUG) 10 #logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE)) 11 12 class QueryCountDebugMiddleware(object): 13 """ 14 This middleware will log the number of queries run 15 and the total time taken for each request (with a 16 status code of 200). It does not currently support 17 multi-db setups. 18 """ 19 def process_response(self, request, response): 20 #assert False, request.path 21 logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries))) 22 if response.status_code == 200: 23 total_time = 0 24 #for query in connection.queries: 25 # query_time = query.get('time') 26 # if query_time is None: 27 # django-debug-toolbar monkeypatches the connection 28 # cursor wrapper and adds extra information in each 29 # item in connection.queries. The query time is stored 30 # under the key "duration" rather than "time" and is 31 # in milliseconds, not seconds. 32 # query_time = query.get('duration', 0) / 1000 33 # total_time += float(query_time) 34 logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time)) 35 return response 36 [end of ietf/secr/middleware/dbquery.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ietf/secr/middleware/dbquery.py b/ietf/secr/middleware/dbquery.py deleted file mode 100644 --- a/ietf/secr/middleware/dbquery.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright The IETF Trust 2014-2019, All Rights Reserved -#import logging - -from django.db import connection -from django.utils.log import getLogger # type: ignore - - -logger = getLogger(__name__) -#logger.setLevel(logging.DEBUG) -#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE)) - -class QueryCountDebugMiddleware(object): - """ - This middleware will log the number of queries run - and the total time taken for each request (with a - status code of 200). It does not currently support - multi-db setups. - """ - def process_response(self, request, response): - #assert False, request.path - logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries))) - if response.status_code == 200: - total_time = 0 - #for query in connection.queries: - # query_time = query.get('time') - # if query_time is None: - # django-debug-toolbar monkeypatches the connection - # cursor wrapper and adds extra information in each - # item in connection.queries. The query time is stored - # under the key "duration" rather than "time" and is - # in milliseconds, not seconds. - # query_time = query.get('duration', 0) / 1000 - # total_time += float(query_time) - logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time)) - return response
{"golden_diff": "diff --git a/ietf/secr/middleware/dbquery.py b/ietf/secr/middleware/dbquery.py\ndeleted file mode 100644\n--- a/ietf/secr/middleware/dbquery.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-# Copyright The IETF Trust 2014-2019, All Rights Reserved\n-#import logging\n-\n-from django.db import connection\n-from django.utils.log import getLogger # type: ignore\n-\n-\n-logger = getLogger(__name__)\n-#logger.setLevel(logging.DEBUG)\n-#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))\n-\n-class QueryCountDebugMiddleware(object):\n- \"\"\"\n- This middleware will log the number of queries run\n- and the total time taken for each request (with a\n- status code of 200). It does not currently support\n- multi-db setups.\n- \"\"\"\n- def process_response(self, request, response):\n- #assert False, request.path\n- logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))\n- if response.status_code == 200:\n- total_time = 0\n- #for query in connection.queries:\n- # query_time = query.get('time')\n- # if query_time is None:\n- # django-debug-toolbar monkeypatches the connection\n- # cursor wrapper and adds extra information in each\n- # item in connection.queries. The query time is stored\n- # under the key \"duration\" rather than \"time\" and is\n- # in milliseconds, not seconds.\n- # query_time = query.get('duration', 0) / 1000\n- # total_time += float(query_time)\n- logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))\n- return response\n", "issue": "secr.middleware.dbquery.QueryCountDebugMiddleware is unused.\nIs this still needed in the source tree? Can it be removed?\r\ncc @rpcross\n", "before_files": [{"content": "# Copyright The IETF Trust 2014-2019, All Rights Reserved\n#import logging\n\nfrom django.db import connection\nfrom django.utils.log import getLogger # type: ignore\n\n\nlogger = getLogger(__name__)\n#logger.setLevel(logging.DEBUG)\n#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))\n\nclass QueryCountDebugMiddleware(object):\n \"\"\"\n This middleware will log the number of queries run\n and the total time taken for each request (with a\n status code of 200). It does not currently support\n multi-db setups.\n \"\"\"\n def process_response(self, request, response):\n #assert False, request.path\n logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))\n if response.status_code == 200:\n total_time = 0\n #for query in connection.queries:\n # query_time = query.get('time')\n # if query_time is None:\n # django-debug-toolbar monkeypatches the connection\n # cursor wrapper and adds extra information in each\n # item in connection.queries. The query time is stored\n # under the key \"duration\" rather than \"time\" and is\n # in milliseconds, not seconds.\n # query_time = query.get('duration', 0) / 1000\n # total_time += float(query_time)\n logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))\n return response\n", "path": "ietf/secr/middleware/dbquery.py"}]}
974
426
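Context for why deleting the datatracker module is safe: Django middleware only runs if its dotted path is registered in the project settings, so an unregistered class is dead code. A hedged verification sketch (the settings access pattern is illustrative and assumes a configured DJANGO_SETTINGS_MODULE; it is not taken from the repository):

from django.conf import settings

dotted = "ietf.secr.middleware.dbquery.QueryCountDebugMiddleware"
# Check both the modern MIDDLEWARE list and the legacy MIDDLEWARE_CLASSES, if present.
paths = list(getattr(settings, "MIDDLEWARE", [])) + list(getattr(settings, "MIDDLEWARE_CLASSES", []))
print("still registered" if dotted in paths else "unused - safe to remove")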
gh_patches_debug_8071
rasdani/github-patches
git_diff
cal-itp__benefits-2023
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `Eligibility index`: update copy to be more consistent and provide context for user | Desktop (showing MST) | Mobile (showing SBMTD) | | --- | --- | | ![image](https://github.com/cal-itp/benefits/assets/25497886/fc0aea1c-832f-4375-b1ba-ea77f1f9f306) | ![image](https://github.com/cal-itp/benefits/assets/25497886/1e78192e-7852-46f3-91c4-1324e48d8238) | ## Figma links - [Desktop_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14387-12551&mode=design&t=11PIPi0dRtWNuQmK-4) - [Mobile_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14646-18505&mode=design&t=11PIPi0dRtWNuQmK-4) ## Acceptance Criteria <!-- Remember to consider edge cases --> - [x] Body copy: first body sentence was updated - [x] Remove agency name in the copy for the radio buttons subheaders - [x] “MST Courtesy Card” is now “Courtesy Card” - [x] "SBMTD Reduced Fare Mobility ID" is now "Reduced Fare Mobility ID" - [x] Radio button descriptions were updated and for SBMTD, mentions the previous name - [x] CTA copy is updated </issue> <code> [start of benefits/eligibility/forms.py] 1 """ 2 The eligibility application: Form definition for the eligibility verification flow. 3 """ 4 5 import logging 6 7 from django import forms 8 from django.utils.translation import gettext_lazy as _ 9 10 from benefits.core import models, recaptcha, widgets 11 12 logger = logging.getLogger(__name__) 13 14 15 class EligibilityVerifierSelectionForm(forms.Form): 16 """Form to capture eligibility verifier selection.""" 17 18 action_url = "eligibility:index" 19 id = "form-verifier-selection" 20 method = "POST" 21 22 verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect) 23 # sets label to empty string so the radio_select template can override the label style 24 submit_value = _("Choose this Benefit") 25 26 def __init__(self, agency: models.TransitAgency, *args, **kwargs): 27 super().__init__(*args, **kwargs) 28 verifiers = agency.active_verifiers 29 30 self.classes = "col-lg-8" 31 # second element is not used since we render the whole label using selection_label_template, 32 # therefore set to None 33 self.fields["verifier"].choices = [(v.id, None) for v in verifiers] 34 self.fields["verifier"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers} 35 36 def clean(self): 37 if not recaptcha.verify(self.data): 38 raise forms.ValidationError("reCAPTCHA failed") 39 40 41 class EligibilityVerificationForm(forms.Form): 42 """Form to collect eligibility verification details.""" 43 44 action_url = "eligibility:confirm" 45 id = "form-eligibility-verification" 46 method = "POST" 47 48 submit_value = _("Check eligibility") 49 submitting_value = _("Checking") 50 51 _error_messages = { 52 "invalid": _("Check your input. The format looks wrong."), 53 "missing": _("This field is required."), 54 } 55 56 def __init__( 57 self, 58 title, 59 headline, 60 blurb, 61 name_label, 62 name_placeholder, 63 name_help_text, 64 sub_label, 65 sub_placeholder, 66 sub_help_text, 67 name_max_length=None, 68 sub_input_mode=None, 69 sub_max_length=None, 70 sub_pattern=None, 71 *args, 72 **kwargs, 73 ): 74 """Initialize a new EligibilityVerifier form. 75 76 Args: 77 title (str): The page (i.e. tab) title for the form's page. 78 79 headline (str): The <h1> on the form's page. 
80 81 blurb (str): Intro <p> on the form's page. 82 83 name_label (str): Label for the name form field. 84 85 name_placeholder (str): Field placeholder for the name form field. 86 87 name_help_text (str): Extra help text for the name form field. 88 89 sub_label (str): Label for the sub form field. 90 91 sub_placeholder (str): Field placeholder for the sub form field. 92 93 sub_help_text (str): Extra help text for the sub form field. 94 95 name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier 96 97 sub_input_mode (str): Input mode can be "numeric", "tel", "search", etc. to override default "text" keyboard on 98 mobile devices 99 100 sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier 101 102 sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier 103 104 Extra args and kwargs are passed through to the underlying django.forms.Form. 105 """ 106 super().__init__(auto_id=True, label_suffix="", *args, **kwargs) 107 108 self.title = title 109 self.headline = headline 110 self.blurb = blurb 111 112 self.classes = "col-lg-6" 113 sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder) 114 if sub_pattern: 115 sub_widget.attrs.update({"pattern": sub_pattern}) 116 if sub_input_mode: 117 sub_widget.attrs.update({"inputmode": sub_input_mode}) 118 if sub_max_length: 119 sub_widget.attrs.update({"maxlength": sub_max_length}) 120 121 self.fields["sub"] = forms.CharField( 122 label=sub_label, 123 widget=sub_widget, 124 help_text=sub_help_text, 125 ) 126 127 name_widget = widgets.FormControlTextInput(placeholder=name_placeholder) 128 if name_max_length: 129 name_widget.attrs.update({"maxlength": name_max_length}) 130 131 self.fields["name"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text) 132 133 def clean(self): 134 if not recaptcha.verify(self.data): 135 raise forms.ValidationError("reCAPTCHA failed") 136 137 138 class MSTCourtesyCard(EligibilityVerificationForm): 139 """EligibilityVerification form for the MST Courtesy Card.""" 140 141 def __init__(self, *args, **kwargs): 142 super().__init__( 143 title=_("Agency card information"), 144 headline=_("Let’s see if we can confirm your eligibility."), 145 blurb=_("Please input your Courtesy Card number and last name below to confirm your eligibility."), 146 name_label=_("Last name (as it appears on Courtesy Card)"), 147 name_placeholder="Garcia", 148 name_help_text=_("We use this to help confirm your Courtesy Card."), 149 sub_label=_("MST Courtesy Card number"), 150 sub_help_text=_("This is a 5-digit number on the front and back of your card."), 151 sub_placeholder="12345", 152 name_max_length=255, 153 sub_input_mode="numeric", 154 sub_max_length=5, 155 sub_pattern=r"\d{5}", 156 *args, 157 **kwargs, 158 ) 159 160 161 class SBMTDMobilityPass(EligibilityVerificationForm): 162 """EligibilityVerification form for the SBMTD Reduced Fare Mobility ID.""" 163 164 def __init__(self, *args, **kwargs): 165 super().__init__( 166 title=_("Agency card information"), 167 headline=_("Let’s see if we can confirm your eligibility."), 168 blurb=_("Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility."), 169 name_label=_("Last name (as it appears on Reduced Fare Mobility ID card)"), 170 name_placeholder="Garcia", 171 name_help_text=_("We use this to help confirm your Reduced Fare Mobility ID."), 172 sub_label=_("SBMTD Reduced Fare 
Mobility ID number"), 173 sub_help_text=_("This is a 4-digit number on the back of your card."), 174 sub_placeholder="1234", 175 name_max_length=255, 176 sub_input_mode="numeric", 177 sub_max_length=4, 178 sub_pattern=r"\d{4}", 179 *args, 180 **kwargs, 181 ) 182 [end of benefits/eligibility/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py --- a/benefits/eligibility/forms.py +++ b/benefits/eligibility/forms.py @@ -21,7 +21,7 @@ verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect) # sets label to empty string so the radio_select template can override the label style - submit_value = _("Choose this Benefit") + submit_value = _("Choose this benefit") def __init__(self, agency: models.TransitAgency, *args, **kwargs): super().__init__(*args, **kwargs)
{"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -21,7 +21,7 @@\n \n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n- submit_value = _(\"Choose this Benefit\")\n+ submit_value = _(\"Choose this benefit\")\n \n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n", "issue": "`Eligibility index`: update copy to be more consistent and provide context for user\n| Desktop (showing MST) | Mobile (showing SBMTD) |\r\n| --- | --- |\r\n| ![image](https://github.com/cal-itp/benefits/assets/25497886/fc0aea1c-832f-4375-b1ba-ea77f1f9f306) | ![image](https://github.com/cal-itp/benefits/assets/25497886/1e78192e-7852-46f3-91c4-1324e48d8238) |\r\n\r\n\r\n## Figma links\r\n- [Desktop_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14387-12551&mode=design&t=11PIPi0dRtWNuQmK-4)\r\n- [Mobile_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14646-18505&mode=design&t=11PIPi0dRtWNuQmK-4)\r\n\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [x] Body copy: first body sentence was updated\r\n- [x] Remove agency name in the copy for the radio buttons subheaders\r\n - [x] \u201cMST Courtesy Card\u201d is now \u201cCourtesy Card\u201d\r\n - [x] \"SBMTD Reduced Fare Mobility ID\" is now \"Reduced Fare Mobility ID\"\r\n- [x] Radio button descriptions were updated and for SBMTD, mentions the previous name\r\n- [x] CTA copy is updated\r\n\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n id = \"form-verifier-selection\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n submit_value = _(\"Choose this Benefit\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n verifiers = agency.active_verifiers\n\n self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n id = \"form-eligibility-verification\"\n method = \"POST\"\n\n submit_value = _(\"Check eligibility\")\n submitting_value = _(\"Checking\")\n\n _error_messages = {\n \"invalid\": _(\"Check your input. 
The format looks wrong.\"),\n \"missing\": _(\"This field is required.\"),\n }\n\n def __init__(\n self,\n title,\n headline,\n blurb,\n name_label,\n name_placeholder,\n name_help_text,\n sub_label,\n sub_placeholder,\n sub_help_text,\n name_max_length=None,\n sub_input_mode=None,\n sub_max_length=None,\n sub_pattern=None,\n *args,\n **kwargs,\n ):\n \"\"\"Initialize a new EligibilityVerifier form.\n\n Args:\n title (str): The page (i.e. tab) title for the form's page.\n\n headline (str): The <h1> on the form's page.\n\n blurb (str): Intro <p> on the form's page.\n\n name_label (str): Label for the name form field.\n\n name_placeholder (str): Field placeholder for the name form field.\n\n name_help_text (str): Extra help text for the name form field.\n\n sub_label (str): Label for the sub form field.\n\n sub_placeholder (str): Field placeholder for the sub form field.\n\n sub_help_text (str): Extra help text for the sub form field.\n\n name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier\n\n sub_input_mode (str): Input mode can be \"numeric\", \"tel\", \"search\", etc. to override default \"text\" keyboard on\n mobile devices\n\n sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier\n\n sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier\n\n Extra args and kwargs are passed through to the underlying django.forms.Form.\n \"\"\"\n super().__init__(auto_id=True, label_suffix=\"\", *args, **kwargs)\n\n self.title = title\n self.headline = headline\n self.blurb = blurb\n\n self.classes = \"col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)\n if sub_pattern:\n sub_widget.attrs.update({\"pattern\": sub_pattern})\n if sub_input_mode:\n sub_widget.attrs.update({\"inputmode\": sub_input_mode})\n if sub_max_length:\n sub_widget.attrs.update({\"maxlength\": sub_max_length})\n\n self.fields[\"sub\"] = forms.CharField(\n label=sub_label,\n widget=sub_widget,\n help_text=sub_help_text,\n )\n\n name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)\n if name_max_length:\n name_widget.attrs.update({\"maxlength\": name_max_length})\n\n self.fields[\"name\"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass MSTCourtesyCard(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the MST Courtesy Card.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Courtesy Card number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Courtesy Card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Courtesy Card.\"),\n sub_label=_(\"MST Courtesy Card number\"),\n sub_help_text=_(\"This is a 5-digit number on the front and back of your card.\"),\n sub_placeholder=\"12345\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=5,\n sub_pattern=r\"\\d{5}\",\n *args,\n **kwargs,\n )\n\n\nclass SBMTDMobilityPass(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the SBMTD Reduced Fare Mobility ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n 
title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Reduced Fare Mobility ID card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Reduced Fare Mobility ID.\"),\n sub_label=_(\"SBMTD Reduced Fare Mobility ID number\"),\n sub_help_text=_(\"This is a 4-digit number on the back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=4,\n sub_pattern=r\"\\d{4}\",\n *args,\n **kwargs,\n )\n", "path": "benefits/eligibility/forms.py"}]}
num_tokens_prompt: 2,822
num_tokens_diff: 143

problem_id: gh_patches_debug_16179
source: rasdani/github-patches
task_type: git_diff
in_source_id: quantumlib__Cirq-6026
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `MatrixGate` names don't survive serialization **Description of the issue** `_name` is excluded from `MatrixGate._json_dict_()` and so is reset to `None` by serialization+deserialization **How to reproduce the issue** ```python gate = cirq.MatrixGate(np.eye(2), name="name") gate_after_serialization = cirq.read_json(json_text=cirq.to_json(gate)) print(gate._name) # "name" print(gate_after_serialization._name) # None ``` **Cirq version** ``` 1.2.0.dev20230105212249 ``` </issue> <code> [start of cirq-core/cirq/ops/matrix_gates.py] 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Quantum gates defined by a matrix.""" 16 17 from typing import Any, Dict, Iterable, Optional, Tuple, TYPE_CHECKING 18 19 import numpy as np 20 21 from cirq import linalg, protocols, _import 22 from cirq._compat import proper_repr 23 from cirq.ops import raw_types, phased_x_z_gate 24 25 if TYPE_CHECKING: 26 import cirq 27 28 single_qubit_decompositions = _import.LazyLoader( 29 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions' 30 ) 31 two_qubit_to_cz = _import.LazyLoader( 32 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions' 33 ) 34 three_qubit_decomposition = _import.LazyLoader( 35 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions' 36 ) 37 38 39 class MatrixGate(raw_types.Gate): 40 r"""A unitary qubit or qudit gate defined entirely by its numpy matrix. 41 42 For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix: 43 44 $$ 45 \begin{bmatrix} 46 0 & i \\ 47 1 & 0 48 \end{bmatrix} 49 $$ 50 """ 51 52 def __init__( 53 self, 54 matrix: np.ndarray, 55 *, 56 name: str = None, 57 qid_shape: Optional[Iterable[int]] = None, 58 unitary_check: bool = True, 59 unitary_check_rtol: float = 1e-5, 60 unitary_check_atol: float = 1e-8, 61 ) -> None: 62 """Initializes a matrix gate. 63 64 Args: 65 matrix: The matrix that defines the gate. 66 name: The optional name of the gate to be displayed. 67 qid_shape: The shape of state tensor that the matrix applies to. 68 If not specified, this value is inferred by assuming that the 69 matrix is supposed to apply to qubits. 70 unitary_check: If True, check that the supplied matrix is unitary up to the 71 given tolerances. This should only be disabled if the matrix has already been 72 checked for unitarity, in which case we get a slight performance improvement by 73 not checking again. 74 unitary_check_rtol: The relative tolerance for checking whether the supplied matrix 75 is unitary. See `cirq.is_unitary`. 76 unitary_check_atol: The absolute tolerance for checking whether the supplied matrix 77 is unitary. See `cirq.is_unitary`. 
78 79 Raises: 80 ValueError: If the matrix is not a square numpy array, if the matrix does not match 81 the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is 82 not a power of 2, or if the matrix not unitary (to the supplied precisions). 83 """ 84 if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]: 85 raise ValueError('`matrix` must be a square 2d numpy array.') 86 87 if qid_shape is None: 88 n = int(np.round(np.log2(matrix.shape[0] or 1))) 89 if 2**n != matrix.shape[0]: 90 raise ValueError( 91 f'Matrix width ({matrix.shape[0]}) is not a power of 2 and ' 92 f'qid_shape is not specified.' 93 ) 94 qid_shape = (2,) * n 95 96 self._matrix = matrix 97 self._qid_shape = tuple(qid_shape) 98 self._name = name 99 m = int(np.prod(self._qid_shape, dtype=np.int64)) 100 if self._matrix.shape != (m, m): 101 raise ValueError( 102 'Wrong matrix shape for qid_shape.\n' 103 f'Matrix shape: {self._matrix.shape}\n' 104 f'qid_shape: {self._qid_shape}\n' 105 ) 106 107 if unitary_check and not linalg.is_unitary( 108 matrix, rtol=unitary_check_rtol, atol=unitary_check_atol 109 ): 110 raise ValueError(f'Not a unitary matrix: {matrix}') 111 112 def with_name(self, name: str) -> 'MatrixGate': 113 """Creates a new MatrixGate with the same matrix and a new name.""" 114 return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False) 115 116 def _json_dict_(self) -> Dict[str, Any]: 117 return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape} 118 119 @classmethod 120 def _from_json_dict_(cls, matrix, qid_shape, **kwargs): 121 return cls(matrix=np.array(matrix), qid_shape=qid_shape) 122 123 def _qid_shape_(self) -> Tuple[int, ...]: 124 return self._qid_shape 125 126 def __pow__(self, exponent: Any) -> 'MatrixGate': 127 if not isinstance(exponent, (int, float)): 128 return NotImplemented 129 new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**exponent) 130 return MatrixGate(new_mat, qid_shape=self._qid_shape) 131 132 def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate': 133 if not isinstance(phase_turns, (int, float)): 134 return NotImplemented 135 if self._qid_shape[qubit_index] != 2: 136 return NotImplemented 137 result = np.copy(self._matrix).reshape(self._qid_shape * 2) 138 139 p = np.exp(2j * np.pi * phase_turns) 140 i = qubit_index 141 j = qubit_index + len(self._qid_shape) 142 result[linalg.slice_for_qubits_equal_to([i], 1)] *= p 143 result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p) 144 return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape) 145 146 def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE': 147 if self._qid_shape == (2,): 148 return [ 149 g.on(qubits[0]) 150 for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix) 151 ] 152 if self._qid_shape == (2,) * 2: 153 return two_qubit_to_cz.two_qubit_matrix_to_cz_operations( 154 *qubits, self._matrix, allow_partial_czs=True 155 ) 156 if self._qid_shape == (2,) * 3: 157 return three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix) 158 return NotImplemented 159 160 def _has_unitary_(self) -> bool: 161 return True 162 163 def _unitary_(self) -> np.ndarray: 164 return np.copy(self._matrix) 165 166 def _circuit_diagram_info_( 167 self, args: 'cirq.CircuitDiagramInfoArgs' 168 ) -> 'cirq.CircuitDiagramInfo': 169 n_qubits = len(self._qid_shape) 170 if self._name is not None: 171 symbols = ( 172 [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' 
for i in range(n_qubits)] 173 ) 174 return protocols.CircuitDiagramInfo(wire_symbols=symbols) 175 main = _matrix_to_diagram_symbol(self._matrix, args) 176 rest = [f'#{i+1}' for i in range(1, n_qubits)] 177 return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest]) 178 179 def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]: 180 args.validate_version('2.0') 181 if self._qid_shape == (2,): 182 return protocols.qasm( 183 phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits 184 ) 185 return None 186 187 def __hash__(self) -> int: 188 vals = tuple(v for _, v in np.ndenumerate(self._matrix)) 189 return hash((MatrixGate, vals)) 190 191 def _approx_eq_(self, other: Any, atol) -> bool: 192 if not isinstance(other, type(self)): 193 return NotImplemented 194 return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol) 195 196 def __eq__(self, other): 197 if not isinstance(other, type(self)): 198 return NotImplemented 199 return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix) 200 201 def __ne__(self, other): 202 return not self == other 203 204 def __repr__(self) -> str: 205 if all(e == 2 for e in self._qid_shape): 206 return f'cirq.MatrixGate({proper_repr(self._matrix)})' 207 return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})' 208 209 def __str__(self) -> str: 210 return str(self._matrix.round(3)) 211 212 213 def _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str: 214 if args.precision is not None: 215 matrix = matrix.round(args.precision) 216 result = str(matrix) 217 if args.use_unicode_characters: 218 lines = result.split('\n') 219 for i in range(len(lines)): 220 lines[i] = lines[i].replace('[[', '') 221 lines[i] = lines[i].replace(' [', '') 222 lines[i] = lines[i].replace(']', '') 223 w = max(len(line) for line in lines) 224 for i in range(len(lines)): 225 lines[i] = '│' + lines[i].ljust(w) + '│' 226 lines.insert(0, '┌' + ' ' * w + '┐') 227 lines.append('└' + ' ' * w + '┘') 228 result = '\n'.join(lines) 229 return result 230 [end of cirq-core/cirq/ops/matrix_gates.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cirq-core/cirq/ops/matrix_gates.py b/cirq-core/cirq/ops/matrix_gates.py --- a/cirq-core/cirq/ops/matrix_gates.py +++ b/cirq-core/cirq/ops/matrix_gates.py @@ -114,11 +114,15 @@ return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False) def _json_dict_(self) -> Dict[str, Any]: - return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape} + return { + 'matrix': self._matrix.tolist(), + 'qid_shape': self._qid_shape, + **({'name': self._name} if self._name is not None else {}), + } @classmethod - def _from_json_dict_(cls, matrix, qid_shape, **kwargs): - return cls(matrix=np.array(matrix), qid_shape=qid_shape) + def _from_json_dict_(cls, matrix, qid_shape, name=None, **kwargs): + return cls(matrix=np.array(matrix), qid_shape=qid_shape, name=name) def _qid_shape_(self) -> Tuple[int, ...]: return self._qid_shape
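The fix above serializes the optional `name` only when it is set and lets `_from_json_dict_` accept it back. A rough, unofficial round-trip check (not part of Cirq's test suite; it assumes the patched methods are in place and pokes the private `_name` attribute exactly as the reproduction in the issue does):

```python
import numpy as np
import cirq

gate = cirq.MatrixGate(np.eye(2), name="name")
restored = cirq.read_json(json_text=cirq.to_json(gate))

assert restored._name == "name"                         # came back as None before the patch
assert np.allclose(cirq.unitary(restored), np.eye(2))   # matrix survives the round trip

# A gate created without a name still omits the key, so previously written JSON stays readable.
assert cirq.read_json(json_text=cirq.to_json(cirq.MatrixGate(np.eye(2))))._name is None
```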
{"golden_diff": "diff --git a/cirq-core/cirq/ops/matrix_gates.py b/cirq-core/cirq/ops/matrix_gates.py\n--- a/cirq-core/cirq/ops/matrix_gates.py\n+++ b/cirq-core/cirq/ops/matrix_gates.py\n@@ -114,11 +114,15 @@\n return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False)\n \n def _json_dict_(self) -> Dict[str, Any]:\n- return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}\n+ return {\n+ 'matrix': self._matrix.tolist(),\n+ 'qid_shape': self._qid_shape,\n+ **({'name': self._name} if self._name is not None else {}),\n+ }\n \n @classmethod\n- def _from_json_dict_(cls, matrix, qid_shape, **kwargs):\n- return cls(matrix=np.array(matrix), qid_shape=qid_shape)\n+ def _from_json_dict_(cls, matrix, qid_shape, name=None, **kwargs):\n+ return cls(matrix=np.array(matrix), qid_shape=qid_shape, name=name)\n \n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n", "issue": "`MatrixGate` names don't survive serialization\n**Description of the issue**\r\n\r\n`_name` is excluded from `MatrixGate._json_dict_()` and so is reset to `None` by serialization+deserialization\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\ngate = cirq.MatrixGate(np.eye(2), name=\"name\")\r\ngate_after_serialization = cirq.read_json(json_text=cirq.to_json(gate))\r\n\r\nprint(gate._name) # \"name\"\r\nprint(gate_after_serialization._name) # None\r\n```\r\n\r\n**Cirq version**\r\n\r\n```\r\n1.2.0.dev20230105212249\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates defined by a matrix.\"\"\"\n\nfrom typing import Any, Dict, Iterable, Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, _import\nfrom cirq._compat import proper_repr\nfrom cirq.ops import raw_types, phased_x_z_gate\n\nif TYPE_CHECKING:\n import cirq\n\nsingle_qubit_decompositions = _import.LazyLoader(\n 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions'\n)\ntwo_qubit_to_cz = _import.LazyLoader(\n 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions'\n)\nthree_qubit_decomposition = _import.LazyLoader(\n 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions'\n)\n\n\nclass MatrixGate(raw_types.Gate):\n r\"\"\"A unitary qubit or qudit gate defined entirely by its numpy matrix.\n\n For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix:\n\n $$\n \\begin{bmatrix}\n 0 & i \\\\\n 1 & 0\n \\end{bmatrix}\n $$\n \"\"\"\n\n def __init__(\n self,\n matrix: np.ndarray,\n *,\n name: str = None,\n qid_shape: Optional[Iterable[int]] = None,\n unitary_check: bool = True,\n unitary_check_rtol: float = 1e-5,\n unitary_check_atol: float = 1e-8,\n ) -> None:\n \"\"\"Initializes a matrix gate.\n\n Args:\n matrix: The matrix that defines the gate.\n name: The optional name of the gate to be displayed.\n qid_shape: The shape of state tensor that the matrix applies 
to.\n If not specified, this value is inferred by assuming that the\n matrix is supposed to apply to qubits.\n unitary_check: If True, check that the supplied matrix is unitary up to the\n given tolerances. This should only be disabled if the matrix has already been\n checked for unitarity, in which case we get a slight performance improvement by\n not checking again.\n unitary_check_rtol: The relative tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n unitary_check_atol: The absolute tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n\n Raises:\n ValueError: If the matrix is not a square numpy array, if the matrix does not match\n the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is\n not a power of 2, or if the matrix not unitary (to the supplied precisions).\n \"\"\"\n if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n raise ValueError('`matrix` must be a square 2d numpy array.')\n\n if qid_shape is None:\n n = int(np.round(np.log2(matrix.shape[0] or 1)))\n if 2**n != matrix.shape[0]:\n raise ValueError(\n f'Matrix width ({matrix.shape[0]}) is not a power of 2 and '\n f'qid_shape is not specified.'\n )\n qid_shape = (2,) * n\n\n self._matrix = matrix\n self._qid_shape = tuple(qid_shape)\n self._name = name\n m = int(np.prod(self._qid_shape, dtype=np.int64))\n if self._matrix.shape != (m, m):\n raise ValueError(\n 'Wrong matrix shape for qid_shape.\\n'\n f'Matrix shape: {self._matrix.shape}\\n'\n f'qid_shape: {self._qid_shape}\\n'\n )\n\n if unitary_check and not linalg.is_unitary(\n matrix, rtol=unitary_check_rtol, atol=unitary_check_atol\n ):\n raise ValueError(f'Not a unitary matrix: {matrix}')\n\n def with_name(self, name: str) -> 'MatrixGate':\n \"\"\"Creates a new MatrixGate with the same matrix and a new name.\"\"\"\n return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False)\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}\n\n @classmethod\n def _from_json_dict_(cls, matrix, qid_shape, **kwargs):\n return cls(matrix=np.array(matrix), qid_shape=qid_shape)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def __pow__(self, exponent: Any) -> 'MatrixGate':\n if not isinstance(exponent, (int, float)):\n return NotImplemented\n new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**exponent)\n return MatrixGate(new_mat, qid_shape=self._qid_shape)\n\n def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate':\n if not isinstance(phase_turns, (int, float)):\n return NotImplemented\n if self._qid_shape[qubit_index] != 2:\n return NotImplemented\n result = np.copy(self._matrix).reshape(self._qid_shape * 2)\n\n p = np.exp(2j * np.pi * phase_turns)\n i = qubit_index\n j = qubit_index + len(self._qid_shape)\n result[linalg.slice_for_qubits_equal_to([i], 1)] *= p\n result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p)\n return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape)\n\n def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE':\n if self._qid_shape == (2,):\n return [\n g.on(qubits[0])\n for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix)\n ]\n if self._qid_shape == (2,) * 2:\n return two_qubit_to_cz.two_qubit_matrix_to_cz_operations(\n *qubits, self._matrix, allow_partial_czs=True\n )\n if self._qid_shape == (2,) * 3:\n return 
three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix)\n return NotImplemented\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _unitary_(self) -> np.ndarray:\n return np.copy(self._matrix)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n n_qubits = len(self._qid_shape)\n if self._name is not None:\n symbols = (\n [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' for i in range(n_qubits)]\n )\n return protocols.CircuitDiagramInfo(wire_symbols=symbols)\n main = _matrix_to_diagram_symbol(self._matrix, args)\n rest = [f'#{i+1}' for i in range(1, n_qubits)]\n return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._qid_shape == (2,):\n return protocols.qasm(\n phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits\n )\n return None\n\n def __hash__(self) -> int:\n vals = tuple(v for _, v in np.ndenumerate(self._matrix))\n return hash((MatrixGate, vals))\n\n def _approx_eq_(self, other: Any, atol) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix)\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self) -> str:\n if all(e == 2 for e in self._qid_shape):\n return f'cirq.MatrixGate({proper_repr(self._matrix)})'\n return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})'\n\n def __str__(self) -> str:\n return str(self._matrix.round(3))\n\n\ndef _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n matrix = matrix.round(args.precision)\n result = str(matrix)\n if args.use_unicode_characters:\n lines = result.split('\\n')\n for i in range(len(lines)):\n lines[i] = lines[i].replace('[[', '')\n lines[i] = lines[i].replace(' [', '')\n lines[i] = lines[i].replace(']', '')\n w = max(len(line) for line in lines)\n for i in range(len(lines)):\n lines[i] = '\u2502' + lines[i].ljust(w) + '\u2502'\n lines.insert(0, '\u250c' + ' ' * w + '\u2510')\n lines.append('\u2514' + ' ' * w + '\u2518')\n result = '\\n'.join(lines)\n return result\n", "path": "cirq-core/cirq/ops/matrix_gates.py"}]}
num_tokens_prompt: 3,603
num_tokens_diff: 285

problem_id: gh_patches_debug_50452
source: rasdani/github-patches
task_type: git_diff
in_source_id: jupyterhub__jupyterhub-364
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 404 on single user notebook server anytime I restart jupyterhub I just deployed jupyterhub from scratch using my ansible scripts this morning. The script are unchanged from 2-3 weeks ago when I did it previously and it was all working. I am running from latest master of jupyterhub. Anytime I restart jupyterhub (hub+proxy) I see the following behavior: - I can log in (using GitHub OAuth) - Trying to start my server gives me a 404 on `/hub/users/ellisonbg`. I can tell from the server log that the single user server isn't being started. - I can go to the Admin page and start my server from there. - But then, I get a redirect loop when I try to go to my single user server. - If I clear all my cookies while my single user server (started through the Admin page) is still running, it starts to work as expected. I can start and stop the single user server on the control panel page just fine. - If a restart jupyterhub, the problems start all over again. I am using a fixed proxy_auth_token and cookie secret. </issue> <code> [start of jupyterhub/handlers/pages.py] 1 """Basic html-rendering handlers.""" 2 3 # Copyright (c) Jupyter Development Team. 4 # Distributed under the terms of the Modified BSD License. 5 6 from tornado import web, gen 7 8 from .. import orm 9 from ..utils import admin_only, url_path_join 10 from .base import BaseHandler 11 from .login import LoginHandler 12 13 14 class RootHandler(BaseHandler): 15 """Render the Hub root page. 16 17 If logged in, redirects to: 18 19 - single-user server if running 20 - hub home, otherwise 21 22 Otherwise, renders login page. 23 """ 24 def get(self): 25 user = self.get_current_user() 26 if user: 27 if user.running: 28 url = user.server.base_url 29 self.log.debug("User is running: %s", url) 30 else: 31 url = url_path_join(self.hub.server.base_url, 'home') 32 self.log.debug("User is not running: %s", url) 33 self.redirect(url) 34 return 35 url = url_path_join(self.hub.server.base_url, 'login') 36 self.redirect(url) 37 38 39 class HomeHandler(BaseHandler): 40 """Render the user's home page.""" 41 42 @web.authenticated 43 def get(self): 44 html = self.render_template('home.html', 45 user=self.get_current_user(), 46 ) 47 self.finish(html) 48 49 50 class SpawnHandler(BaseHandler): 51 """Handle spawning of single-user servers via form. 52 53 GET renders the form, POST handles form submission. 54 55 Only enabled when Spawner.options_form is defined. 56 """ 57 @web.authenticated 58 def get(self): 59 """GET renders form for spawning with user-specified options""" 60 user = self.get_current_user() 61 if user.running: 62 url = user.server.base_url 63 self.log.debug("User is running: %s", url) 64 self.redirect(url) 65 return 66 if user.spawner.options_form: 67 html = self.render_template('spawn.html', 68 user=self.get_current_user(), 69 spawner_options_form=user.spawner.options_form, 70 ) 71 self.finish(html) 72 else: 73 # not running, no form. Trigger spawn. 
74 url = url_path_join(self.base_url, 'users', user.name) 75 self.redirect(url) 76 77 @web.authenticated 78 @gen.coroutine 79 def post(self): 80 """POST spawns with user-specified options""" 81 user = self.get_current_user() 82 if user.running: 83 url = user.server.base_url 84 self.log.warning("User is already running: %s", url) 85 self.redirect(url) 86 return 87 form_options = {} 88 for key, byte_list in self.request.body_arguments.items(): 89 form_options[key] = [ bs.decode('utf8') for bs in byte_list ] 90 options = user.spawner.options_from_form(form_options) 91 yield self.spawn_single_user(user, options=options) 92 self.set_login_cookie(user) 93 url = user.server.base_url 94 self.redirect(url) 95 96 class AdminHandler(BaseHandler): 97 """Render the admin page.""" 98 99 @admin_only 100 def get(self): 101 available = {'name', 'admin', 'running', 'last_activity'} 102 default_sort = ['admin', 'name'] 103 mapping = { 104 'running': '_server_id' 105 } 106 default_order = { 107 'name': 'asc', 108 'last_activity': 'desc', 109 'admin': 'desc', 110 'running': 'desc', 111 } 112 sorts = self.get_arguments('sort') or default_sort 113 orders = self.get_arguments('order') 114 115 for bad in set(sorts).difference(available): 116 self.log.warn("ignoring invalid sort: %r", bad) 117 sorts.remove(bad) 118 for bad in set(orders).difference({'asc', 'desc'}): 119 self.log.warn("ignoring invalid order: %r", bad) 120 orders.remove(bad) 121 122 # add default sort as secondary 123 for s in default_sort: 124 if s not in sorts: 125 sorts.append(s) 126 if len(orders) < len(sorts): 127 for col in sorts[len(orders):]: 128 orders.append(default_order[col]) 129 else: 130 orders = orders[:len(sorts)] 131 132 # this could be one incomprehensible nested list comprehension 133 # get User columns 134 cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ] 135 # get User.col.desc() order objects 136 ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ] 137 138 users = self.db.query(orm.User).order_by(*ordered) 139 users = [ self._user_from_orm(u) for u in users ] 140 running = [ u for u in users if u.running ] 141 142 html = self.render_template('admin.html', 143 user=self.get_current_user(), 144 admin_access=self.settings.get('admin_access', False), 145 users=users, 146 running=running, 147 sort={s:o for s,o in zip(sorts, orders)}, 148 ) 149 self.finish(html) 150 151 152 default_handlers = [ 153 (r'/', RootHandler), 154 (r'/home', HomeHandler), 155 (r'/admin', AdminHandler), 156 (r'/spawn', SpawnHandler), 157 ] 158 [end of jupyterhub/handlers/pages.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py --- a/jupyterhub/handlers/pages.py +++ b/jupyterhub/handlers/pages.py @@ -71,7 +71,7 @@ self.finish(html) else: # not running, no form. Trigger spawn. - url = url_path_join(self.base_url, 'users', user.name) + url = url_path_join(self.base_url, 'user', user.name) self.redirect(url) @web.authenticated
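The change above is a one-word route fix: the spawn handler was redirecting to a `users/` path that the hub never registers, which is the 404 described in the issue. An illustrative sketch only, assuming `base_url` is `/` and the user name from the report (not taken from JupyterHub's tests):

```python
from jupyterhub.utils import url_path_join

base_url, name = "/", "ellisonbg"

broken = url_path_join(base_url, "users", name)  # '/users/ellisonbg' -> no handler, hub answers 404
fixed = url_path_join(base_url, "user", name)    # '/user/ellisonbg'  -> the single-user server route
print(broken, fixed)
```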
{"golden_diff": "diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py\n--- a/jupyterhub/handlers/pages.py\n+++ b/jupyterhub/handlers/pages.py\n@@ -71,7 +71,7 @@\n self.finish(html)\n else:\n # not running, no form. Trigger spawn.\n- url = url_path_join(self.base_url, 'users', user.name)\n+ url = url_path_join(self.base_url, 'user', user.name)\n self.redirect(url)\n \n @web.authenticated\n", "issue": "404 on single user notebook server anytime I restart jupyterhub\nI just deployed jupyterhub from scratch using my ansible scripts this morning. The script are unchanged from 2-3 weeks ago when I did it previously and it was all working. I am running from latest master of jupyterhub.\n\nAnytime I restart jupyterhub (hub+proxy) I see the following behavior:\n- I can log in (using GitHub OAuth)\n- Trying to start my server gives me a 404 on `/hub/users/ellisonbg`. I can tell from the server log that the single user server isn't being started.\n- I can go to the Admin page and start my server from there.\n- But then, I get a redirect loop when I try to go to my single user server.\n- If I clear all my cookies while my single user server (started through the Admin page) is still running, it starts to work as expected. I can start and stop the single user server on the control panel page just fine.\n- If a restart jupyterhub, the problems start all over again.\n\nI am using a fixed proxy_auth_token and cookie secret.\n\n", "before_files": [{"content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom tornado import web, gen\n\nfrom .. import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\nfrom .login import LoginHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n \n If logged in, redirects to:\n \n - single-user server if running\n - hub home, otherwise\n \n Otherwise, renders login page.\n \"\"\"\n def get(self):\n user = self.get_current_user()\n if user:\n if user.running:\n url = user.server.base_url\n self.log.debug(\"User is running: %s\", url)\n else:\n url = url_path_join(self.hub.server.base_url, 'home')\n self.log.debug(\"User is not running: %s\", url)\n self.redirect(url)\n return\n url = url_path_join(self.hub.server.base_url, 'login')\n self.redirect(url)\n\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('home.html',\n user=self.get_current_user(),\n )\n self.finish(html)\n\n\nclass SpawnHandler(BaseHandler):\n \"\"\"Handle spawning of single-user servers via form.\n \n GET renders the form, POST handles form submission.\n \n Only enabled when Spawner.options_form is defined.\n \"\"\"\n @web.authenticated\n def get(self):\n \"\"\"GET renders form for spawning with user-specified options\"\"\"\n user = self.get_current_user()\n if user.running:\n url = user.server.base_url\n self.log.debug(\"User is running: %s\", url)\n self.redirect(url)\n return\n if user.spawner.options_form:\n html = self.render_template('spawn.html',\n user=self.get_current_user(),\n spawner_options_form=user.spawner.options_form,\n )\n self.finish(html)\n else:\n # not running, no form. 
Trigger spawn.\n url = url_path_join(self.base_url, 'users', user.name)\n self.redirect(url)\n \n @web.authenticated\n @gen.coroutine\n def post(self):\n \"\"\"POST spawns with user-specified options\"\"\"\n user = self.get_current_user()\n if user.running:\n url = user.server.base_url\n self.log.warning(\"User is already running: %s\", url)\n self.redirect(url)\n return\n form_options = {}\n for key, byte_list in self.request.body_arguments.items():\n form_options[key] = [ bs.decode('utf8') for bs in byte_list ]\n options = user.spawner.options_from_form(form_options)\n yield self.spawn_single_user(user, options=options)\n self.set_login_cookie(user)\n url = user.server.base_url\n self.redirect(url)\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': '_server_id'\n }\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n \n for bad in set(sorts).difference(available):\n self.log.warn(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n self.log.warn(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n \n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n \n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n \n users = self.db.query(orm.User).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n \n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\ndefault_handlers = [\n (r'/', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n (r'/spawn', SpawnHandler),\n]\n", "path": "jupyterhub/handlers/pages.py"}]}
num_tokens_prompt: 2,250
num_tokens_diff: 121

problem_id: gh_patches_debug_25460
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataDog__integrations-extras-662
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Importing ABC directly from collections will be removed in Python 3.10 Use collections for Python 2 and collections.abc for Python 3 https://github.com/DataDog/integrations-extras/blob/7e676a2d256e142ddf4cb44ac11a6e590a502779/filebeat/datadog_checks/filebeat/filebeat.py#L148 </issue> <code> [start of filebeat/datadog_checks/filebeat/filebeat.py] 1 # (C) Datadog, Inc. 2010-2016 2 # All rights reserved 3 # Licensed under Simplified BSD License (see LICENSE) 4 5 # stdlib 6 import collections 7 import errno 8 import json 9 import numbers 10 import os 11 import re 12 import sre_constants 13 14 import requests 15 from six import iteritems 16 17 from datadog_checks.base import AgentCheck, is_affirmative 18 from datadog_checks.base.utils.containers import hash_mutable 19 20 EVENT_TYPE = SOURCE_TYPE_NAME = "filebeat" 21 22 23 class FilebeatCheckHttpProfiler: 24 """ 25 Filebeat's HTTP profiler gives a bunch of counter variables; their value holds little interest, 26 what we really want is the delta in between runs. This class is responsible for caching the 27 values from the previous run 28 """ 29 30 INCREMENT_METRIC_NAMES = [ 31 "filebeat.events.done", 32 "filebeat.harvester.closed", 33 "filebeat.harvester.files.truncated", 34 "filebeat.harvester.open_files", 35 "filebeat.harvester.skipped", 36 "filebeat.harvester.started", 37 "filebeat.prospector.log.files.renamed", 38 "filebeat.prospector.log.files.truncated", 39 "libbeat.config.module.running", 40 "libbeat.config.module.starts", 41 "libbeat.config.module.stops", 42 "libbeat.config.reloads", 43 "libbeat.es.call_count.PublishEvents", 44 "libbeat.es.publish.read_bytes", 45 "libbeat.es.publish.read_errors", 46 "libbeat.es.publish.write_bytes", 47 "libbeat.es.publish.write_errors", 48 "libbeat.es.published_and_acked_events", 49 "libbeat.es.published_but_not_acked_events", 50 "libbeat.kafka.call_count.PublishEvents", 51 "libbeat.kafka.published_and_acked_events", 52 "libbeat.kafka.published_but_not_acked_events", 53 "libbeat.logstash.call_count.PublishEvents", 54 "libbeat.logstash.publish.read_bytes", 55 "libbeat.logstash.publish.read_errors", 56 "libbeat.logstash.publish.write_bytes", 57 "libbeat.logstash.publish.write_errors", 58 "libbeat.logstash.published_and_acked_events", 59 "libbeat.logstash.published_but_not_acked_events", 60 "libbeat.output.events.acked", 61 "libbeat.output.events.dropped", 62 "libbeat.output.events.failed", 63 "libbeat.output.events.total", 64 "libbeat.pipeline.events.dropped", 65 "libbeat.pipeline.events.failed", 66 "libbeat.pipeline.events.filtered", 67 "libbeat.pipeline.events.published", 68 "libbeat.pipeline.events.total", 69 "libbeat.publisher.messages_in_worker_queues", 70 "libbeat.publisher.published_events", 71 "libbeat.redis.publish.read_bytes", 72 "libbeat.redis.publish.read_errors", 73 "libbeat.redis.publish.write_bytes", 74 "libbeat.redis.publish.write_errors", 75 "publish.events", 76 "registrar.states.cleanup", 77 "registrar.states.current", 78 "registrar.states.update", 79 "registrar.writes", 80 ] 81 82 GAUGE_METRIC_NAMES = ["filebeat.harvester.running"] 83 84 VARS_ROUTE = "debug/vars" 85 86 def __init__(self, config): 87 self._config = config 88 self._previous_increment_values = {} 89 # regex matching ain't free, let's cache this 90 self._should_keep_metrics = {} 91 92 def gather_metrics(self): 93 if not self._config.stats_endpoint: 94 return {} 95 96 response = self._make_request() 97 98 return 
{"increment": self._gather_increment_metrics(response), "gauge": self._gather_gauge_metrics(response)} 99 100 def _make_request(self): 101 102 response = requests.get(self._config.stats_endpoint, timeout=self._config.timeout) 103 response.raise_for_status() 104 105 return self.flatten(response.json()) 106 107 def _gather_increment_metrics(self, response): 108 new_values = { 109 name: response[name] 110 for name in self.INCREMENT_METRIC_NAMES 111 if self._should_keep_metric(name) and name in response 112 } 113 114 deltas = self._compute_increment_deltas(new_values) 115 116 self._previous_increment_values = new_values 117 118 return deltas 119 120 def _compute_increment_deltas(self, new_values): 121 deltas = {} 122 123 for name, new_value in iteritems(new_values): 124 if name not in self._previous_increment_values or self._previous_increment_values[name] > new_value: 125 # either the agent or filebeat got restarted, we're not 126 # reporting anything this time around 127 return {} 128 deltas[name] = new_value - self._previous_increment_values[name] 129 130 return deltas 131 132 def _gather_gauge_metrics(self, response): 133 return { 134 name: response[name] 135 for name in self.GAUGE_METRIC_NAMES 136 if self._should_keep_metric(name) and name in response 137 } 138 139 def _should_keep_metric(self, name): 140 if name not in self._should_keep_metrics: 141 self._should_keep_metrics[name] = self._config.should_keep_metric(name) 142 return self._should_keep_metrics[name] 143 144 def flatten(self, d, parent_key="", sep="."): 145 items = [] 146 for k, v in d.items(): 147 new_key = parent_key + sep + k if parent_key else k 148 if isinstance(v, collections.MutableMapping): 149 items.extend(self.flatten(v, new_key, sep=sep).items()) 150 else: 151 items.append((new_key, v)) 152 return dict(items) 153 154 155 class FilebeatCheckInstanceConfig: 156 157 _only_metrics_regexes = None 158 159 def __init__(self, instance): 160 self._registry_file_path = instance.get("registry_file_path") 161 if self._registry_file_path is None: 162 raise Exception("An absolute path to a filebeat registry path must be specified") 163 164 self._stats_endpoint = instance.get("stats_endpoint") 165 166 self._only_metrics = instance.get("only_metrics", []) 167 168 if not isinstance(self._only_metrics, list): 169 raise Exception( 170 "If given, filebeat's only_metrics must be a list of regexes, got %s" % (self._only_metrics,) 171 ) 172 173 self._timeout = instance.get("timeout", 2) 174 if not isinstance(self._timeout, numbers.Real) or self._timeout <= 0: 175 raise Exception("If given, filebeats timeout must be a positive number, got %s" % (self._timeout,)) 176 177 @property 178 def registry_file_path(self): 179 return self._registry_file_path 180 181 @property 182 def stats_endpoint(self): 183 return self._stats_endpoint 184 185 @property 186 def timeout(self): 187 return self._timeout 188 189 def should_keep_metric(self, metric_name): 190 191 if not self._only_metrics: 192 return True 193 194 return any(re.search(regex, metric_name) for regex in self._compiled_regexes()) 195 196 def _compiled_regexes(self): 197 if self._only_metrics_regexes is None: 198 self._only_metrics_regexes = self._compile_regexes() 199 return self._only_metrics_regexes 200 201 def _compile_regexes(self): 202 compiled_regexes = [] 203 204 for regex in self._only_metrics: 205 try: 206 compiled_regexes.append(re.compile(regex)) 207 except sre_constants.error as ex: 208 raise Exception('Invalid only_metric regex for filebeat: "%s", error: %s' % (regex, ex)) 209 
210 return compiled_regexes 211 212 213 class FilebeatCheck(AgentCheck): 214 215 METRIC_PREFIX = "filebeat." 216 217 def __init__(self, *args, **kwargs): 218 AgentCheck.__init__(self, *args, **kwargs) 219 self.instance_cache = {} 220 221 def check(self, instance): 222 normalize_metrics = is_affirmative(instance.get("normalize_metrics", False)) 223 224 instance_key = hash_mutable(instance) 225 if instance_key in self.instance_cache: 226 config = self.instance_cache[instance_key]["config"] 227 profiler = self.instance_cache[instance_key]["profiler"] 228 else: 229 config = FilebeatCheckInstanceConfig(instance) 230 profiler = FilebeatCheckHttpProfiler(config) 231 self.instance_cache[instance_key] = {"config": config, "profiler": profiler} 232 233 self._process_registry(config) 234 self._gather_http_profiler_metrics(config, profiler, normalize_metrics) 235 236 def _process_registry(self, config): 237 registry_contents = self._parse_registry_file(config.registry_file_path) 238 239 if isinstance(registry_contents, dict): 240 # filebeat version < 5 241 registry_contents = registry_contents.values() 242 243 for item in registry_contents: 244 self._process_registry_item(item) 245 246 def _parse_registry_file(self, registry_file_path): 247 try: 248 with open(registry_file_path) as registry_file: 249 return json.load(registry_file) 250 except IOError as ex: 251 self.log.error("Cannot read the registry log file at %s: %s", registry_file_path, ex) 252 253 if ex.errno == errno.EACCES: 254 self.log.error( 255 "You might be interesting in having a look at " "https://github.com/elastic/beats/pull/6455" 256 ) 257 258 return [] 259 260 def _process_registry_item(self, item): 261 source = item["source"] 262 offset = item["offset"] 263 264 try: 265 stats = os.stat(source) 266 267 if self._is_same_file(stats, item["FileStateOS"]): 268 unprocessed_bytes = stats.st_size - offset 269 270 self.gauge("filebeat.registry.unprocessed_bytes", unprocessed_bytes, tags=["source:{0}".format(source)]) 271 else: 272 self.log.debug("Filebeat source %s appears to have changed", source) 273 except OSError: 274 self.log.debug("Unable to get stats on filebeat source %s", source) 275 276 def _is_same_file(self, stats, file_state_os): 277 return stats.st_dev == file_state_os["device"] and stats.st_ino == file_state_os["inode"] 278 279 def _gather_http_profiler_metrics(self, config, profiler, normalize_metrics): 280 try: 281 all_metrics = profiler.gather_metrics() 282 except Exception as ex: 283 self.log.error("Error when fetching metrics from %s: %s", config.stats_endpoint, ex) 284 return 285 286 tags = ["stats_endpoint:{0}".format(config.stats_endpoint)] 287 288 for action, metrics in iteritems(all_metrics): 289 method = getattr(self, action) 290 291 for name, value in iteritems(metrics): 292 if not name.startswith(self.METRIC_PREFIX) and normalize_metrics: 293 name = self.METRIC_PREFIX + name 294 method(name, value, tags) 295 [end of filebeat/datadog_checks/filebeat/filebeat.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/filebeat/datadog_checks/filebeat/filebeat.py b/filebeat/datadog_checks/filebeat/filebeat.py --- a/filebeat/datadog_checks/filebeat/filebeat.py +++ b/filebeat/datadog_checks/filebeat/filebeat.py @@ -3,7 +3,6 @@ # Licensed under Simplified BSD License (see LICENSE) # stdlib -import collections import errno import json import numbers @@ -12,11 +11,17 @@ import sre_constants import requests +import six from six import iteritems from datadog_checks.base import AgentCheck, is_affirmative from datadog_checks.base.utils.containers import hash_mutable +if six.PY3: + from collections.abc import MutableMapping +else: + from collections import MutableMapping + EVENT_TYPE = SOURCE_TYPE_NAME = "filebeat" @@ -145,7 +150,7 @@ items = [] for k, v in d.items(): new_key = parent_key + sep + k if parent_key else k - if isinstance(v, collections.MutableMapping): + if isinstance(v, MutableMapping): items.extend(self.flatten(v, new_key, sep=sep).items()) else: items.append((new_key, v))
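The patch above keeps the check importable on both interpreters by picking the right home for `MutableMapping`; the old `collections` alias is removed in Python 3.10. A stand-alone sketch of the same pattern, with the `flatten` helper lifted out of `FilebeatCheckHttpProfiler` so it can be tried without the Agent installed:

```python
import six

if six.PY3:
    from collections.abc import MutableMapping  # correct location on Python 3 (alias removed in 3.10)
else:
    from collections import MutableMapping      # Python 2 fallback


def flatten(d, parent_key="", sep="."):
    """Flatten nested dicts into dotted keys, mirroring the check's helper."""
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


print(flatten({"libbeat": {"es": {"publish": {"read_bytes": 12}}}}))
# -> {'libbeat.es.publish.read_bytes': 12}
```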
{"golden_diff": "diff --git a/filebeat/datadog_checks/filebeat/filebeat.py b/filebeat/datadog_checks/filebeat/filebeat.py\n--- a/filebeat/datadog_checks/filebeat/filebeat.py\n+++ b/filebeat/datadog_checks/filebeat/filebeat.py\n@@ -3,7 +3,6 @@\n # Licensed under Simplified BSD License (see LICENSE)\n \n # stdlib\n-import collections\n import errno\n import json\n import numbers\n@@ -12,11 +11,17 @@\n import sre_constants\n \n import requests\n+import six\n from six import iteritems\n \n from datadog_checks.base import AgentCheck, is_affirmative\n from datadog_checks.base.utils.containers import hash_mutable\n \n+if six.PY3:\n+ from collections.abc import MutableMapping\n+else:\n+ from collections import MutableMapping\n+\n EVENT_TYPE = SOURCE_TYPE_NAME = \"filebeat\"\n \n \n@@ -145,7 +150,7 @@\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n- if isinstance(v, collections.MutableMapping):\n+ if isinstance(v, MutableMapping):\n items.extend(self.flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n", "issue": " Importing ABC directly from collections will be removed in Python 3.10\nUse collections for Python 2 and collections.abc for Python 3\r\n\r\nhttps://github.com/DataDog/integrations-extras/blob/7e676a2d256e142ddf4cb44ac11a6e590a502779/filebeat/datadog_checks/filebeat/filebeat.py#L148\n", "before_files": [{"content": "# (C) Datadog, Inc. 2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nimport collections\nimport errno\nimport json\nimport numbers\nimport os\nimport re\nimport sre_constants\n\nimport requests\nfrom six import iteritems\n\nfrom datadog_checks.base import AgentCheck, is_affirmative\nfrom datadog_checks.base.utils.containers import hash_mutable\n\nEVENT_TYPE = SOURCE_TYPE_NAME = \"filebeat\"\n\n\nclass FilebeatCheckHttpProfiler:\n \"\"\"\n Filebeat's HTTP profiler gives a bunch of counter variables; their value holds little interest,\n what we really want is the delta in between runs. 
This class is responsible for caching the\n values from the previous run\n \"\"\"\n\n INCREMENT_METRIC_NAMES = [\n \"filebeat.events.done\",\n \"filebeat.harvester.closed\",\n \"filebeat.harvester.files.truncated\",\n \"filebeat.harvester.open_files\",\n \"filebeat.harvester.skipped\",\n \"filebeat.harvester.started\",\n \"filebeat.prospector.log.files.renamed\",\n \"filebeat.prospector.log.files.truncated\",\n \"libbeat.config.module.running\",\n \"libbeat.config.module.starts\",\n \"libbeat.config.module.stops\",\n \"libbeat.config.reloads\",\n \"libbeat.es.call_count.PublishEvents\",\n \"libbeat.es.publish.read_bytes\",\n \"libbeat.es.publish.read_errors\",\n \"libbeat.es.publish.write_bytes\",\n \"libbeat.es.publish.write_errors\",\n \"libbeat.es.published_and_acked_events\",\n \"libbeat.es.published_but_not_acked_events\",\n \"libbeat.kafka.call_count.PublishEvents\",\n \"libbeat.kafka.published_and_acked_events\",\n \"libbeat.kafka.published_but_not_acked_events\",\n \"libbeat.logstash.call_count.PublishEvents\",\n \"libbeat.logstash.publish.read_bytes\",\n \"libbeat.logstash.publish.read_errors\",\n \"libbeat.logstash.publish.write_bytes\",\n \"libbeat.logstash.publish.write_errors\",\n \"libbeat.logstash.published_and_acked_events\",\n \"libbeat.logstash.published_but_not_acked_events\",\n \"libbeat.output.events.acked\",\n \"libbeat.output.events.dropped\",\n \"libbeat.output.events.failed\",\n \"libbeat.output.events.total\",\n \"libbeat.pipeline.events.dropped\",\n \"libbeat.pipeline.events.failed\",\n \"libbeat.pipeline.events.filtered\",\n \"libbeat.pipeline.events.published\",\n \"libbeat.pipeline.events.total\",\n \"libbeat.publisher.messages_in_worker_queues\",\n \"libbeat.publisher.published_events\",\n \"libbeat.redis.publish.read_bytes\",\n \"libbeat.redis.publish.read_errors\",\n \"libbeat.redis.publish.write_bytes\",\n \"libbeat.redis.publish.write_errors\",\n \"publish.events\",\n \"registrar.states.cleanup\",\n \"registrar.states.current\",\n \"registrar.states.update\",\n \"registrar.writes\",\n ]\n\n GAUGE_METRIC_NAMES = [\"filebeat.harvester.running\"]\n\n VARS_ROUTE = \"debug/vars\"\n\n def __init__(self, config):\n self._config = config\n self._previous_increment_values = {}\n # regex matching ain't free, let's cache this\n self._should_keep_metrics = {}\n\n def gather_metrics(self):\n if not self._config.stats_endpoint:\n return {}\n\n response = self._make_request()\n\n return {\"increment\": self._gather_increment_metrics(response), \"gauge\": self._gather_gauge_metrics(response)}\n\n def _make_request(self):\n\n response = requests.get(self._config.stats_endpoint, timeout=self._config.timeout)\n response.raise_for_status()\n\n return self.flatten(response.json())\n\n def _gather_increment_metrics(self, response):\n new_values = {\n name: response[name]\n for name in self.INCREMENT_METRIC_NAMES\n if self._should_keep_metric(name) and name in response\n }\n\n deltas = self._compute_increment_deltas(new_values)\n\n self._previous_increment_values = new_values\n\n return deltas\n\n def _compute_increment_deltas(self, new_values):\n deltas = {}\n\n for name, new_value in iteritems(new_values):\n if name not in self._previous_increment_values or self._previous_increment_values[name] > new_value:\n # either the agent or filebeat got restarted, we're not\n # reporting anything this time around\n return {}\n deltas[name] = new_value - self._previous_increment_values[name]\n\n return deltas\n\n def _gather_gauge_metrics(self, response):\n return {\n name: 
response[name]\n for name in self.GAUGE_METRIC_NAMES\n if self._should_keep_metric(name) and name in response\n }\n\n def _should_keep_metric(self, name):\n if name not in self._should_keep_metrics:\n self._should_keep_metrics[name] = self._config.should_keep_metric(name)\n return self._should_keep_metrics[name]\n\n def flatten(self, d, parent_key=\"\", sep=\".\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(self.flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\nclass FilebeatCheckInstanceConfig:\n\n _only_metrics_regexes = None\n\n def __init__(self, instance):\n self._registry_file_path = instance.get(\"registry_file_path\")\n if self._registry_file_path is None:\n raise Exception(\"An absolute path to a filebeat registry path must be specified\")\n\n self._stats_endpoint = instance.get(\"stats_endpoint\")\n\n self._only_metrics = instance.get(\"only_metrics\", [])\n\n if not isinstance(self._only_metrics, list):\n raise Exception(\n \"If given, filebeat's only_metrics must be a list of regexes, got %s\" % (self._only_metrics,)\n )\n\n self._timeout = instance.get(\"timeout\", 2)\n if not isinstance(self._timeout, numbers.Real) or self._timeout <= 0:\n raise Exception(\"If given, filebeats timeout must be a positive number, got %s\" % (self._timeout,))\n\n @property\n def registry_file_path(self):\n return self._registry_file_path\n\n @property\n def stats_endpoint(self):\n return self._stats_endpoint\n\n @property\n def timeout(self):\n return self._timeout\n\n def should_keep_metric(self, metric_name):\n\n if not self._only_metrics:\n return True\n\n return any(re.search(regex, metric_name) for regex in self._compiled_regexes())\n\n def _compiled_regexes(self):\n if self._only_metrics_regexes is None:\n self._only_metrics_regexes = self._compile_regexes()\n return self._only_metrics_regexes\n\n def _compile_regexes(self):\n compiled_regexes = []\n\n for regex in self._only_metrics:\n try:\n compiled_regexes.append(re.compile(regex))\n except sre_constants.error as ex:\n raise Exception('Invalid only_metric regex for filebeat: \"%s\", error: %s' % (regex, ex))\n\n return compiled_regexes\n\n\nclass FilebeatCheck(AgentCheck):\n\n METRIC_PREFIX = \"filebeat.\"\n\n def __init__(self, *args, **kwargs):\n AgentCheck.__init__(self, *args, **kwargs)\n self.instance_cache = {}\n\n def check(self, instance):\n normalize_metrics = is_affirmative(instance.get(\"normalize_metrics\", False))\n\n instance_key = hash_mutable(instance)\n if instance_key in self.instance_cache:\n config = self.instance_cache[instance_key][\"config\"]\n profiler = self.instance_cache[instance_key][\"profiler\"]\n else:\n config = FilebeatCheckInstanceConfig(instance)\n profiler = FilebeatCheckHttpProfiler(config)\n self.instance_cache[instance_key] = {\"config\": config, \"profiler\": profiler}\n\n self._process_registry(config)\n self._gather_http_profiler_metrics(config, profiler, normalize_metrics)\n\n def _process_registry(self, config):\n registry_contents = self._parse_registry_file(config.registry_file_path)\n\n if isinstance(registry_contents, dict):\n # filebeat version < 5\n registry_contents = registry_contents.values()\n\n for item in registry_contents:\n self._process_registry_item(item)\n\n def _parse_registry_file(self, registry_file_path):\n try:\n with open(registry_file_path) as registry_file:\n return json.load(registry_file)\n except IOError 
as ex:\n self.log.error(\"Cannot read the registry log file at %s: %s\", registry_file_path, ex)\n\n if ex.errno == errno.EACCES:\n self.log.error(\n \"You might be interesting in having a look at \" \"https://github.com/elastic/beats/pull/6455\"\n )\n\n return []\n\n def _process_registry_item(self, item):\n source = item[\"source\"]\n offset = item[\"offset\"]\n\n try:\n stats = os.stat(source)\n\n if self._is_same_file(stats, item[\"FileStateOS\"]):\n unprocessed_bytes = stats.st_size - offset\n\n self.gauge(\"filebeat.registry.unprocessed_bytes\", unprocessed_bytes, tags=[\"source:{0}\".format(source)])\n else:\n self.log.debug(\"Filebeat source %s appears to have changed\", source)\n except OSError:\n self.log.debug(\"Unable to get stats on filebeat source %s\", source)\n\n def _is_same_file(self, stats, file_state_os):\n return stats.st_dev == file_state_os[\"device\"] and stats.st_ino == file_state_os[\"inode\"]\n\n def _gather_http_profiler_metrics(self, config, profiler, normalize_metrics):\n try:\n all_metrics = profiler.gather_metrics()\n except Exception as ex:\n self.log.error(\"Error when fetching metrics from %s: %s\", config.stats_endpoint, ex)\n return\n\n tags = [\"stats_endpoint:{0}\".format(config.stats_endpoint)]\n\n for action, metrics in iteritems(all_metrics):\n method = getattr(self, action)\n\n for name, value in iteritems(metrics):\n if not name.startswith(self.METRIC_PREFIX) and normalize_metrics:\n name = self.METRIC_PREFIX + name\n method(name, value, tags)\n", "path": "filebeat/datadog_checks/filebeat/filebeat.py"}]}
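The golden diff recorded in this row replaces the bare `collections.MutableMapping` check with a version-gated import, since the ABCs are removed from `collections` in Python 3.10. A minimal runnable sketch of that same pattern, reusing the check's `flatten` logic (the sample payload is illustrative and `six` must be installed):

```python
import six

if six.PY3:
    from collections.abc import MutableMapping
else:
    from collections import MutableMapping  # removed from `collections` in Python 3.10


def flatten(d, parent_key="", sep="."):
    # Recursively flatten nested dicts into dotted keys, as the filebeat
    # check does for the /debug/vars payload.
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


print(flatten({"libbeat": {"es": {"publish": {"read_bytes": 42}}}}))
# {'libbeat.es.publish.read_bytes': 42}
```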
num_tokens_prompt: 3,690
num_tokens_diff: 286

problem_id: gh_patches_debug_33154
source: rasdani/github-patches
task_type: git_diff
in_source_id: scikit-image__scikit-image-2509
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Inconsistent cython version requirements ## Description setup.py requires cython >= 0.21 while skimage/_build.py requires >= 0.23. Also, the two checks seem redundant. </issue> <code> [start of setup.py] 1 #! /usr/bin/env python 2 3 descr = """Image Processing SciKit 4 5 Image processing algorithms for SciPy, including IO, morphology, filtering, 6 warping, color manipulation, object detection, etc. 7 8 Please refer to the online documentation at 9 http://scikit-image.org/ 10 """ 11 12 DISTNAME = 'scikit-image' 13 DESCRIPTION = 'Image processing routines for SciPy' 14 LONG_DESCRIPTION = descr 15 MAINTAINER = 'Stefan van der Walt' 16 MAINTAINER_EMAIL = '[email protected]' 17 URL = 'http://scikit-image.org' 18 LICENSE = 'Modified BSD' 19 DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image' 20 21 import os 22 import sys 23 24 import setuptools 25 from distutils.command.build_py import build_py 26 from distutils.command.sdist import sdist 27 28 if sys.version_info[0] < 3: 29 import __builtin__ as builtins 30 else: 31 import builtins 32 33 # This is a bit (!) hackish: we are setting a global variable so that the main 34 # skimage __init__ can detect if it is being loaded by the setup routine, to 35 # avoid attempting to load components that aren't built yet: 36 # the numpy distutils extensions that are used by scikit-image to recursively 37 # build the compiled extensions in sub-packages is based on the Python import 38 # machinery. 39 builtins.__SKIMAGE_SETUP__ = True 40 41 42 with open('skimage/__init__.py') as fid: 43 for line in fid: 44 if line.startswith('__version__'): 45 VERSION = line.strip().split()[-1][1:-1] 46 break 47 48 with open('requirements.txt') as fid: 49 INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l] 50 51 # requirements for those browsing PyPI 52 REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES] 53 REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES] 54 REQUIRES = [r.replace('[array]', '') for r in REQUIRES] 55 56 57 def configuration(parent_package='', top_path=None): 58 if os.path.exists('MANIFEST'): 59 os.remove('MANIFEST') 60 61 from numpy.distutils.misc_util import Configuration 62 config = Configuration(None, parent_package, top_path) 63 64 config.set_options( 65 ignore_setup_xxx_py=True, 66 assume_default_configuration=True, 67 delegate_options_to_subpackages=True, 68 quiet=True) 69 70 config.add_subpackage('skimage') 71 config.add_data_dir('skimage/data') 72 73 return config 74 75 76 if __name__ == "__main__": 77 try: 78 from numpy.distutils.core import setup 79 extra = {'configuration': configuration} 80 # Do not try and upgrade larger dependencies 81 for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']: 82 try: 83 __import__(lib) 84 INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES 85 if lib not in i] 86 except ImportError: 87 pass 88 except ImportError: 89 if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or 90 sys.argv[1] in ('--help-commands', 91 '--version', 92 'clean', 93 'egg_info', 94 'install_egg_info', 95 'rotate')): 96 # For these actions, NumPy is not required. 97 # 98 # They are required to succeed without Numpy for example when 99 # pip is used to install scikit-image when Numpy is not yet 100 # present in the system. 
101 from setuptools import setup 102 extra = {} 103 else: 104 print('To install scikit-image from source, you will need numpy.\n' + 105 'Install numpy with pip:\n' + 106 'pip install numpy\n' 107 'Or use your operating system package manager. For more\n' + 108 'details, see http://scikit-image.org/docs/stable/install.html') 109 sys.exit(1) 110 111 setup( 112 name=DISTNAME, 113 description=DESCRIPTION, 114 long_description=LONG_DESCRIPTION, 115 maintainer=MAINTAINER, 116 maintainer_email=MAINTAINER_EMAIL, 117 url=URL, 118 license=LICENSE, 119 download_url=DOWNLOAD_URL, 120 version=VERSION, 121 122 classifiers=[ 123 'Development Status :: 4 - Beta', 124 'Environment :: Console', 125 'Intended Audience :: Developers', 126 'Intended Audience :: Science/Research', 127 'License :: OSI Approved :: BSD License', 128 'Programming Language :: C', 129 'Programming Language :: Python', 130 'Programming Language :: Python :: 3', 131 'Topic :: Scientific/Engineering', 132 'Operating System :: Microsoft :: Windows', 133 'Operating System :: POSIX', 134 'Operating System :: Unix', 135 'Operating System :: MacOS', 136 ], 137 install_requires=INSTALL_REQUIRES, 138 # install cython when running setup.py (source install) 139 setup_requires=['cython>=0.21'], 140 requires=REQUIRES, 141 packages=setuptools.find_packages(exclude=['doc']), 142 include_package_data=True, 143 zip_safe=False, # the package can run out of an .egg file 144 145 entry_points={ 146 'console_scripts': ['skivi = skimage.scripts.skivi:main'], 147 }, 148 149 cmdclass={'build_py': build_py, 150 'sdist': sdist}, 151 **extra 152 ) 153 [end of setup.py] [start of skimage/_build.py] 1 import sys 2 import os 3 import hashlib 4 from distutils.version import LooseVersion 5 6 # WindowsError is not defined on unix systems 7 try: 8 WindowsError 9 except NameError: 10 class WindowsError(Exception): 11 pass 12 13 14 def cython(pyx_files, working_path=''): 15 """Use Cython to convert the given files to C. 16 17 Parameters 18 ---------- 19 pyx_files : list of str 20 The input .pyx files. 21 22 """ 23 # Do not build cython files if target is clean 24 if len(sys.argv) >= 2 and sys.argv[1] == 'clean': 25 return 26 27 try: 28 from Cython import __version__ 29 if LooseVersion(__version__) < '0.23': 30 raise RuntimeError('Cython >= 0.23 needed to build scikit-image') 31 32 from Cython.Build import cythonize 33 except ImportError: 34 # If cython is not found, we do nothing -- the build will make use of 35 # the distributed .c files 36 print("Cython not found; falling back to pre-built %s" 37 % " ".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c') 38 for f in pyx_files])) 39 else: 40 for pyxfile in [os.path.join(working_path, f) for f in pyx_files]: 41 42 # if the .pyx file stayed the same, we don't need to recompile 43 if not _changed(pyxfile): 44 continue 45 46 if pyxfile.endswith('.pyx.in'): 47 process_tempita_pyx(pyxfile) 48 pyxfile = pyxfile.replace('.pyx.in', '.pyx') 49 50 cythonize(pyxfile) 51 52 53 def _md5sum(f): 54 m = hashlib.new('md5') 55 while True: 56 # Hash one 8096 byte block at a time 57 d = f.read(8096) 58 if not d: 59 break 60 m.update(d) 61 return m.hexdigest() 62 63 64 def _changed(filename): 65 """Compare the hash of a Cython file to the cached hash value on disk. 
66 67 """ 68 filename_cache = filename + '.md5' 69 70 try: 71 md5_cached = open(filename_cache, 'rb').read() 72 except IOError: 73 md5_cached = '0' 74 75 with open(filename, 'rb') as f: 76 md5_new = _md5sum(f) 77 78 with open(filename_cache, 'wb') as cf: 79 cf.write(md5_new.encode('utf-8')) 80 81 return md5_cached != md5_new.encode('utf-8') 82 83 84 def process_tempita_pyx(fromfile): 85 try: 86 try: 87 from Cython import Tempita as tempita 88 except ImportError: 89 import tempita 90 except ImportError: 91 raise Exception('Building requires Tempita: ' 92 'pip install --user Tempita') 93 template = tempita.Template.from_filename(fromfile, 94 encoding=sys.getdefaultencoding()) 95 pyxcontent = template.substitute() 96 if not fromfile.endswith('.pyx.in'): 97 raise ValueError("Unexpected extension of %s." % fromfile) 98 99 pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending 100 with open(pyxfile, "w") as f: 101 f.write(pyxcontent) 102 [end of skimage/_build.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -135,8 +135,6 @@ 'Operating System :: MacOS', ], install_requires=INSTALL_REQUIRES, - # install cython when running setup.py (source install) - setup_requires=['cython>=0.21'], requires=REQUIRES, packages=setuptools.find_packages(exclude=['doc']), include_package_data=True, diff --git a/skimage/_build.py b/skimage/_build.py --- a/skimage/_build.py +++ b/skimage/_build.py @@ -3,6 +3,8 @@ import hashlib from distutils.version import LooseVersion +CYTHON_VERSION = '0.23' + # WindowsError is not defined on unix systems try: WindowsError @@ -26,16 +28,21 @@ try: from Cython import __version__ - if LooseVersion(__version__) < '0.23': - raise RuntimeError('Cython >= 0.23 needed to build scikit-image') + if LooseVersion(__version__) < CYTHON_VERSION: + raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION) from Cython.Build import cythonize except ImportError: - # If cython is not found, we do nothing -- the build will make use of - # the distributed .c files - print("Cython not found; falling back to pre-built %s" - % " ".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c') - for f in pyx_files])) + # If cython is not found, the build will make use of + # the distributed .c files if present + c_files = [f.replace('.pyx.in', '.c').replace('.pyx', '.c') for f in pyx_files] + for cfile in [os.path.join(working_path, f) for f in c_files]: + if not os.path.isfile(cfile): + raise RuntimeError('Cython >= %s is required to build scikit-image from git checkout' \ + % CYTHON_VERSION) + + print("Cython >= %s not found; falling back to pre-built %s" \ + % (CYTHON_VERSION, " ".join(c_files))) else: for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:
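The patch above drops the redundant `setup_requires` pin and centralises the requirement in a `CYTHON_VERSION` constant inside `skimage/_build.py`. A condensed sketch of the resulting gate, simplified to the version check and the fallback to pre-built C files (function name and paths are illustrative):

```python
import os
from distutils.version import LooseVersion

CYTHON_VERSION = "0.23"


def require_cython_or_c_files(pyx_files, working_path=""):
    try:
        from Cython import __version__
    except ImportError:
        # Without Cython, the build can only proceed if the generated .c
        # files were shipped (e.g. in an sdist); a bare git checkout fails.
        c_files = [f.replace(".pyx.in", ".c").replace(".pyx", ".c") for f in pyx_files]
        missing = [f for f in c_files if not os.path.isfile(os.path.join(working_path, f))]
        if missing:
            raise RuntimeError(
                "Cython >= %s is required to build scikit-image from a git checkout"
                % CYTHON_VERSION
            )
        return
    if LooseVersion(__version__) < CYTHON_VERSION:
        raise RuntimeError("Cython >= %s needed to build scikit-image" % CYTHON_VERSION)
```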
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -135,8 +135,6 @@\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n- # install cython when running setup.py (source install)\n- setup_requires=['cython>=0.21'],\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\ndiff --git a/skimage/_build.py b/skimage/_build.py\n--- a/skimage/_build.py\n+++ b/skimage/_build.py\n@@ -3,6 +3,8 @@\n import hashlib\n from distutils.version import LooseVersion\n \n+CYTHON_VERSION = '0.23'\n+\n # WindowsError is not defined on unix systems\n try:\n WindowsError\n@@ -26,16 +28,21 @@\n \n try:\n from Cython import __version__\n- if LooseVersion(__version__) < '0.23':\n- raise RuntimeError('Cython >= 0.23 needed to build scikit-image')\n+ if LooseVersion(__version__) < CYTHON_VERSION:\n+ raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION)\n \n from Cython.Build import cythonize\n except ImportError:\n- # If cython is not found, we do nothing -- the build will make use of\n- # the distributed .c files\n- print(\"Cython not found; falling back to pre-built %s\"\n- % \" \".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c')\n- for f in pyx_files]))\n+ # If cython is not found, the build will make use of\n+ # the distributed .c files if present\n+ c_files = [f.replace('.pyx.in', '.c').replace('.pyx', '.c') for f in pyx_files]\n+ for cfile in [os.path.join(working_path, f) for f in c_files]:\n+ if not os.path.isfile(cfile):\n+ raise RuntimeError('Cython >= %s is required to build scikit-image from git checkout' \\\n+ % CYTHON_VERSION)\n+\n+ print(\"Cython >= %s not found; falling back to pre-built %s\" \\\n+ % (CYTHON_VERSION, \" \".join(c_files)))\n else:\n for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:\n", "issue": "Inconsistent cython version requirements\n## Description\n\nsetup.py requires cython >= 0.21 while skimage/_build.py requires >= 0.23. Also, the two checks seem redundant.\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\ndescr = \"\"\"Image Processing SciKit\n\nImage processing algorithms for SciPy, including IO, morphology, filtering,\nwarping, color manipulation, object detection, etc.\n\nPlease refer to the online documentation at\nhttp://scikit-image.org/\n\"\"\"\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing routines for SciPy'\nLONG_DESCRIPTION = descr\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'\n\nimport os\nimport sys\n\nimport setuptools\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\n\nif sys.version_info[0] < 3:\n import __builtin__ as builtins\nelse:\n import builtins\n\n# This is a bit (!) 
hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\nwith open('skimage/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n\n# requirements for those browsing PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\nREQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n config.add_data_dir('skimage/data')\n\n return config\n\n\nif __name__ == \"__main__\":\n try:\n from numpy.distutils.core import setup\n extra = {'configuration': configuration}\n # Do not try and upgrade larger dependencies\n for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:\n try:\n __import__(lib)\n INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES\n if lib not in i]\n except ImportError:\n pass\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate')):\n # For these actions, NumPy is not required.\n #\n # They are required to succeed without Numpy for example when\n # pip is used to install scikit-image when Numpy is not yet\n # present in the system.\n from setuptools import setup\n extra = {}\n else:\n print('To install scikit-image from source, you will need numpy.\\n' +\n 'Install numpy with pip:\\n' +\n 'pip install numpy\\n'\n 'Or use your operating system package manager. 
For more\\n' +\n 'details, see http://scikit-image.org/docs/stable/install.html')\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n version=VERSION,\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n # install cython when running setup.py (source install)\n setup_requires=['cython>=0.21'],\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n\n cmdclass={'build_py': build_py,\n 'sdist': sdist},\n **extra\n )\n", "path": "setup.py"}, {"content": "import sys\nimport os\nimport hashlib\nfrom distutils.version import LooseVersion\n\n# WindowsError is not defined on unix systems\ntry:\n WindowsError\nexcept NameError:\n class WindowsError(Exception):\n pass\n\n\ndef cython(pyx_files, working_path=''):\n \"\"\"Use Cython to convert the given files to C.\n\n Parameters\n ----------\n pyx_files : list of str\n The input .pyx files.\n\n \"\"\"\n # Do not build cython files if target is clean\n if len(sys.argv) >= 2 and sys.argv[1] == 'clean':\n return\n\n try:\n from Cython import __version__\n if LooseVersion(__version__) < '0.23':\n raise RuntimeError('Cython >= 0.23 needed to build scikit-image')\n\n from Cython.Build import cythonize\n except ImportError:\n # If cython is not found, we do nothing -- the build will make use of\n # the distributed .c files\n print(\"Cython not found; falling back to pre-built %s\"\n % \" \".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c')\n for f in pyx_files]))\n else:\n for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:\n\n # if the .pyx file stayed the same, we don't need to recompile\n if not _changed(pyxfile):\n continue\n\n if pyxfile.endswith('.pyx.in'):\n process_tempita_pyx(pyxfile)\n pyxfile = pyxfile.replace('.pyx.in', '.pyx')\n\n cythonize(pyxfile)\n\n\ndef _md5sum(f):\n m = hashlib.new('md5')\n while True:\n # Hash one 8096 byte block at a time\n d = f.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef _changed(filename):\n \"\"\"Compare the hash of a Cython file to the cached hash value on disk.\n\n \"\"\"\n filename_cache = filename + '.md5'\n\n try:\n md5_cached = open(filename_cache, 'rb').read()\n except IOError:\n md5_cached = '0'\n\n with open(filename, 'rb') as f:\n md5_new = _md5sum(f)\n\n with open(filename_cache, 'wb') as cf:\n cf.write(md5_new.encode('utf-8'))\n\n return md5_cached != md5_new.encode('utf-8')\n\n\ndef process_tempita_pyx(fromfile):\n try:\n try:\n from Cython import Tempita as tempita\n except ImportError:\n import tempita\n except ImportError:\n raise Exception('Building requires Tempita: '\n 'pip install --user Tempita')\n template = tempita.Template.from_filename(fromfile,\n encoding=sys.getdefaultencoding())\n pyxcontent = 
template.substitute()\n if not fromfile.endswith('.pyx.in'):\n raise ValueError(\"Unexpected extension of %s.\" % fromfile)\n\n pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending\n with open(pyxfile, \"w\") as f:\n f.write(pyxcontent)\n", "path": "skimage/_build.py"}]}
num_tokens_prompt: 2,998
num_tokens_diff: 556

problem_id: gh_patches_debug_395
source: rasdani/github-patches
task_type: git_diff
in_source_id: wright-group__WrightTools-640
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CITATION text file not distributed </issue> <code> [start of setup.py] 1 #! /usr/bin/env python3 2 3 import os 4 from setuptools import setup, find_packages 5 6 7 here = os.path.abspath(os.path.dirname(__file__)) 8 9 10 def read(fname): 11 return open(os.path.join(here, fname)).read() 12 13 14 extra_files = { 15 "WrightTools": [ 16 "datasets", 17 "datasets/*", 18 "datasets/*/*", 19 "datasets/*/*/*", 20 "datasets/*/*/*/*", 21 "VERSION", 22 "WT5_VERSION", 23 ] 24 } 25 26 with open(os.path.join(here, "WrightTools", "VERSION")) as version_file: 27 version = version_file.read().strip() 28 29 setup( 30 name="WrightTools", 31 packages=find_packages(exclude=("tests", "tests.*")), 32 package_data=extra_files, 33 python_requires=">=3.5", 34 setup_requires=["pytest-runner"], 35 tests_require=[ 36 "pytest", 37 "pytest-cov", 38 "sphinx==1.6.5", 39 "sphinx-gallery==0.1.12", 40 "sphinx-rtd-theme", 41 ], 42 install_requires=[ 43 "h5py", 44 "imageio", 45 "matplotlib>=2.0", 46 "numexpr", 47 "numpy", 48 "python-dateutil", 49 "pytz", 50 "scipy", 51 "tidy_headers", 52 ], 53 extras_require={ 54 "docs": ["sphinx-gallery==0.1.12"], 55 "dev": ["black", "pre-commit", "pydocstyle"], 56 }, 57 version=version, 58 description="Tools for loading, processing, and plotting multidimensional spectroscopy data.", 59 long_description=read("README.rst"), 60 author="WrightTools Developers", 61 license="MIT", 62 url="http://wright.tools", 63 keywords="spectroscopy science multidimensional visualization", 64 classifiers=[ 65 "Development Status :: 5 - Production/Stable", 66 "Intended Audience :: Science/Research", 67 "License :: OSI Approved :: MIT License", 68 "Natural Language :: English", 69 "Programming Language :: Python :: 3", 70 "Programming Language :: Python :: 3.5", 71 "Programming Language :: Python :: 3.6", 72 "Topic :: Scientific/Engineering", 73 ], 74 ) 75 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -18,6 +18,7 @@ "datasets/*/*", "datasets/*/*/*", "datasets/*/*/*/*", + "CITATION", "VERSION", "WT5_VERSION", ]
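With `"CITATION"` listed in `package_data`, the text file is bundled next to `VERSION` and `WT5_VERSION` inside the package directory. A small, hedged check of that outcome (it assumes a copy of WrightTools built from the fixed setup.py is importable):

```python
import os

import WrightTools

# The CITATION file should now sit alongside the package's other data files.
citation_path = os.path.join(os.path.dirname(WrightTools.__file__), "CITATION")
assert os.path.isfile(citation_path), "CITATION was not distributed with the package"
print(open(citation_path).read())
```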
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,6 +18,7 @@\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n+ \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n", "issue": "CITATION text file not distributed\n\n", "before_files": [{"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,155
num_tokens_diff: 72

problem_id: gh_patches_debug_8858
source: rasdani/github-patches
task_type: git_diff
in_source_id: acl-org__acl-anthology-447
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Volume page aliases generated incorrectly The Volume page Spoken Language Translation [here](https://aclweb.org/anthology/volumes/spoken-language-translation/) does not exist. Same with the [Tutorials volume](https://aclweb.org/anthology/volumes/tutorials/). I couldn't concoct any other volume names to test out, but both pages appear on Google. _Originally posted by @aryamccarthy in https://github.com/acl-org/acl-anthology/issues/170#issuecomment-507788799_ </issue> <code> [start of bin/create_hugo_pages.py] 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2019 Marcel Bollmann <[email protected]> 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 18 """Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug] 19 20 Creates page stubs for the full anthology based on the YAML data files. 21 22 This script can only be run after create_hugo_yaml.py! 23 24 Options: 25 --dir=DIR Hugo project directory. [default: {scriptdir}/../build/] 26 --debug Output debug-level log messages. 27 -c, --clean Delete existing files in target directory before generation. 28 -h, --help Display this helpful text. 29 """ 30 31 from docopt import docopt 32 from glob import glob 33 from slugify import slugify 34 from tqdm import tqdm 35 import logging as log 36 import os 37 import shutil 38 import yaml 39 40 try: 41 from yaml import CLoader as Loader 42 except ImportError: 43 from yaml import Loader 44 45 from anthology.utils import SeverityTracker 46 47 48 def check_directory(cdir, clean=False): 49 if not os.path.isdir(cdir) and not os.path.exists(cdir): 50 os.mkdir(cdir) 51 return True 52 entries = os.listdir(cdir) 53 if "_index.md" in entries: 54 entries.remove("_index.md") 55 if entries and not clean: 56 log.critical("Directory already exists and has content files: {}".format(cdir)) 57 log.info( 58 "Call this script with the -c/--clean flag to automatically DELETE existing files" 59 ) 60 return False 61 for entry in entries: 62 entry = "{}/{}".format(cdir, entry) 63 if os.path.isdir(entry): 64 shutil.rmtree(entry) 65 else: 66 os.remove(entry) 67 return True 68 69 70 def create_papers(srcdir, clean=False): 71 """Creates page stubs for all papers in the Anthology.""" 72 log.info("Creating stubs for papers...") 73 if not check_directory("{}/content/papers".format(srcdir), clean=clean): 74 return 75 76 # Go through all paper volumes 77 for yamlfile in tqdm(glob("{}/data/papers/*.yaml".format(srcdir))): 78 log.debug("Processing {}".format(yamlfile)) 79 with open(yamlfile, "r") as f: 80 data = yaml.load(f, Loader=Loader) 81 # Create a paper stub for each entry in the volume 82 for anthology_id, entry in data.items(): 83 paper_dir = "{}/content/papers/{}/{}".format( 84 srcdir, anthology_id[0], anthology_id[:3] 85 ) 86 if not os.path.exists(paper_dir): 87 os.makedirs(paper_dir) 88 with open("{}/{}.md".format(paper_dir, anthology_id), "w") as f: 89 print("---", file=f) 90 yaml.dump( 
91 {"anthology_id": anthology_id, "title": entry["title"]}, 92 default_flow_style=False, 93 stream=f, 94 ) 95 print("---", file=f) 96 97 98 def create_volumes(srcdir, clean=False): 99 """Creates page stubs for all proceedings volumes in the Anthology.""" 100 log.info("Creating stubs for volumes...") 101 if not check_directory("{}/content/volumes".format(srcdir), clean=clean): 102 return 103 104 yamlfile = "{}/data/volumes.yaml".format(srcdir) 105 log.debug("Processing {}".format(yamlfile)) 106 with open(yamlfile, "r") as f: 107 data = yaml.load(f, Loader=Loader) 108 # Create a paper stub for each proceedings volume 109 for anthology_id, entry in data.items(): 110 with open("{}/content/volumes/{}.md".format(srcdir, anthology_id), "w") as f: 111 print("---", file=f) 112 yaml.dump( 113 { 114 "anthology_id": anthology_id, 115 "title": entry["title"], 116 "aliases": [ 117 slugify(entry["title"]), 118 "/papers/{}/{}/{}/".format( 119 anthology_id[0], anthology_id[:3], anthology_id 120 ), 121 ], 122 }, 123 default_flow_style=False, 124 stream=f, 125 ) 126 print("---", file=f) 127 128 return data 129 130 131 def create_people(srcdir, clean=False): 132 """Creates page stubs for all authors/editors in the Anthology.""" 133 log.info("Creating stubs for people...") 134 if not check_directory("{}/content/people".format(srcdir), clean=clean): 135 return 136 137 for yamlfile in tqdm(glob("{}/data/people/*.yaml".format(srcdir))): 138 log.debug("Processing {}".format(yamlfile)) 139 with open(yamlfile, "r") as f: 140 data = yaml.load(f, Loader=Loader) 141 # Create a page stub for each person 142 for name, entry in data.items(): 143 person_dir = "{}/content/people/{}".format(srcdir, name[0]) 144 if not os.path.exists(person_dir): 145 os.makedirs(person_dir) 146 yaml_data = { 147 "name": name, 148 "title": entry["full"], 149 "lastname": entry["last"], 150 } 151 with open("{}/{}.md".format(person_dir, name), "w") as f: 152 print("---", file=f) 153 # "lastname" is dumped to allow sorting by it in Hugo 154 yaml.dump(yaml_data, default_flow_style=False, stream=f) 155 print("---", file=f) 156 157 return data 158 159 160 def create_venues_and_events(srcdir, clean=False): 161 """Creates page stubs for all venues and events in the Anthology.""" 162 yamlfile = "{}/data/venues.yaml".format(srcdir) 163 log.debug("Processing {}".format(yamlfile)) 164 with open(yamlfile, "r") as f: 165 data = yaml.load(f, Loader=Loader) 166 167 log.info("Creating stubs for venues...") 168 if not check_directory("{}/content/venues".format(srcdir), clean=clean): 169 return 170 # Create a paper stub for each venue (e.g. ACL) 171 for venue, venue_data in data.items(): 172 venue_str = venue_data["slug"] 173 with open("{}/content/venues/{}.md".format(srcdir, venue_str), "w") as f: 174 print("---", file=f) 175 yaml_data = {"venue": venue, "title": venue_data["name"]} 176 if venue_data["is_toplevel"]: 177 main_letter = venue_data["main_letter"] 178 yaml_data["aliases"] = ["/papers/{}/".format(main_letter)] 179 yaml.dump(yaml_data, default_flow_style=False, stream=f) 180 print("---", file=f) 181 182 log.info("Creating stubs for events...") 183 if not check_directory("{}/content/events".format(srcdir), clean=clean): 184 return 185 # Create a paper stub for each event (= venue + year, e.g. 
ACL 2018) 186 for venue, venue_data in data.items(): 187 venue_str = venue_data["slug"] 188 for year in venue_data["volumes_by_year"]: 189 with open( 190 "{}/content/events/{}-{}.md".format(srcdir, venue_str, year), "w" 191 ) as f: 192 print("---", file=f) 193 yaml_data = { 194 "venue": venue, 195 "year": year, 196 "title": "{} ({})".format(venue_data["name"], year), 197 } 198 if venue_data["is_toplevel"]: 199 main_letter = venue_data["main_letter"] 200 main_prefix = main_letter + year[-2:] # e.g., P05 201 yaml_data["aliases"] = [ 202 "/papers/{}/{}/".format(main_letter, main_prefix) 203 ] 204 yaml.dump(yaml_data, default_flow_style=False, stream=f) 205 print("---", file=f) 206 207 208 def create_sigs(srcdir, clean=False): 209 """Creates page stubs for all SIGs in the Anthology.""" 210 yamlfile = "{}/data/sigs.yaml".format(srcdir) 211 log.debug("Processing {}".format(yamlfile)) 212 with open(yamlfile, "r") as f: 213 data = yaml.load(f, Loader=Loader) 214 215 log.info("Creating stubs for SIGs...") 216 if not check_directory("{}/content/sigs".format(srcdir), clean=clean): 217 return 218 # Create a paper stub for each SIGS (e.g. SIGMORPHON) 219 for sig, sig_data in data.items(): 220 sig_str = sig_data["slug"] 221 with open("{}/content/sigs/{}.md".format(srcdir, sig_str), "w") as f: 222 print("---", file=f) 223 yaml.dump( 224 { 225 "acronym": sig, 226 "short_acronym": sig[3:] if sig.startswith("SIG") else sig, 227 "title": sig_data["name"], 228 }, 229 default_flow_style=False, 230 stream=f, 231 ) 232 print("---", file=f) 233 234 235 if __name__ == "__main__": 236 args = docopt(__doc__) 237 scriptdir = os.path.dirname(os.path.abspath(__file__)) 238 if "{scriptdir}" in args["--dir"]: 239 args["--dir"] = args["--dir"].format(scriptdir=scriptdir) 240 dir_ = os.path.abspath(args["--dir"]) 241 242 log_level = log.DEBUG if args["--debug"] else log.INFO 243 log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level) 244 tracker = SeverityTracker() 245 log.getLogger().addHandler(tracker) 246 247 create_papers(dir_, clean=args["--clean"]) 248 create_volumes(dir_, clean=args["--clean"]) 249 create_people(dir_, clean=args["--clean"]) 250 create_venues_and_events(dir_, clean=args["--clean"]) 251 create_sigs(dir_, clean=args["--clean"]) 252 253 if tracker.highest >= log.ERROR: 254 exit(1) 255 [end of bin/create_hugo_pages.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py --- a/bin/create_hugo_pages.py +++ b/bin/create_hugo_pages.py @@ -114,7 +114,7 @@ "anthology_id": anthology_id, "title": entry["title"], "aliases": [ - slugify(entry["title"]), + "/volumes/{}/".format(slugify(entry["title"])), "/papers/{}/{}/{}/".format( anthology_id[0], anthology_id[:3], anthology_id ),
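The one-line change prefixes the slug-derived alias with `/volumes/`, presumably so Hugo registers the alias at the site-level volume URL rather than a path relative to the stub page, which appears to be what left volumes such as Spoken Language Translation unreachable. A tiny sketch of the before/after alias values (uses `python-slugify`, as the script itself does; the title is one named in the issue):

```python
from slugify import slugify

title = "Spoken Language Translation"

old_alias = slugify(title)                          # 'spoken-language-translation'
new_alias = "/volumes/{}/".format(slugify(title))   # '/volumes/spoken-language-translation/'

print(old_alias)
print(new_alias)
```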
{"golden_diff": "diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py\n--- a/bin/create_hugo_pages.py\n+++ b/bin/create_hugo_pages.py\n@@ -114,7 +114,7 @@\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"aliases\": [\n- slugify(entry[\"title\"]),\n+ \"/volumes/{}/\".format(slugify(entry[\"title\"])),\n \"/papers/{}/{}/{}/\".format(\n anthology_id[0], anthology_id[:3], anthology_id\n ),\n", "issue": "Volume page aliases generated incorrectly\nThe Volume page Spoken Language Translation [here](https://aclweb.org/anthology/volumes/spoken-language-translation/) does not exist. Same with the [Tutorials volume](https://aclweb.org/anthology/volumes/tutorials/). I couldn't concoct any other volume names to test out, but both pages appear on Google.\r\n\r\n_Originally posted by @aryamccarthy in https://github.com/acl-org/acl-anthology/issues/170#issuecomment-507788799_\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]\n\nCreates page stubs for the full anthology based on the YAML data files.\n\nThis script can only be run after create_hugo_yaml.py!\n\nOptions:\n --dir=DIR Hugo project directory. 
[default: {scriptdir}/../build/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom glob import glob\nfrom slugify import slugify\nfrom tqdm import tqdm\nimport logging as log\nimport os\nimport shutil\nimport yaml\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nfrom anthology.utils import SeverityTracker\n\n\ndef check_directory(cdir, clean=False):\n if not os.path.isdir(cdir) and not os.path.exists(cdir):\n os.mkdir(cdir)\n return True\n entries = os.listdir(cdir)\n if \"_index.md\" in entries:\n entries.remove(\"_index.md\")\n if entries and not clean:\n log.critical(\"Directory already exists and has content files: {}\".format(cdir))\n log.info(\n \"Call this script with the -c/--clean flag to automatically DELETE existing files\"\n )\n return False\n for entry in entries:\n entry = \"{}/{}\".format(cdir, entry)\n if os.path.isdir(entry):\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n return True\n\n\ndef create_papers(srcdir, clean=False):\n \"\"\"Creates page stubs for all papers in the Anthology.\"\"\"\n log.info(\"Creating stubs for papers...\")\n if not check_directory(\"{}/content/papers\".format(srcdir), clean=clean):\n return\n\n # Go through all paper volumes\n for yamlfile in tqdm(glob(\"{}/data/papers/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each entry in the volume\n for anthology_id, entry in data.items():\n paper_dir = \"{}/content/papers/{}/{}\".format(\n srcdir, anthology_id[0], anthology_id[:3]\n )\n if not os.path.exists(paper_dir):\n os.makedirs(paper_dir)\n with open(\"{}/{}.md\".format(paper_dir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\"anthology_id\": anthology_id, \"title\": entry[\"title\"]},\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\ndef create_volumes(srcdir, clean=False):\n \"\"\"Creates page stubs for all proceedings volumes in the Anthology.\"\"\"\n log.info(\"Creating stubs for volumes...\")\n if not check_directory(\"{}/content/volumes\".format(srcdir), clean=clean):\n return\n\n yamlfile = \"{}/data/volumes.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each proceedings volume\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"aliases\": [\n slugify(entry[\"title\"]),\n \"/papers/{}/{}/{}/\".format(\n anthology_id[0], anthology_id[:3], anthology_id\n ),\n ],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n return data\n\n\ndef create_people(srcdir, clean=False):\n \"\"\"Creates page stubs for all authors/editors in the Anthology.\"\"\"\n log.info(\"Creating stubs for people...\")\n if not check_directory(\"{}/content/people\".format(srcdir), clean=clean):\n return\n\n for yamlfile in tqdm(glob(\"{}/data/people/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a page stub for each person\n for name, entry in data.items():\n 
person_dir = \"{}/content/people/{}\".format(srcdir, name[0])\n if not os.path.exists(person_dir):\n os.makedirs(person_dir)\n yaml_data = {\n \"name\": name,\n \"title\": entry[\"full\"],\n \"lastname\": entry[\"last\"],\n }\n with open(\"{}/{}.md\".format(person_dir, name), \"w\") as f:\n print(\"---\", file=f)\n # \"lastname\" is dumped to allow sorting by it in Hugo\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n return data\n\n\ndef create_venues_and_events(srcdir, clean=False):\n \"\"\"Creates page stubs for all venues and events in the Anthology.\"\"\"\n yamlfile = \"{}/data/venues.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for venues...\")\n if not check_directory(\"{}/content/venues\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each venue (e.g. ACL)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n with open(\"{}/content/venues/{}.md\".format(srcdir, venue_str), \"w\") as f:\n print(\"---\", file=f)\n yaml_data = {\"venue\": venue, \"title\": venue_data[\"name\"]}\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n yaml_data[\"aliases\"] = [\"/papers/{}/\".format(main_letter)]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n log.info(\"Creating stubs for events...\")\n if not check_directory(\"{}/content/events\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each event (= venue + year, e.g. ACL 2018)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n for year in venue_data[\"volumes_by_year\"]:\n with open(\n \"{}/content/events/{}-{}.md\".format(srcdir, venue_str, year), \"w\"\n ) as f:\n print(\"---\", file=f)\n yaml_data = {\n \"venue\": venue,\n \"year\": year,\n \"title\": \"{} ({})\".format(venue_data[\"name\"], year),\n }\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n main_prefix = main_letter + year[-2:] # e.g., P05\n yaml_data[\"aliases\"] = [\n \"/papers/{}/{}/\".format(main_letter, main_prefix)\n ]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n\ndef create_sigs(srcdir, clean=False):\n \"\"\"Creates page stubs for all SIGs in the Anthology.\"\"\"\n yamlfile = \"{}/data/sigs.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for SIGs...\")\n if not check_directory(\"{}/content/sigs\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each SIGS (e.g. 
SIGMORPHON)\n for sig, sig_data in data.items():\n sig_str = sig_data[\"slug\"]\n with open(\"{}/content/sigs/{}.md\".format(srcdir, sig_str), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"acronym\": sig,\n \"short_acronym\": sig[3:] if sig.startswith(\"SIG\") else sig,\n \"title\": sig_data[\"name\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--dir\"]:\n args[\"--dir\"] = args[\"--dir\"].format(scriptdir=scriptdir)\n dir_ = os.path.abspath(args[\"--dir\"])\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n create_papers(dir_, clean=args[\"--clean\"])\n create_volumes(dir_, clean=args[\"--clean\"])\n create_people(dir_, clean=args[\"--clean\"])\n create_venues_and_events(dir_, clean=args[\"--clean\"])\n create_sigs(dir_, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_hugo_pages.py"}]}
num_tokens_prompt: 3,529
num_tokens_diff: 121

problem_id: gh_patches_debug_24933
source: rasdani/github-patches
task_type: git_diff
in_source_id: urllib3__urllib3-1665
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Multipart mixed responses generate warnings If you read a multipart/mixed response, the connectionpool issues a couple of warnings about defects in the message. I'm not sure what the expected, correct behavior is, but these warnings seem spurious. Stick this perl script somewhere: ``` #!/usr/bin/perl print "Server: Some Server Name\r\n"; print "Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\r\n"; print "Content-Length: 178\r\n"; print "\r\n\r\n"; print "--36eeb8c4e26d842a\r\n"; print "Content-Type: text/plain\r\n"; print "\r\n"; print "7\r\n"; print "--36eeb8c4e26d842a\r\n"; print "Content-Type: text/plain\r\n"; print "\r\n"; print "9\r\n"; print "--36eeb8c4e26d842a\r\n"; print "Content-Type: text/plain\r\n"; print "\r\n"; print "11\r\n"; print "--36eeb8c4e26d842a--\r\n"; ``` Read it with requests (naturally, you'll have to change the URI to wherever you put the script): ``` import requests, logging logging.basicConfig(level=logging.WARNING) logging.getLogger("requests").setLevel(logging.DEBUG) headers = {'accept': "multipart/mixed"} r = requests.get("http://localhost:8124/cgi-bin/mpm.pl", headers=headers) print(r) ``` The following errors are displayed: ``` DEBUG:requests.packages.urllib3.connectionpool:"GET http://localhost:8124/cgi-bin/mpm.pl HTTP/1.1" 200 178 WARNING:requests.packages.urllib3.connectionpool:Failed to parse headers (url=http://localhost:8888/http://localhost:8124/cgi-bin/mpm.pl): [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: '' Traceback (most recent call last): File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/connectionpool.py", line 390, in _make_request assert_header_parsing(httplib_response.msg) File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/util/response.py", line 58, in assert_header_parsing raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) requests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: '' ``` It took me quite a while to work out that they were spurious (because in real life, the server side that is generating the multipart/mixed is more complicated!) See also kennethreitz/requests#3001 ## </issue> <code> [start of src/urllib3/util/response.py] 1 from __future__ import absolute_import 2 from ..packages.six.moves import http_client as httplib 3 4 from ..exceptions import HeaderParsingError 5 6 7 def is_fp_closed(obj): 8 """ 9 Checks whether a given file-like object is closed. 10 11 :param obj: 12 The file-like object to check. 13 """ 14 15 try: 16 # Check `isclosed()` first, in case Python3 doesn't set `closed`. 17 # GH Issue #928 18 return obj.isclosed() 19 except AttributeError: 20 pass 21 22 try: 23 # Check via the official file-like-object way. 24 return obj.closed 25 except AttributeError: 26 pass 27 28 try: 29 # Check if the object is a container for another file-like object that 30 # gets released on exhaustion (e.g. HTTPResponse). 31 return obj.fp is None 32 except AttributeError: 33 pass 34 35 raise ValueError("Unable to determine whether fp is closed.") 36 37 38 def assert_header_parsing(headers): 39 """ 40 Asserts whether all headers have been successfully parsed. 41 Extracts encountered errors from the result of parsing headers. 
42 43 Only works on Python 3. 44 45 :param headers: Headers to verify. 46 :type headers: `httplib.HTTPMessage`. 47 48 :raises urllib3.exceptions.HeaderParsingError: 49 If parsing errors are found. 50 """ 51 52 # This will fail silently if we pass in the wrong kind of parameter. 53 # To make debugging easier add an explicit check. 54 if not isinstance(headers, httplib.HTTPMessage): 55 raise TypeError("expected httplib.Message, got {0}.".format(type(headers))) 56 57 defects = getattr(headers, "defects", None) 58 get_payload = getattr(headers, "get_payload", None) 59 60 unparsed_data = None 61 if get_payload: 62 # get_payload is actually email.message.Message.get_payload; 63 # we're only interested in the result if it's not a multipart message 64 if not headers.is_multipart(): 65 payload = get_payload() 66 67 if isinstance(payload, (bytes, str)): 68 unparsed_data = payload 69 70 if defects or unparsed_data: 71 raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) 72 73 74 def is_response_to_head(response): 75 """ 76 Checks whether the request of a response has been a HEAD-request. 77 Handles the quirks of AppEngine. 78 79 :param conn: 80 :type conn: :class:`httplib.HTTPResponse` 81 """ 82 # FIXME: Can we do this somehow without accessing private httplib _method? 83 method = response._method 84 if isinstance(method, int): # Platform-specific: Appengine 85 return method == 3 86 return method.upper() == "HEAD" 87 [end of src/urllib3/util/response.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py --- a/src/urllib3/util/response.py +++ b/src/urllib3/util/response.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect from ..packages.six.moves import http_client as httplib from ..exceptions import HeaderParsingError @@ -66,6 +67,25 @@ if isinstance(payload, (bytes, str)): unparsed_data = payload + if defects: + # httplib is assuming a response body is available + # when parsing headers even when httplib only sends + # header data to parse_headers() This results in + # defects on multipart responses in particular. + # See: https://github.com/urllib3/urllib3/issues/800 + + # So we ignore the following defects: + # - StartBoundaryNotFoundDefect: + # The claimed start boundary was never found. + # - MultipartInvariantViolationDefect: + # A message claimed to be a multipart but no subparts were found. + defects = [ + defect + for defect in defects + if not isinstance( + defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect) + ) + ] if defects or unparsed_data: raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
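The patch ignores exactly two defect classes from the standard library's `email.errors`, which httplib reports when it parses multipart headers without the body attached. A standalone sketch of that filter (the helper name is illustrative):

```python
from email.errors import (
    MultipartInvariantViolationDefect,
    StartBoundaryNotFoundDefect,
)

HEADER_ONLY_DEFECTS = (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)


def real_defects(defects):
    # Keep only defects that are not artifacts of parsing headers in isolation.
    return [d for d in defects if not isinstance(d, HEADER_ONLY_DEFECTS)]


# The spurious case reported in the issue reduces to an empty list:
spurious = [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()]
print(real_defects(spurious))  # []
```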
{"golden_diff": "diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py\n--- a/src/urllib3/util/response.py\n+++ b/src/urllib3/util/response.py\n@@ -1,4 +1,5 @@\n from __future__ import absolute_import\n+from email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect\n from ..packages.six.moves import http_client as httplib\n \n from ..exceptions import HeaderParsingError\n@@ -66,6 +67,25 @@\n \n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n+ if defects:\n+ # httplib is assuming a response body is available\n+ # when parsing headers even when httplib only sends\n+ # header data to parse_headers() This results in\n+ # defects on multipart responses in particular.\n+ # See: https://github.com/urllib3/urllib3/issues/800\n+\n+ # So we ignore the following defects:\n+ # - StartBoundaryNotFoundDefect:\n+ # The claimed start boundary was never found.\n+ # - MultipartInvariantViolationDefect:\n+ # A message claimed to be a multipart but no subparts were found.\n+ defects = [\n+ defect\n+ for defect in defects\n+ if not isinstance(\n+ defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n+ )\n+ ]\n \n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n", "issue": "Multipart mixed responses generate warnings\nIf you read a multipart/mixed response, the connectionpool issues a couple of warnings about defects in the message. I'm not sure what the expected, correct behavior is, but these warnings seem spurious.\n\nStick this perl script somewhere:\n\n```\n#!/usr/bin/perl\n\nprint \"Server: Some Server Name\\r\\n\";\nprint \"Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Length: 178\\r\\n\";\nprint \"\\r\\n\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"7\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"9\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"11\\r\\n\";\nprint \"--36eeb8c4e26d842a--\\r\\n\";\n```\n\nRead it with requests (naturally, you'll have to change the URI to wherever you put the script):\n\n```\nimport requests, logging\n\nlogging.basicConfig(level=logging.WARNING)\nlogging.getLogger(\"requests\").setLevel(logging.DEBUG)\n\nheaders = {'accept': \"multipart/mixed\"}\nr = requests.get(\"http://localhost:8124/cgi-bin/mpm.pl\", headers=headers)\n\nprint(r)\n```\n\nThe following errors are displayed:\n\n```\nDEBUG:requests.packages.urllib3.connectionpool:\"GET http://localhost:8124/cgi-bin/mpm.pl HTTP/1.1\" 200 178\nWARNING:requests.packages.urllib3.connectionpool:Failed to parse headers (url=http://localhost:8888/http://localhost:8124/cgi-bin/mpm.pl): [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''\nTraceback (most recent call last):\n File \"/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/connectionpool.py\", line 390, in _make_request\n assert_header_parsing(httplib_response.msg)\n File \"/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/util/response.py\", line 58, in assert_header_parsing\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\nrequests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(), 
MultipartInvariantViolationDefect()], unparsed data: ''\n```\n\nIt took me quite a while to work out that they were spurious (because in real life, the server side that is generating the multipart/mixed is more complicated!)\n\nSee also kennethreitz/requests#3001\n## \n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(\"expected httplib.Message, got {0}.\".format(type(headers)))\n\n defects = getattr(headers, \"defects\", None)\n get_payload = getattr(headers, \"get_payload\", None)\n\n unparsed_data = None\n if get_payload:\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == \"HEAD\"\n", "path": "src/urllib3/util/response.py"}]}
1,977
347
gh_patches_debug_1056
rasdani/github-patches
git_diff
DDMAL__CantusDB-776
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Chant Search Manuscript view - change URL path to match OldCantus I understand we're trying to keep URLs the same between OldCantus and NewCantus, but there's a difference in the Chant Search Manuscript view. OldCantus uses `/searchms/` (e.g. https://cantus.uwaterloo.ca/searchms/123610?t=est), whereas NewCantus uses `/chant-search-ms/` (e.g. http://206.12.93.196/chant-search-ms/123610?t=est). This doesn't strike me as a particularly vital difference - I doubt many people will have bookmarked and or cited a Search Manuscript page. But this would be a fairly simple fix, so we may as well make NewCantus work the same as OldCantus in this case. Bigger picture question: how important is it that all URL paths match between OldCantus and New? @annamorphism, do you have a sense of this? </issue> <code> [start of django/cantusdb_project/main_app/urls.py] 1 from django.urls import include, path, reverse 2 from django.contrib.auth.views import ( 3 PasswordResetView, 4 PasswordResetDoneView, 5 PasswordResetConfirmView, 6 PasswordResetCompleteView, 7 ) 8 from main_app.views import views 9 import debug_toolbar 10 from main_app.views.century import ( 11 CenturyDetailView, 12 ) 13 from main_app.views.chant import ( 14 ChantByCantusIDView, 15 ChantCreateView, 16 ChantDeleteView, 17 ChantDetailView, 18 ChantEditSyllabificationView, 19 ChantIndexView, 20 ChantListView, 21 ChantProofreadView, 22 ChantSearchView, 23 ChantSearchMSView, 24 CISearchView, 25 MelodySearchView, 26 SourceEditChantsView, 27 ) 28 from main_app.views.feast import ( 29 FeastDetailView, 30 FeastListView, 31 ) 32 from main_app.views.genre import ( 33 GenreDetailView, 34 GenreListView, 35 ) 36 from main_app.views.notation import ( 37 NotationDetailView, 38 ) 39 from main_app.views.office import ( 40 OfficeListView, 41 OfficeDetailView, 42 ) 43 from main_app.views.provenance import ( 44 ProvenanceDetailView, 45 ) 46 from main_app.views.sequence import ( 47 SequenceDetailView, 48 SequenceEditView, 49 SequenceListView, 50 ) 51 from main_app.views.source import ( 52 SourceCreateView, 53 SourceDetailView, 54 SourceEditView, 55 SourceListView, 56 ) 57 from main_app.views.user import ( 58 CustomLoginView, 59 CustomLogoutView, 60 IndexerListView, 61 UserDetailView, 62 UserListView, 63 UserSourceListView, 64 ) 65 66 urlpatterns = [ 67 path("__debug__/", include(debug_toolbar.urls)), 68 path( 69 "contact/", 70 views.contact, 71 name="contact", 72 ), 73 # login/logout/user 74 path( 75 "login/", 76 CustomLoginView.as_view(redirect_authenticated_user=True), 77 name="login", 78 ), 79 path( 80 "logout/", 81 CustomLogoutView.as_view(), 82 name="logout", 83 ), 84 path( 85 "my-sources/", 86 UserSourceListView.as_view(), 87 name="my-sources", 88 ), 89 path( 90 "user/<int:pk>", 91 UserDetailView.as_view(), 92 name="user-detail", 93 ), 94 path( 95 "users/", 96 UserListView.as_view(), 97 name="user-list", 98 ), 99 path( 100 "change-password/", 101 views.change_password, 102 name="change-password", 103 ), 104 # password reset views 105 path( 106 # here, user can initiate a request to send a password reset email 107 "reset-password/", 108 PasswordResetView.as_view( 109 template_name="registration/reset_password.html", 110 email_template_name="registration/reset_password_email.html", 111 success_url="/reset-password-sent/", 112 ), 113 name="reset_password", 114 ), 115 path( 116 # we display this page once the password reset email has been sent 117 
"reset-password-sent/", 118 PasswordResetDoneView.as_view( 119 template_name="registration/reset_password_sent.html", 120 ), 121 name="reset_password_done", 122 ), 123 path( 124 # here, the user can specify their new password 125 "reset/<uidb64>/<token>", 126 PasswordResetConfirmView.as_view( 127 template_name="registration/reset_password_confirm.html", 128 success_url="/reset-password-complete/", 129 ), 130 name="reset_password_confirm", 131 ), 132 path( 133 # we display this page once a user has completed a password reset 134 # depending on whether their attempt was successful, this page either shows 135 # a success message or a non-success message. 136 "reset-password-complete/", 137 PasswordResetCompleteView.as_view( 138 template_name="registration/reset_password_complete.html" 139 ), 140 name="reset_password_complete", 141 ), 142 # century 143 path("century/<int:pk>", CenturyDetailView.as_view(), name="century-detail"), 144 # chant 145 path( 146 "chants/", 147 ChantListView.as_view(), 148 name="chant-list", 149 ), # /chants/?source={source id} 150 path( 151 "chant/<int:pk>", 152 ChantDetailView.as_view(), 153 name="chant-detail", 154 ), 155 path( 156 "chant-search/", 157 ChantSearchView.as_view(), 158 name="chant-search", 159 ), 160 path( 161 "chant-create/<int:source_pk>", 162 ChantCreateView.as_view(), 163 name="chant-create", 164 ), 165 path( 166 "id/<str:cantus_id>", 167 ChantByCantusIDView.as_view(), 168 name="chant-by-cantus-id", 169 ), 170 path( 171 "chant-delete/<int:pk>", 172 ChantDeleteView.as_view(), 173 name="chant-delete", 174 ), 175 path( 176 "edit-chants/<int:source_id>", 177 SourceEditChantsView.as_view(), 178 name="source-edit-chants", 179 ), 180 path( 181 "proofread-chant/<int:source_id>", 182 ChantProofreadView.as_view(), 183 name="chant-proofread", 184 ), 185 path( 186 "edit-syllabification/<int:chant_id>", 187 ChantEditSyllabificationView.as_view(), 188 name="source-edit-syllabification", 189 ), 190 path( 191 "index/", 192 ChantIndexView.as_view(), 193 name="chant-index", 194 ), # /index/?source={source id} 195 # feast 196 path( 197 "feasts/", 198 FeastListView.as_view(), 199 name="feast-list", 200 ), 201 path( 202 "feast/<int:pk>", 203 FeastDetailView.as_view(), 204 name="feast-detail", 205 ), 206 # genre 207 path( 208 "genres/", 209 GenreListView.as_view(), 210 name="genre-list", 211 ), 212 path( 213 "genre/<int:pk>", 214 GenreDetailView.as_view(), 215 name="genre-detail", 216 ), 217 # indexer 218 path( 219 "indexers/", 220 IndexerListView.as_view(), 221 name="indexer-list", 222 ), 223 # notation 224 path( 225 "notation/<int:pk>", 226 NotationDetailView.as_view(), 227 name="notation-detail", 228 ), 229 # office 230 path( 231 "offices/", 232 OfficeListView.as_view(), 233 name="office-list", 234 ), 235 path( 236 "office/<int:pk>", 237 OfficeDetailView.as_view(), 238 name="office-detail", 239 ), 240 # provenance 241 path( 242 "provenance/<int:pk>", 243 ProvenanceDetailView.as_view(), 244 name="provenance-detail", 245 ), 246 # sequence 247 path( 248 "sequences/", 249 SequenceListView.as_view(), 250 name="sequence-list", 251 ), 252 path( 253 "sequence/<int:pk>", 254 SequenceDetailView.as_view(), 255 name="sequence-detail", 256 ), 257 path( 258 "edit-sequence/<int:sequence_id>", 259 SequenceEditView.as_view(), 260 name="sequence-edit", 261 ), 262 # source 263 path( 264 "sources/", 265 SourceListView.as_view(), 266 name="source-list", 267 ), 268 path( 269 "source/<int:pk>", 270 SourceDetailView.as_view(), 271 name="source-detail", 272 ), 273 path( 274 "source-create/", 
275 SourceCreateView.as_view(), 276 name="source-create", 277 ), 278 path( 279 "edit-source/<int:source_id>", 280 SourceEditView.as_view(), 281 name="source-edit", 282 ), 283 # melody 284 path( 285 "melody/", 286 MelodySearchView.as_view(), 287 name="melody-search", 288 ), 289 path( 290 "ajax/melody/<str:cantus_id>", 291 views.ajax_melody_list, 292 name="ajax-melody", 293 ), 294 path( 295 "ajax/melody-search/", 296 views.ajax_melody_search, 297 name="ajax-melody-search", 298 ), 299 # json api 300 path( 301 "json-sources/", 302 views.json_sources_export, 303 name="json-sources-export", 304 ), 305 path( 306 "json-node/<str:id>", 307 views.json_node_export, 308 name="json-node-export", 309 ), 310 path( 311 "json-nextchants/<str:cantus_id>", 312 views.json_nextchants, 313 name="json-nextchants", 314 ), 315 path( 316 "json-melody/<str:cantus_id>", 317 views.json_melody_export, 318 name="json-melody-export", 319 ), 320 # misc search 321 path( 322 "chant-search-ms/<int:source_pk>", 323 ChantSearchMSView.as_view(), 324 name="chant-search-ms", 325 ), 326 path( 327 "ci-search/<str:search_term>", 328 CISearchView.as_view(), 329 name="ci-search", 330 ), 331 path( 332 "ajax/search-bar/<str:search_term>", 333 views.ajax_search_bar, 334 name="ajax-search-bar", 335 ), 336 # misc 337 path( 338 "content-statistics", 339 views.items_count, 340 name="items-count", 341 ), 342 path( 343 "source/<str:source_id>/csv/", 344 views.csv_export, 345 name="csv-export", 346 ), 347 path( 348 "sites/default/files/csv/<str:source_id>.csv", 349 views.csv_export_redirect_from_old_path, 350 name="csv-export-old-path", 351 ), 352 path( 353 "ajax/concordance/<str:cantus_id>", 354 views.ajax_concordance_list, 355 name="ajax-concordance", 356 ), 357 # content overview (for project managers) 358 path( 359 "content-overview/", 360 views.content_overview, 361 name="content-overview", 362 ), 363 # /node/ url redirects 364 path( 365 "node/<int:pk>", 366 views.redirect_node_url, 367 name="redirect-node-url", 368 ), 369 # /indexer/ url redirects 370 path( 371 "indexer/<int:pk>", 372 views.redirect_indexer, 373 name="redirect-indexer", 374 ), 375 ] 376 377 handler404 = "main_app.views.views.handle404" 378 [end of django/cantusdb_project/main_app/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/urls.py b/django/cantusdb_project/main_app/urls.py --- a/django/cantusdb_project/main_app/urls.py +++ b/django/cantusdb_project/main_app/urls.py @@ -319,7 +319,7 @@ ), # misc search path( - "chant-search-ms/<int:source_pk>", + "searchms/<int:source_pk>", ChantSearchMSView.as_view(), name="chant-search-ms", ),
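The accepted change simply renames the route. If backwards compatibility with already-shared `/chant-search-ms/` links were a concern, a redirect could be layered on top; this is a hypothetical addition for illustration, not part of the patch:

```python
# Hypothetical extra pattern (not in the accepted diff): keep the old NewCantus
# path alive by redirecting it to the renamed route, preserving ?t=... queries.
from django.urls import path
from django.views.generic.base import RedirectView

urlpatterns += [
    path(
        "chant-search-ms/<int:source_pk>",
        RedirectView.as_view(
            pattern_name="chant-search-ms",  # now resolves to searchms/<source_pk>
            query_string=True,
            permanent=True,
        ),
        name="chant-search-ms-redirect",
    ),
]
```

Whether that is worth doing comes back to the bigger-picture question in the issue about how strictly URL paths need to match OldCantus.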
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/urls.py b/django/cantusdb_project/main_app/urls.py\n--- a/django/cantusdb_project/main_app/urls.py\n+++ b/django/cantusdb_project/main_app/urls.py\n@@ -319,7 +319,7 @@\n ),\n # misc search\n path(\n- \"chant-search-ms/<int:source_pk>\",\n+ \"searchms/<int:source_pk>\",\n ChantSearchMSView.as_view(),\n name=\"chant-search-ms\",\n ),\n", "issue": "Chant Search Manuscript view - change URL path to match OldCantus\nI understand we're trying to keep URLs the same between OldCantus and NewCantus, but there's a difference in the Chant Search Manuscript view. OldCantus uses `/searchms/` (e.g. https://cantus.uwaterloo.ca/searchms/123610?t=est), whereas NewCantus uses `/chant-search-ms/` (e.g. http://206.12.93.196/chant-search-ms/123610?t=est).\r\n\r\nThis doesn't strike me as a particularly vital difference - I doubt many people will have bookmarked and or cited a Search Manuscript page. But this would be a fairly simple fix, so we may as well make NewCantus work the same as OldCantus in this case.\r\n\r\nBigger picture question: how important is it that all URL paths match between OldCantus and New? @annamorphism, do you have a sense of this?\n", "before_files": [{"content": "from django.urls import include, path, reverse\nfrom django.contrib.auth.views import (\n PasswordResetView,\n PasswordResetDoneView,\n PasswordResetConfirmView,\n PasswordResetCompleteView,\n)\nfrom main_app.views import views\nimport debug_toolbar\nfrom main_app.views.century import (\n CenturyDetailView,\n)\nfrom main_app.views.chant import (\n ChantByCantusIDView,\n ChantCreateView,\n ChantDeleteView,\n ChantDetailView,\n ChantEditSyllabificationView,\n ChantIndexView,\n ChantListView,\n ChantProofreadView,\n ChantSearchView,\n ChantSearchMSView,\n CISearchView,\n MelodySearchView,\n SourceEditChantsView,\n)\nfrom main_app.views.feast import (\n FeastDetailView,\n FeastListView,\n)\nfrom main_app.views.genre import (\n GenreDetailView,\n GenreListView,\n)\nfrom main_app.views.notation import (\n NotationDetailView,\n)\nfrom main_app.views.office import (\n OfficeListView,\n OfficeDetailView,\n)\nfrom main_app.views.provenance import (\n ProvenanceDetailView,\n)\nfrom main_app.views.sequence import (\n SequenceDetailView,\n SequenceEditView,\n SequenceListView,\n)\nfrom main_app.views.source import (\n SourceCreateView,\n SourceDetailView,\n SourceEditView,\n SourceListView,\n)\nfrom main_app.views.user import (\n CustomLoginView,\n CustomLogoutView,\n IndexerListView,\n UserDetailView,\n UserListView,\n UserSourceListView,\n)\n\nurlpatterns = [\n path(\"__debug__/\", include(debug_toolbar.urls)),\n path(\n \"contact/\",\n views.contact,\n name=\"contact\",\n ),\n # login/logout/user\n path(\n \"login/\",\n CustomLoginView.as_view(redirect_authenticated_user=True),\n name=\"login\",\n ),\n path(\n \"logout/\",\n CustomLogoutView.as_view(),\n name=\"logout\",\n ),\n path(\n \"my-sources/\",\n UserSourceListView.as_view(),\n name=\"my-sources\",\n ),\n path(\n \"user/<int:pk>\",\n UserDetailView.as_view(),\n name=\"user-detail\",\n ),\n path(\n \"users/\",\n UserListView.as_view(),\n name=\"user-list\",\n ),\n path(\n \"change-password/\",\n views.change_password,\n name=\"change-password\",\n ),\n # password reset views\n path(\n # here, user can initiate a request to send a password reset email\n \"reset-password/\",\n PasswordResetView.as_view(\n template_name=\"registration/reset_password.html\",\n 
email_template_name=\"registration/reset_password_email.html\",\n success_url=\"/reset-password-sent/\",\n ),\n name=\"reset_password\",\n ),\n path(\n # we display this page once the password reset email has been sent\n \"reset-password-sent/\",\n PasswordResetDoneView.as_view(\n template_name=\"registration/reset_password_sent.html\",\n ),\n name=\"reset_password_done\",\n ),\n path(\n # here, the user can specify their new password\n \"reset/<uidb64>/<token>\",\n PasswordResetConfirmView.as_view(\n template_name=\"registration/reset_password_confirm.html\",\n success_url=\"/reset-password-complete/\",\n ),\n name=\"reset_password_confirm\",\n ),\n path(\n # we display this page once a user has completed a password reset\n # depending on whether their attempt was successful, this page either shows\n # a success message or a non-success message.\n \"reset-password-complete/\",\n PasswordResetCompleteView.as_view(\n template_name=\"registration/reset_password_complete.html\"\n ),\n name=\"reset_password_complete\",\n ),\n # century\n path(\"century/<int:pk>\", CenturyDetailView.as_view(), name=\"century-detail\"),\n # chant\n path(\n \"chants/\",\n ChantListView.as_view(),\n name=\"chant-list\",\n ), # /chants/?source={source id}\n path(\n \"chant/<int:pk>\",\n ChantDetailView.as_view(),\n name=\"chant-detail\",\n ),\n path(\n \"chant-search/\",\n ChantSearchView.as_view(),\n name=\"chant-search\",\n ),\n path(\n \"chant-create/<int:source_pk>\",\n ChantCreateView.as_view(),\n name=\"chant-create\",\n ),\n path(\n \"id/<str:cantus_id>\",\n ChantByCantusIDView.as_view(),\n name=\"chant-by-cantus-id\",\n ),\n path(\n \"chant-delete/<int:pk>\",\n ChantDeleteView.as_view(),\n name=\"chant-delete\",\n ),\n path(\n \"edit-chants/<int:source_id>\",\n SourceEditChantsView.as_view(),\n name=\"source-edit-chants\",\n ),\n path(\n \"proofread-chant/<int:source_id>\",\n ChantProofreadView.as_view(),\n name=\"chant-proofread\",\n ),\n path(\n \"edit-syllabification/<int:chant_id>\",\n ChantEditSyllabificationView.as_view(),\n name=\"source-edit-syllabification\",\n ),\n path(\n \"index/\",\n ChantIndexView.as_view(),\n name=\"chant-index\",\n ), # /index/?source={source id}\n # feast\n path(\n \"feasts/\",\n FeastListView.as_view(),\n name=\"feast-list\",\n ),\n path(\n \"feast/<int:pk>\",\n FeastDetailView.as_view(),\n name=\"feast-detail\",\n ),\n # genre\n path(\n \"genres/\",\n GenreListView.as_view(),\n name=\"genre-list\",\n ),\n path(\n \"genre/<int:pk>\",\n GenreDetailView.as_view(),\n name=\"genre-detail\",\n ),\n # indexer\n path(\n \"indexers/\",\n IndexerListView.as_view(),\n name=\"indexer-list\",\n ),\n # notation\n path(\n \"notation/<int:pk>\",\n NotationDetailView.as_view(),\n name=\"notation-detail\",\n ),\n # office\n path(\n \"offices/\",\n OfficeListView.as_view(),\n name=\"office-list\",\n ),\n path(\n \"office/<int:pk>\",\n OfficeDetailView.as_view(),\n name=\"office-detail\",\n ),\n # provenance\n path(\n \"provenance/<int:pk>\",\n ProvenanceDetailView.as_view(),\n name=\"provenance-detail\",\n ),\n # sequence\n path(\n \"sequences/\",\n SequenceListView.as_view(),\n name=\"sequence-list\",\n ),\n path(\n \"sequence/<int:pk>\",\n SequenceDetailView.as_view(),\n name=\"sequence-detail\",\n ),\n path(\n \"edit-sequence/<int:sequence_id>\",\n SequenceEditView.as_view(),\n name=\"sequence-edit\",\n ),\n # source\n path(\n \"sources/\",\n SourceListView.as_view(),\n name=\"source-list\",\n ),\n path(\n \"source/<int:pk>\",\n SourceDetailView.as_view(),\n name=\"source-detail\",\n 
),\n path(\n \"source-create/\",\n SourceCreateView.as_view(),\n name=\"source-create\",\n ),\n path(\n \"edit-source/<int:source_id>\",\n SourceEditView.as_view(),\n name=\"source-edit\",\n ),\n # melody\n path(\n \"melody/\",\n MelodySearchView.as_view(),\n name=\"melody-search\",\n ),\n path(\n \"ajax/melody/<str:cantus_id>\",\n views.ajax_melody_list,\n name=\"ajax-melody\",\n ),\n path(\n \"ajax/melody-search/\",\n views.ajax_melody_search,\n name=\"ajax-melody-search\",\n ),\n # json api\n path(\n \"json-sources/\",\n views.json_sources_export,\n name=\"json-sources-export\",\n ),\n path(\n \"json-node/<str:id>\",\n views.json_node_export,\n name=\"json-node-export\",\n ),\n path(\n \"json-nextchants/<str:cantus_id>\",\n views.json_nextchants,\n name=\"json-nextchants\",\n ),\n path(\n \"json-melody/<str:cantus_id>\",\n views.json_melody_export,\n name=\"json-melody-export\",\n ),\n # misc search\n path(\n \"chant-search-ms/<int:source_pk>\",\n ChantSearchMSView.as_view(),\n name=\"chant-search-ms\",\n ),\n path(\n \"ci-search/<str:search_term>\",\n CISearchView.as_view(),\n name=\"ci-search\",\n ),\n path(\n \"ajax/search-bar/<str:search_term>\",\n views.ajax_search_bar,\n name=\"ajax-search-bar\",\n ),\n # misc\n path(\n \"content-statistics\",\n views.items_count,\n name=\"items-count\",\n ),\n path(\n \"source/<str:source_id>/csv/\",\n views.csv_export,\n name=\"csv-export\",\n ),\n path(\n \"sites/default/files/csv/<str:source_id>.csv\",\n views.csv_export_redirect_from_old_path,\n name=\"csv-export-old-path\",\n ),\n path(\n \"ajax/concordance/<str:cantus_id>\",\n views.ajax_concordance_list,\n name=\"ajax-concordance\",\n ),\n # content overview (for project managers)\n path(\n \"content-overview/\",\n views.content_overview,\n name=\"content-overview\",\n ),\n # /node/ url redirects\n path(\n \"node/<int:pk>\",\n views.redirect_node_url,\n name=\"redirect-node-url\",\n ),\n # /indexer/ url redirects\n path(\n \"indexer/<int:pk>\",\n views.redirect_indexer,\n name=\"redirect-indexer\",\n ),\n]\n\nhandler404 = \"main_app.views.views.handle404\"\n", "path": "django/cantusdb_project/main_app/urls.py"}]}
3,884
125
gh_patches_debug_22952
rasdani/github-patches
git_diff
pwndbg__pwndbg-648
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> xuntil doesn't work with PIE binaries before being run `xuntil` was added in #604 and works except that it doesn't work with PIE binaries before being run. This is because it expects an int argument. I'll look into using sloppy parsing. </issue> <code> [start of pwndbg/commands/peda.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import argparse 9 import sys 10 11 import gdb 12 13 import pwndbg.auxv 14 import pwndbg.color.message as message 15 import pwndbg.commands 16 import pwndbg.commands.context 17 import pwndbg.commands.telescope 18 import pwndbg.proc 19 20 21 @pwndbg.commands.ArgparsedCommand("Gets the current file.") 22 @pwndbg.commands.OnlyWhenRunning 23 def getfile(): 24 print(repr(pwndbg.auxv.get().AT_EXECFN)) 25 26 @pwndbg.commands.ArgparsedCommand("Get the pid.") 27 @pwndbg.commands.OnlyWhenRunning 28 def getpid(): 29 print(pwndbg.proc.pid) 30 31 32 parser = argparse.ArgumentParser(description='Continue execution until an address or function.') 33 parser.add_argument('target', help='Address or function to stop execution at') 34 35 36 @pwndbg.commands.ArgparsedCommand(parser) 37 def xuntil(target): 38 addr = int(target) 39 40 if not pwndbg.memory.peek(addr): 41 print(message.error('Invalid address %#x' % addr)) 42 return 43 44 spec = "*%#x" % (addr) 45 b = gdb.Breakpoint(spec, temporary=True) 46 if pwndbg.proc.alive: 47 gdb.execute("continue", from_tty=False) 48 else: 49 gdb.execute("run", from_tty=False) 50 51 xinfo = pwndbg.commands.context.context 52 xprint = pwndbg.commands.telescope.telescope 53 [end of pwndbg/commands/peda.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/commands/peda.py b/pwndbg/commands/peda.py --- a/pwndbg/commands/peda.py +++ b/pwndbg/commands/peda.py @@ -30,18 +30,27 @@ parser = argparse.ArgumentParser(description='Continue execution until an address or function.') -parser.add_argument('target', help='Address or function to stop execution at') - +parser.add_argument('target', type=str, help='Address or function to stop execution at') @pwndbg.commands.ArgparsedCommand(parser) def xuntil(target): - addr = int(target) - - if not pwndbg.memory.peek(addr): - print(message.error('Invalid address %#x' % addr)) - return + try: + addr = int(target,0) + + if not pwndbg.memory.peek(addr): + print(message.error('Invalid address %#x' % addr)) + return + + spec = "*%#x" % (addr) + except (TypeError, ValueError): + #The following gdb command will throw an error if the symbol is not defined. + try: + result = gdb.execute('info address %s' % target, to_string=True, from_tty=False) + except gdb.error: + print(message.error("Unable to resolve %s" % target)) + return + spec = target - spec = "*%#x" % (addr) b = gdb.Breakpoint(spec, temporary=True) if pwndbg.proc.alive: gdb.execute("continue", from_tty=False)
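The PIE case hinges on the fallback branch of the fix: before the program is mapped, a PIE symbol has no fixed integer address, so the command now accepts the bare symbol and lets gdb resolve it when the breakpoint is created. A condensed restatement of that dispatch logic (the helper name is made up for illustration):

```python
# Condensed sketch of the fixed control flow (hypothetical helper name).
def _xuntil_spec(target):
    try:
        addr = int(target, 0)        # handles "4198400" as well as "0x401000"
        return "*%#x" % addr         # concrete address -> "*0x401000"
    except ValueError:
        # Not a number: treat it as a symbol such as "main". In the real
        # command, gdb's "info address <symbol>" errors out for unknown
        # symbols, which is how unresolvable targets get reported.
        return target
```

With a symbol spec, the temporary breakpoint is set by name before `run`, so it also works on PIE binaries that have not been started yet.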
{"golden_diff": "diff --git a/pwndbg/commands/peda.py b/pwndbg/commands/peda.py\n--- a/pwndbg/commands/peda.py\n+++ b/pwndbg/commands/peda.py\n@@ -30,18 +30,27 @@\n \n \n parser = argparse.ArgumentParser(description='Continue execution until an address or function.')\n-parser.add_argument('target', help='Address or function to stop execution at')\n-\n+parser.add_argument('target', type=str, help='Address or function to stop execution at')\n \n @pwndbg.commands.ArgparsedCommand(parser)\n def xuntil(target):\n- addr = int(target)\n-\n- if not pwndbg.memory.peek(addr):\n- print(message.error('Invalid address %#x' % addr))\n- return\n+ try:\n+ addr = int(target,0)\n+ \n+ if not pwndbg.memory.peek(addr):\n+ print(message.error('Invalid address %#x' % addr))\n+ return\n+\n+ spec = \"*%#x\" % (addr)\n+ except (TypeError, ValueError):\n+ #The following gdb command will throw an error if the symbol is not defined.\n+ try:\n+ result = gdb.execute('info address %s' % target, to_string=True, from_tty=False)\n+ except gdb.error:\n+ print(message.error(\"Unable to resolve %s\" % target))\n+ return \n+ spec = target\n \n- spec = \"*%#x\" % (addr)\n b = gdb.Breakpoint(spec, temporary=True)\n if pwndbg.proc.alive:\n gdb.execute(\"continue\", from_tty=False)\n", "issue": "xuntil doesn't work with PIE binaries before being run\n`xuntil` was added in #604 and works except that it doesn't work with PIE binaries before being run.\r\nThis is because it expects an int argument. I'll look into using sloppy parsing.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport sys\n\nimport gdb\n\nimport pwndbg.auxv\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.commands.context\nimport pwndbg.commands.telescope\nimport pwndbg.proc\n\n\[email protected](\"Gets the current file.\")\[email protected]\ndef getfile():\n print(repr(pwndbg.auxv.get().AT_EXECFN))\n\[email protected](\"Get the pid.\")\[email protected]\ndef getpid():\n print(pwndbg.proc.pid)\n\n\nparser = argparse.ArgumentParser(description='Continue execution until an address or function.')\nparser.add_argument('target', help='Address or function to stop execution at')\n\n\[email protected](parser)\ndef xuntil(target):\n addr = int(target)\n\n if not pwndbg.memory.peek(addr):\n print(message.error('Invalid address %#x' % addr))\n return\n\n spec = \"*%#x\" % (addr)\n b = gdb.Breakpoint(spec, temporary=True)\n if pwndbg.proc.alive:\n gdb.execute(\"continue\", from_tty=False)\n else:\n gdb.execute(\"run\", from_tty=False)\n\nxinfo = pwndbg.commands.context.context\nxprint = pwndbg.commands.telescope.telescope\n", "path": "pwndbg/commands/peda.py"}]}
1,040
351
gh_patches_debug_859
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-169
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Change development environment from docker-compose to venv - [ ] Remove the django docker container - [ ] Install package and requirements in venv - [ ] Keep database docker container and manage connection to django </issue> <code> [start of backend/backend/settings.py] 1 """ 2 Django settings for backend project. 3 4 Generated by 'django-admin startproject' using Django 1.11.11. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.11/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.11/ref/settings/ 11 """ 12 13 import os 14 15 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 18 19 # Quick-start development settings - unsuitable for production 20 # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ 21 22 # SECURITY WARNING: keep the secret key used in production secret! 23 SECRET_KEY = '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' 24 25 # SECURITY WARNING: don't run with debug turned on in production! 26 DEBUG = True 27 28 ALLOWED_HOSTS = [ 29 'localhost', 30 '127.0.0.1', 31 '0.0.0.0' 32 ] 33 34 35 # Application definition 36 37 INSTALLED_APPS = [ 38 'cms.apps.CmsConfig', 39 'django.contrib.admin', 40 'django.contrib.auth', 41 'django.contrib.contenttypes', 42 'django.contrib.messages', 43 'django.contrib.sessions', 44 'django.contrib.staticfiles', 45 'widget_tweaks', 46 'easy_thumbnails', 47 'filer', 48 'drf_yasg', 49 'mptt', 50 'rest_framework', 51 'rules.apps.AutodiscoverRulesConfig', 52 ] 53 54 MIDDLEWARE = [ 55 'django.middleware.security.SecurityMiddleware', 56 'django.contrib.sessions.middleware.SessionMiddleware', 57 'django.middleware.locale.LocaleMiddleware', 58 'django.middleware.common.CommonMiddleware', 59 'django.middleware.csrf.CsrfViewMiddleware', 60 'django.contrib.auth.middleware.AuthenticationMiddleware', 61 'django.contrib.messages.middleware.MessageMiddleware', 62 'django.middleware.clickjacking.XFrameOptionsMiddleware', 63 ] 64 65 ROOT_URLCONF = 'backend.urls' 66 THUMBNAIL_HIGH_RESOLUTION = True 67 68 TEMPLATES = [ 69 { 70 'BACKEND': 'django.template.backends.django.DjangoTemplates', 71 'DIRS': [], 72 'APP_DIRS': True, 73 'OPTIONS': { 74 'context_processors': [ 75 'django.template.context_processors.debug', 76 'django.template.context_processors.request', 77 'django.contrib.auth.context_processors.auth', 78 'django.contrib.messages.context_processors.messages', 79 'backend.context_processors.site_slug_processor', 80 ], 81 }, 82 }, 83 ] 84 85 WSGI_APPLICATION = 'backend.wsgi.application' 86 87 88 # Database 89 # https://docs.djangoproject.com/en/1.11/ref/settings/#databases 90 91 DATABASES = { 92 'default': { 93 'ENGINE': 'django.db.backends.postgresql_psycopg2', 94 'NAME': 'integreat', 95 'USER': 'integreat', 96 'PASSWORD': 'password', 97 'HOST': 'postgres', 98 'PORT': '5432', 99 } 100 } 101 102 # Directory for initial database contents 103 104 FIXTURE_DIRS = ( 105 os.path.join(BASE_DIR, 'cms/fixtures/'), 106 ) 107 108 # Authentication backends 109 110 AUTHENTICATION_BACKENDS = ( 111 'rules.permissions.ObjectPermissionBackend', 112 'django.contrib.auth.backends.ModelBackend', # this is default 113 ) 114 115 116 # Password validation 117 # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators 118 119 
AUTH_PASSWORD_VALIDATORS = [ 120 { 121 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 122 }, 123 { 124 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 125 }, 126 { 127 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 128 }, 129 { 130 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 131 }, 132 ] 133 134 135 # Internationalization 136 # https://docs.djangoproject.com/en/1.11/topics/i18n/ 137 138 LANGUAGES = ( 139 ('en-us', 'English'), 140 ('de-de', 'Deutsch'), 141 ) 142 143 LOCALE_PATHS = ( 144 os.path.join(BASE_DIR, 'locale'), 145 ) 146 147 LANGUAGE_CODE = 'de-de' 148 149 TIME_ZONE = 'UTC' 150 151 USE_I18N = True 152 153 USE_L10N = True 154 155 USE_TZ = True 156 157 158 # Static files (CSS, JavaScript, Images) 159 # https://docs.djangoproject.com/en/1.11/howto/static-files/ 160 161 STATIC_URL = '/static/' 162 163 164 # Login 165 LOGIN_URL = '/login' 166 LOGIN_REDIRECT_URL = '/' 167 LOGOUT_REDIRECT_URL = '/login' 168 169 # API FRAMEWORK 170 REST_FRAMEWORK = { 171 # Use Django's standard `django.contrib.auth` permissions, 172 # or allow read-only access for unauthenticated users. 173 'DEFAULT_PERMISSION_CLASSES': [ 174 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' 175 ] 176 } 177 178 # Miscellaneous 179 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' 180 CSRF_FAILURE_VIEW = 'cms.views.general.csrf_failure' 181 182 MEDIA_URL = '/media/' 183 MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 184 FILER_CANONICAL_URL = 'media/' 185 186 LOGGING = { 187 'version': 1, 188 'disable_existing_loggers': False, 189 'handlers': { 190 'console': { 191 'class': 'logging.StreamHandler' 192 }, 193 }, 194 'loggers': { 195 'django': { 196 'handlers': ['console'], 197 'level': 'WARN', 198 'propagate': True, 199 }, 200 'api': { 201 'handlers': ['console'], 202 'level': 'INFO', 203 'propagate': True, 204 }, 205 'cms': { 206 'handlers': ['console'], 207 'level': 'INFO', 208 'propagate': True, 209 }, 210 'rules': { 211 'handlers': ['console'], 212 'level': 'DEBUG', 213 'propagate': True, 214 }, 215 } 216 } 217 [end of backend/backend/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/backend/settings.py b/backend/backend/settings.py --- a/backend/backend/settings.py +++ b/backend/backend/settings.py @@ -94,7 +94,7 @@ 'NAME': 'integreat', 'USER': 'integreat', 'PASSWORD': 'password', - 'HOST': 'postgres', + 'HOST': 'localhost', 'PORT': '5432', } }
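Since the issue keeps the database container but moves Django into a venv, the only settings change needed is the host. A slightly more flexible variant (hypothetical, not part of the patch; the environment variable name is invented) reads the host from the environment so both setups keep working:

```python
# Hypothetical variant of the DATABASES block: default to localhost for the
# venv workflow, but allow overriding (e.g. DJANGO_DB_HOST=postgres) when the
# app itself still runs inside docker-compose.
import os

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'integreat',
        'USER': 'integreat',
        'PASSWORD': 'password',
        'HOST': os.environ.get('DJANGO_DB_HOST', 'localhost'),
        'PORT': '5432',
    }
}
```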
{"golden_diff": "diff --git a/backend/backend/settings.py b/backend/backend/settings.py\n--- a/backend/backend/settings.py\n+++ b/backend/backend/settings.py\n@@ -94,7 +94,7 @@\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n- 'HOST': 'postgres',\n+ 'HOST': 'localhost',\n 'PORT': '5432',\n }\n }\n", "issue": "Change development environment from docker-compose to venv\n- [ ] Remove the django docker container\r\n- [ ] Install package and requirements in venv\r\n- [ ] Keep database docker container and manage connection to django\n", "before_files": [{"content": "\"\"\"\nDjango settings for backend project.\n\nGenerated by 'django-admin startproject' using Django 1.11.11.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n '0.0.0.0'\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'cms.apps.CmsConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'widget_tweaks',\n 'easy_thumbnails',\n 'filer',\n 'drf_yasg',\n 'mptt',\n 'rest_framework',\n 'rules.apps.AutodiscoverRulesConfig',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'backend.urls'\nTHUMBNAIL_HIGH_RESOLUTION = True\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'backend.context_processors.site_slug_processor',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'backend.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n 'HOST': 'postgres',\n 'PORT': '5432',\n }\n}\n\n# Directory for initial database contents\n\nFIXTURE_DIRS = (\n os.path.join(BASE_DIR, 'cms/fixtures/'),\n)\n\n# Authentication backends\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend', # this is default\n)\n\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGES = (\n ('en-us', 'English'),\n ('de-de', 'Deutsch'),\n)\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nLANGUAGE_CODE = 'de-de'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n\n# Login\nLOGIN_URL = '/login'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/login'\n\n# API FRAMEWORK\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ]\n}\n\n# Miscellaneous\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nCSRF_FAILURE_VIEW = 'cms.views.general.csrf_failure'\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nFILER_CANONICAL_URL = 'media/'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'WARN',\n 'propagate': True,\n },\n 'api': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'cms': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'rules': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n", "path": "backend/backend/settings.py"}]}
2,404
94
gh_patches_debug_17968
rasdani/github-patches
git_diff
Pycord-Development__pycord-621
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cog.get_commands() does not return ext.commands commands ### Summary `Cog.get_commands()` no longer returns anything but slash/interaction based commands, breaking all help commands ### Reproduction Steps Run the example below with a cog that has `ext.commands` based commands. ### Minimal Reproducible Code ```python cog = bot.get_cog("MyCog") commands = cog.get_commands() print(commands) #This will return [] unless MyCog contains slash/interaction based commands ``` ### Expected Results Commands inside cog returned as a list. ### Actual Results Only interaction-based commands are returned ### Intents ``` discord.Intents(guilds = True, members = True, bans = True, emojis = True, messages = True, invites = True, reactions = True) ``` ### System Information ``` - Python v3.10.1-final - py-cord v2.0.0-alpha - py-cord pkg_resources: v2.0.0a4627+gca1a76cb - aiohttp v3.7.4.post0 - system info: Linux 5.15.10-zen1-1-zen #1 ZEN SMP PREEMPT Fri, 17 Dec 2021 11:17:39 +0000 ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. ### Additional Context _No response_ </issue> <code> [start of discord/ext/commands/cog.py] 1 """ 2 The MIT License (MIT) 3 4 Copyright (c) 2015-2021 Rapptz 5 Copyright (c) 2021-present Pycord Development 6 7 Permission is hereby granted, free of charge, to any person obtaining a 8 copy of this software and associated documentation files (the "Software"), 9 to deal in the Software without restriction, including without limitation 10 the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 and/or sell copies of the Software, and to permit persons to whom the 12 Software is furnished to do so, subject to the following conditions: 13 14 The above copyright notice and this permission notice shall be included in 15 all copies or substantial portions of the Software. 16 17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 DEALINGS IN THE SOFTWARE. 24 """ 25 from __future__ import annotations 26 27 import discord 28 from ...cog import Cog 29 30 from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type 31 32 from ...commands import ApplicationCommand 33 34 if TYPE_CHECKING: 35 from .core import Command 36 37 __all__ = ('Cog',) 38 39 CogT = TypeVar('CogT', bound='Cog') 40 FuncT = TypeVar('FuncT', bound=Callable[..., Any]) 41 42 MISSING: Any = discord.utils.MISSING 43 44 class Cog(Cog): 45 def __new__(cls: Type[CogT], *args: Any, **kwargs: Any) -> CogT: 46 # For issue 426, we need to store a copy of the command objects 47 # since we modify them to inject `self` to them. 48 # To do this, we need to interfere with the Cog creation process. 49 self = super().__new__(cls) 50 51 return self 52 53 def walk_commands(self) -> Generator[Command, None, None]: 54 """An iterator that recursively walks through this cog's commands and subcommands. 
55 56 Yields 57 ------ 58 Union[:class:`.Command`, :class:`.Group`] 59 A command or group from the cog. 60 """ 61 from .core import GroupMixin 62 for command in self.__cog_commands__: 63 if isinstance(command, ApplicationCommand): 64 yield command 65 else: 66 if command.parent is None: 67 yield command 68 if isinstance(command, GroupMixin): 69 yield from command.walk_commands() 70 [end of discord/ext/commands/cog.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/discord/ext/commands/cog.py b/discord/ext/commands/cog.py --- a/discord/ext/commands/cog.py +++ b/discord/ext/commands/cog.py @@ -27,7 +27,7 @@ import discord from ...cog import Cog -from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type +from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type, Union from ...commands import ApplicationCommand @@ -67,3 +67,16 @@ yield command if isinstance(command, GroupMixin): yield from command.walk_commands() + + def get_commands(self) -> List[Union[ApplicationCommand, Command]]: + r""" + Returns + -------- + List[Union[:class:`~discord.ApplicationCommand`, :class:`.Command`]] + A :class:`list` of commands that are defined inside this cog. + + .. note:: + + This does not include subcommands. + """ + return [c for c in self.__cog_commands__ if c.parent is None]
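A quick way to see what the restored method returns versus the existing iterator, assuming a cog with both prefix and application commands is loaded (the cog name is a placeholder, as in the report):

```python
# Illustrative check after the fix; "MyCog" is a placeholder name.
cog = bot.get_cog("MyCog")

top_level = cog.get_commands()           # prefix + application commands, no subcommands
recursive = list(cog.walk_commands())    # also descends into command groups

print([c.name for c in top_level])
print([c.name for c in recursive])
```

Help commands that previously got `[]` from `get_commands()` should list prefix commands again, since the method now filters `__cog_commands__` only on `parent is None` instead of on command type.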
{"golden_diff": "diff --git a/discord/ext/commands/cog.py b/discord/ext/commands/cog.py\n--- a/discord/ext/commands/cog.py\n+++ b/discord/ext/commands/cog.py\n@@ -27,7 +27,7 @@\n import discord\n from ...cog import Cog\n \n-from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type\n+from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type, Union\n \n from ...commands import ApplicationCommand\n \n@@ -67,3 +67,16 @@\n yield command\n if isinstance(command, GroupMixin):\n yield from command.walk_commands()\n+\n+ def get_commands(self) -> List[Union[ApplicationCommand, Command]]:\n+ r\"\"\"\n+ Returns\n+ --------\n+ List[Union[:class:`~discord.ApplicationCommand`, :class:`.Command`]]\n+ A :class:`list` of commands that are defined inside this cog.\n+\n+ .. note::\n+\n+ This does not include subcommands.\n+ \"\"\"\n+ return [c for c in self.__cog_commands__ if c.parent is None]\n", "issue": "Cog.get_commands() does not return ext.commands commands\n### Summary\r\n\r\n`Cog.get_commands()` no longer returns anything but slash/interaction based commands, breaking all help commands\r\n\r\n### Reproduction Steps\r\n\r\nRun the example below with a cog that has `ext.commands` based commands.\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\ncog = bot.get_cog(\"MyCog\")\r\ncommands = cog.get_commands()\r\nprint(commands)\r\n#This will return [] unless MyCog contains slash/interaction based commands\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nCommands inside cog returned as a list.\r\n\r\n### Actual Results\r\n\r\nOnly interaction-based commands are returned\r\n\r\n### Intents\r\n\r\n```\r\ndiscord.Intents(guilds = True, members = True, bans = True, emojis = True, messages = True, invites = True, reactions = True)\r\n```\r\n\r\n### System Information\r\n```\r\n- Python v3.10.1-final\r\n- py-cord v2.0.0-alpha\r\n - py-cord pkg_resources: v2.0.0a4627+gca1a76cb\r\n- aiohttp v3.7.4.post0\r\n- system info: Linux 5.15.10-zen1-1-zen #1 ZEN SMP PREEMPT Fri, 17 Dec 2021 11:17:39 +0000\r\n```\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import annotations\n\nimport discord\nfrom ...cog import Cog\n\nfrom typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type\n\nfrom ...commands import ApplicationCommand\n\nif TYPE_CHECKING:\n from .core import Command\n\n__all__ = ('Cog',)\n\nCogT = TypeVar('CogT', bound='Cog')\nFuncT = TypeVar('FuncT', bound=Callable[..., Any])\n\nMISSING: Any = discord.utils.MISSING\n\nclass Cog(Cog):\n def __new__(cls: Type[CogT], *args: Any, **kwargs: Any) -> CogT:\n # For issue 426, we need to store a copy of the command objects\n # since we modify them to inject `self` to them.\n # To do this, we need to interfere with the Cog creation process.\n self = super().__new__(cls)\n\n return self\n\n def walk_commands(self) -> Generator[Command, None, None]:\n \"\"\"An iterator that recursively walks through this cog's commands and subcommands.\n\n Yields\n ------\n Union[:class:`.Command`, :class:`.Group`]\n A command or group from the cog.\n \"\"\"\n from .core import GroupMixin\n for command in self.__cog_commands__:\n if isinstance(command, ApplicationCommand):\n yield command\n else:\n if command.parent is None:\n yield command\n if isinstance(command, GroupMixin):\n yield from command.walk_commands()\n", "path": "discord/ext/commands/cog.py"}]}
1,593
249
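A minimal sketch of the behaviour the record above restores, assuming py-cord with the `get_commands` patch applied; the cog and command names are illustrative:

```python
from discord.ext import commands

class MyCog(commands.Cog):
    @commands.command()
    async def ping(self, ctx):
        await ctx.send("pong")

# __cog_commands__ is collected at class-creation time, so the accessor can be
# exercised without a running bot.
cog = MyCog()
print([c.name for c in cog.get_commands()])  # expected after the patch: ['ping']
```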
gh_patches_debug_13989
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-1192
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> multi-gpu ddp calls validation and testing loops too many times When using ddp with multiple gpus, each validation and test loop is called with the entire validation dataset for each gpu. Expected behavior is that the dataset is divided appropriately across the gpus. I am using current master (cloned Mar 14), Ubuntu 19.10, Cuda 10.1, python 3.7.5, pytorch 1.4, venv environment. The problem appears to be in `auto_add_sampler()` in data_loading.py. It does not create a `DistributedSampler` for validation or test datasets. </issue> <code> [start of pytorch_lightning/trainer/data_loading.py] 1 from abc import ABC, abstractmethod 2 from typing import Union, List, Tuple, Callable 3 4 import torch.distributed as torch_distrib 5 from torch.utils.data import SequentialSampler, DataLoader 6 from torch.utils.data.distributed import DistributedSampler 7 8 from pytorch_lightning.core import LightningModule 9 from pytorch_lightning.utilities.debugging import MisconfigurationException 10 11 try: 12 from apex import amp 13 except ImportError: 14 APEX_AVAILABLE = False 15 else: 16 APEX_AVAILABLE = True 17 18 try: 19 import torch_xla 20 import torch_xla.core.xla_model as xm 21 import torch_xla.distributed.xla_multiprocessing as xmp 22 except ImportError: 23 XLA_AVAILABLE = False 24 else: 25 XLA_AVAILABLE = True 26 27 28 def _has_len(dataloader: DataLoader) -> bool: 29 try: 30 # try getting the length 31 _ = len(dataloader) 32 return True 33 except TypeError: 34 return False 35 36 37 class TrainerDataLoadingMixin(ABC): 38 39 # this is just a summary on variables used in this abstract class, 40 # the proper values/initialisation should be done in child class 41 proc_rank: int 42 use_ddp: bool 43 use_ddp2: bool 44 shown_warnings: ... 45 val_check_interval: float 46 use_tpu: bool 47 tpu_local_core_rank: int 48 train_dataloader: DataLoader 49 num_training_batches: Union[int, float] 50 val_check_batch: ... 51 val_dataloaders: List[DataLoader] 52 num_val_batches: Union[int, float] 53 test_dataloaders: List[DataLoader] 54 num_test_batches: Union[int, float] 55 train_percent_check: float 56 val_percent_check: float 57 test_percent_check: float 58 59 @abstractmethod 60 def is_overriden(self, *args): 61 """Warning: this is just empty shell for code implemented in other class.""" 62 63 def _percent_range_check(self, name: str) -> None: 64 value = getattr(self, name) 65 msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.' 66 if name == 'val_check_interval': 67 msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.' 68 69 if not 0. 
<= value <= 1.: 70 raise ValueError(msg) 71 72 def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader: 73 if self.use_ddp or self.use_ddp2 or self.use_tpu: 74 dl_args = { 75 'dataset': dataloader.dataset, 76 'batch_size': dataloader.batch_size, 77 'shuffle': False, 78 'num_workers': dataloader.num_workers, 79 'collate_fn': dataloader.collate_fn, 80 'pin_memory': dataloader.pin_memory, 81 'drop_last': dataloader.drop_last, 82 'timeout': dataloader.timeout, 83 'worker_init_fn': dataloader.worker_init_fn 84 } 85 86 if self.use_tpu: 87 sampler = DistributedSampler( 88 dataloader.dataset, 89 num_replicas=xm.xrt_world_size(), 90 rank=xm.get_ordinal() 91 ) 92 dl_args['shuffle'] = False 93 else: 94 if train: 95 sampler = DistributedSampler(dataloader.dataset) 96 dl_args['shuffle'] = False 97 else: 98 sampler = SequentialSampler(dataloader.dataset) 99 100 dl_args['sampler'] = sampler 101 102 dataloader = DataLoader(**dl_args) 103 return dataloader 104 105 def reset_train_dataloader(self, model: LightningModule) -> None: 106 """Resets the train dataloader and initialises required variables 107 (number of batches, when to validate, etc.). 108 109 Args: 110 model: The current `LightningModule` 111 """ 112 self.train_dataloader = self.request_dataloader(model.train_dataloader) 113 self.num_training_batches = 0 114 115 # automatically add samplers 116 self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True) 117 118 self._percent_range_check('train_percent_check') 119 120 if not _has_len(self.train_dataloader): 121 self.num_training_batches = float('inf') 122 else: 123 # try getting the length 124 self.num_training_batches = len(self.train_dataloader) 125 self.num_training_batches = int(self.num_training_batches * self.train_percent_check) 126 127 # determine when to check validation 128 # if int passed in, val checks that often 129 # otherwise, it checks in [0, 1.0] % range of a training epoch 130 if isinstance(self.val_check_interval, int): 131 self.val_check_batch = self.val_check_interval 132 if self.val_check_batch > self.num_training_batches: 133 raise ValueError( 134 f'`val_check_interval` ({self.val_check_interval}) must be less than or equal ' 135 f'to the number of the training batches ({self.num_training_batches}). ' 136 'If you want to disable validation set `val_percent_check` to 0.0 instead.') 137 else: 138 if not _has_len(self.train_dataloader): 139 raise MisconfigurationException( 140 'When using an infinite DataLoader (e.g. with an IterableDataset or when ' 141 'DataLoader does not implement `__len__`) for `train_dataloader`, ' 142 '`Trainer(val_check_interval)` must be an int. An int k specifies checking ' 143 'validation every k training batches.') 144 145 self._percent_range_check('val_check_interval') 146 147 self.val_check_batch = int(self.num_training_batches * self.val_check_interval) 148 self.val_check_batch = max(1, self.val_check_batch) 149 150 def _reset_eval_dataloader(self, model: LightningModule, 151 mode: str) -> Tuple[int, List[DataLoader]]: 152 """Generic method to reset a dataloader for evaluation. 
153 154 Args: 155 model: The current `LightningModule` 156 mode: Either `'val'` or `'test'` 157 158 Returns: 159 Tuple (num_batches, dataloaders) 160 """ 161 dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader')) 162 163 if not isinstance(dataloaders, list): 164 dataloaders = [dataloaders] 165 166 # add samplers 167 dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl] 168 169 num_batches = 0 170 171 # determine number of batches 172 # datasets could be none, 1 or 2+ 173 if len(dataloaders) != 0: 174 for dataloader in dataloaders: 175 if not _has_len(dataloader): 176 num_batches = float('inf') 177 break 178 179 percent_check = getattr(self, f'{mode}_percent_check') 180 181 if num_batches != float('inf'): 182 self._percent_range_check(f'{mode}_percent_check') 183 184 num_batches = sum(len(dataloader) for dataloader in dataloaders) 185 num_batches = int(num_batches * percent_check) 186 elif percent_check not in (0.0, 1.0): 187 raise MisconfigurationException( 188 'When using an infinite DataLoader (e.g. with an IterableDataset or when ' 189 f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, ' 190 f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.') 191 return num_batches, dataloaders 192 193 def reset_val_dataloader(self, model: LightningModule) -> None: 194 """Resets the validation dataloader and determines the number of batches. 195 196 Args: 197 model: The current `LightningModule` 198 """ 199 if self.is_overriden('validation_step'): 200 self.num_val_batches, self.val_dataloaders =\ 201 self._reset_eval_dataloader(model, 'val') 202 203 def reset_test_dataloader(self, model) -> None: 204 """Resets the validation dataloader and determines the number of batches. 205 206 Args: 207 model: The current `LightningModule` 208 """ 209 if self.is_overriden('test_step'): 210 self.num_test_batches, self.test_dataloaders =\ 211 self._reset_eval_dataloader(model, 'test') 212 213 def request_dataloader(self, dataloader_fx: Callable) -> DataLoader: 214 """Handles downloading data in the GPU or TPU case. 215 216 Args: 217 dataloader_fx: The bound dataloader getter 218 219 Returns: 220 The dataloader 221 """ 222 dataloader = dataloader_fx() 223 224 # get the function we'll use to get data 225 if self.use_ddp or self.use_ddp2: 226 # all processes wait until data download has happened 227 torch_distrib.barrier() 228 229 # data download/load on TPU 230 elif self.use_tpu and XLA_AVAILABLE: 231 # all processes wait until data download has happened 232 torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders') 233 234 return dataloader 235 236 def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float, 237 test_percent_check: float, overfit_pct: float) -> None: 238 """Use less data for debugging purposes 239 """ 240 self.train_percent_check = train_percent_check 241 self.val_percent_check = val_percent_check 242 self.test_percent_check = test_percent_check 243 if overfit_pct > 0: 244 if overfit_pct > 1: 245 raise ValueError( 246 f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.') 247 248 self.train_percent_check = overfit_pct 249 self.val_percent_check = overfit_pct 250 self.test_percent_check = overfit_pct 251 [end of pytorch_lightning/trainer/data_loading.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py --- a/pytorch_lightning/trainer/data_loading.py +++ b/pytorch_lightning/trainer/data_loading.py @@ -91,15 +91,12 @@ ) dl_args['shuffle'] = False else: - if train: - sampler = DistributedSampler(dataloader.dataset) - dl_args['shuffle'] = False - else: - sampler = SequentialSampler(dataloader.dataset) + sampler = DistributedSampler(dataloader.dataset) + dl_args['shuffle'] = False dl_args['sampler'] = sampler - dataloader = DataLoader(**dl_args) + return dataloader def reset_train_dataloader(self, model: LightningModule) -> None:
{"golden_diff": "diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py\n--- a/pytorch_lightning/trainer/data_loading.py\n+++ b/pytorch_lightning/trainer/data_loading.py\n@@ -91,15 +91,12 @@\n )\n dl_args['shuffle'] = False\n else:\n- if train:\n- sampler = DistributedSampler(dataloader.dataset)\n- dl_args['shuffle'] = False\n- else:\n- sampler = SequentialSampler(dataloader.dataset)\n+ sampler = DistributedSampler(dataloader.dataset)\n+ dl_args['shuffle'] = False\n \n dl_args['sampler'] = sampler\n-\n dataloader = DataLoader(**dl_args)\n+\n return dataloader\n \n def reset_train_dataloader(self, model: LightningModule) -> None:\n", "issue": "multi-gpu ddp calls validation and testing loops too many times\nWhen using ddp with multiple gpus, each validation and test loop is called with the entire validation dataset for each gpu.\r\n\r\nExpected behavior is that the dataset is divided appropriately across the gpus.\r\n\r\nI am using current master (cloned Mar 14), Ubuntu 19.10, Cuda 10.1, python 3.7.5, pytorch 1.4, venv environment.\r\n\r\nThe problem appears to be in `auto_add_sampler()` in data_loading.py. It does not create a `DistributedSampler` for validation or test datasets.\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import Union, List, Tuple, Callable\n\nimport torch.distributed as torch_distrib\nfrom torch.utils.data import SequentialSampler, DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\ndef _has_len(dataloader: DataLoader) -> bool:\n try:\n # try getting the length\n _ = len(dataloader)\n return True\n except TypeError:\n return False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n proc_rank: int\n use_ddp: bool\n use_ddp2: bool\n shown_warnings: ...\n val_check_interval: float\n use_tpu: bool\n tpu_local_core_rank: int\n train_dataloader: DataLoader\n num_training_batches: Union[int, float]\n val_check_batch: ...\n val_dataloaders: List[DataLoader]\n num_val_batches: Union[int, float]\n test_dataloaders: List[DataLoader]\n num_test_batches: Union[int, float]\n train_percent_check: float\n val_percent_check: float\n test_percent_check: float\n\n @abstractmethod\n def is_overriden(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def _percent_range_check(self, name: str) -> None:\n value = getattr(self, name)\n msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'\n if name == 'val_check_interval':\n msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'\n\n if not 0. 
<= value <= 1.:\n raise ValueError(msg)\n\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n if self.use_ddp or self.use_ddp2 or self.use_tpu:\n dl_args = {\n 'dataset': dataloader.dataset,\n 'batch_size': dataloader.batch_size,\n 'shuffle': False,\n 'num_workers': dataloader.num_workers,\n 'collate_fn': dataloader.collate_fn,\n 'pin_memory': dataloader.pin_memory,\n 'drop_last': dataloader.drop_last,\n 'timeout': dataloader.timeout,\n 'worker_init_fn': dataloader.worker_init_fn\n }\n\n if self.use_tpu:\n sampler = DistributedSampler(\n dataloader.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal()\n )\n dl_args['shuffle'] = False\n else:\n if train:\n sampler = DistributedSampler(dataloader.dataset)\n dl_args['shuffle'] = False\n else:\n sampler = SequentialSampler(dataloader.dataset)\n\n dl_args['sampler'] = sampler\n\n dataloader = DataLoader(**dl_args)\n return dataloader\n\n def reset_train_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the train dataloader and initialises required variables\n (number of batches, when to validate, etc.).\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n self.num_training_batches = 0\n\n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n\n self._percent_range_check('train_percent_check')\n\n if not _has_len(self.train_dataloader):\n self.num_training_batches = float('inf')\n else:\n # try getting the length\n self.num_training_batches = len(self.train_dataloader)\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '\n f'to the number of the training batches ({self.num_training_batches}). '\n 'If you want to disable validation set `val_percent_check` to 0.0 instead.')\n else:\n if not _has_len(self.train_dataloader):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n 'DataLoader does not implement `__len__`) for `train_dataloader`, '\n '`Trainer(val_check_interval)` must be an int. 
An int k specifies checking '\n 'validation every k training batches.')\n\n self._percent_range_check('val_check_interval')\n\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n def _reset_eval_dataloader(self, model: LightningModule,\n mode: str) -> Tuple[int, List[DataLoader]]:\n \"\"\"Generic method to reset a dataloader for evaluation.\n\n Args:\n model: The current `LightningModule`\n mode: Either `'val'` or `'test'`\n\n Returns:\n Tuple (num_batches, dataloaders)\n \"\"\"\n dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))\n\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n # add samplers\n dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]\n\n num_batches = 0\n\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n for dataloader in dataloaders:\n if not _has_len(dataloader):\n num_batches = float('inf')\n break\n\n percent_check = getattr(self, f'{mode}_percent_check')\n\n if num_batches != float('inf'):\n self._percent_range_check(f'{mode}_percent_check')\n\n num_batches = sum(len(dataloader) for dataloader in dataloaders)\n num_batches = int(num_batches * percent_check)\n elif percent_check not in (0.0, 1.0):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '\n f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')\n return num_batches, dataloaders\n\n def reset_val_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('validation_step'):\n self.num_val_batches, self.val_dataloaders =\\\n self._reset_eval_dataloader(model, 'val')\n\n def reset_test_dataloader(self, model) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('test_step'):\n self.num_test_batches, self.test_dataloaders =\\\n self._reset_eval_dataloader(model, 'test')\n\n def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:\n \"\"\"Handles downloading data in the GPU or TPU case.\n\n Args:\n dataloader_fx: The bound dataloader getter\n\n Returns:\n The dataloader\n \"\"\"\n dataloader = dataloader_fx()\n\n # get the function we'll use to get data\n if self.use_ddp or self.use_ddp2:\n # all processes wait until data download has happened\n torch_distrib.barrier()\n\n # data download/load on TPU\n elif self.use_tpu and XLA_AVAILABLE:\n # all processes wait until data download has happened\n torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')\n\n return dataloader\n\n def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,\n test_percent_check: float, overfit_pct: float) -> None:\n \"\"\"Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n if overfit_pct > 1:\n raise ValueError(\n f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')\n\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = 
overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading.py"}]}
3,448
179
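To make the effect of the change above concrete, a small self-contained sketch (no process group needed; ranks are simulated in a loop) of how a `DistributedSampler` shards an eval dataset instead of replaying it on every GPU:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

dataset = TensorDataset(torch.arange(8))

for rank in range(2):  # pretend world_size == 2 without launching processes
    sampler = DistributedSampler(dataset, num_replicas=2, rank=rank, shuffle=False)
    loader = DataLoader(dataset, batch_size=2, sampler=sampler)
    print(rank, [batch[0].tolist() for batch in loader])

# rank 0 -> [[0, 2], [4, 6]]   rank 1 -> [[1, 3], [5, 7]]
# With a SequentialSampler both ranks would iterate all 8 samples, which is
# exactly the duplicated validation/test work described in the issue above.
```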
gh_patches_debug_34006
rasdani/github-patches
git_diff
conan-io__conan-5846
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [question] How to not include some libs in conanbuildinfo.props Conan version 1.19.0 I have a dependency on boost (boost/1.71.0@conan/stable). I'm using the visual_studio generator with VS2017 This adds *all* of the boost libs to conanbuildinfo.props which causes my build to fail. I don't completely understand why this used to be OK with 1.68 and now fails with that too after moving my cache to a different drive (perhaps it was due to installing vs2019 alongside vs2017). But regardless it would be good if I could either cherrypick which libs to include or remove specific ones from the list. For example I don't want to include any of the stacktrace libs which result in duplicate symbols. I can't figure out how to do it. If I specify any of the 'without_xxx' options then it rebuilds the whole of the boost dependency which I would rather avoid - I just want to control which libs it puts in the vs props file! I was trying to see if there was some place I could remove them from deps_cpp_info but it doesn't look like there is a place to put that (i.e. an event just before the generators are run). Perhaps overriding the generator could do the trick but it seems like that would be a bit of a sledgehammer and not all that clean. </issue> <code> [start of conans/client/generators/visualstudio.py] 1 import os 2 import re 3 4 from conans.model import Generator 5 from conans.paths import BUILD_INFO_VISUAL_STUDIO 6 from conans.client.tools.files import VALID_LIB_EXTENSIONS 7 8 9 class VisualStudioGenerator(Generator): 10 11 template = '''<?xml version="1.0" encoding="utf-8"?> 12 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> 13 <ImportGroup Label="PropertySheets" /> 14 <PropertyGroup Label="UserMacros" /> 15 <PropertyGroup Label="Conan-RootDirs">{item_properties} 16 </PropertyGroup> 17 {properties} 18 <ItemGroup /> 19 </Project>''' 20 21 properties_template = ''' <PropertyGroup Label="ConanVariables"{condition}> 22 <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags> 23 <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions> 24 <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories> 25 <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories> 26 <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories> 27 <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories> 28 </PropertyGroup> 29 <PropertyGroup{condition}> 30 <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment> 31 <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor> 32 </PropertyGroup> 33 <ItemDefinitionGroup{condition}> 34 <ClCompile> 35 <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> 36 <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions> 37 <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions> 38 </ClCompile> 39 <Link> 40 <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> 41 <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies> 42 <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions> 43 </Link> 44 <Lib> 45 <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> 46 
<AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies> 47 </Lib> 48 <Midl> 49 <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> 50 </Midl> 51 <ResourceCompile> 52 <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> 53 <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions> 54 <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions> 55 </ResourceCompile> 56 </ItemDefinitionGroup>''' 57 58 item_template = ''' 59 <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>''' 60 61 def _format_items(self): 62 sections = [] 63 for dep_name, cpp_info in self._deps_build_info.dependencies: 64 fields = { 65 'root_dir': cpp_info.rootpath, 66 'name': dep_name.replace(".", "-") 67 } 68 section = self.item_template.format(**fields) 69 sections.append(section) 70 return "".join(sections) 71 72 @property 73 def filename(self): 74 return BUILD_INFO_VISUAL_STUDIO 75 76 def _format_properties(self, build_info, condition): 77 def has_valid_ext(lib): 78 ext = os.path.splitext(lib)[1] 79 return ext in VALID_LIB_EXTENSIONS 80 81 fields = { 82 'condition': condition, 83 'bin_dirs': "".join("%s;" % p for p in build_info.bin_paths), 84 'res_dirs': "".join("%s;" % p for p in build_info.res_paths), 85 'include_dirs': "".join("%s;" % p for p in build_info.include_paths), 86 'lib_dirs': "".join("%s;" % p for p in build_info.lib_paths), 87 'libs': "".join(['%s.lib;' % lib if not has_valid_ext(lib) 88 else '%s;' % lib for lib in build_info.libs]), 89 'definitions': "".join("%s;" % d for d in build_info.defines), 90 'compiler_flags': " ".join(build_info.cxxflags + build_info.cflags), 91 'linker_flags': " ".join(build_info.sharedlinkflags), 92 'exe_flags': " ".join(build_info.exelinkflags) 93 } 94 formatted_template = self.properties_template.format(**fields) 95 return formatted_template 96 97 @property 98 def content(self): 99 per_item_props = self._format_items() 100 101 properties = [self._format_properties(self._deps_build_info, condition='')] 102 for config, cpp_info in self._deps_build_info.configs.items(): 103 condition = " Condition=\"'$(Configuration)' == '%s'\"" % config 104 properties.append(self._format_properties(cpp_info, condition=condition)) 105 106 fields = { 107 'item_properties': per_item_props, 108 'properties': '\n'.join(properties) 109 } 110 formatted_template = self.template.format(**fields) 111 112 userprofile = os.getenv("USERPROFILE") 113 if userprofile: 114 userprofile = userprofile.replace("\\", "\\\\") 115 formatted_template = re.sub(userprofile, "$(USERPROFILE)", formatted_template, 116 flags=re.I) 117 return formatted_template 118 [end of conans/client/generators/visualstudio.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py --- a/conans/client/generators/visualstudio.py +++ b/conans/client/generators/visualstudio.py @@ -18,13 +18,15 @@ <ItemGroup /> </Project>''' - properties_template = ''' <PropertyGroup Label="ConanVariables"{condition}> + properties_template = '''<PropertyGroup Label="ConanVariables"{condition}> <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags> + <ConanLinkerFlags>{linker_flags}</ConanLinkerFlags> <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions> <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories> <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories> <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories> <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories> + <ConanLibraries>{libs}</ConanLibraries> </PropertyGroup> <PropertyGroup{condition}> <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment> @@ -38,13 +40,9 @@ </ClCompile> <Link> <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> - <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies> - <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions> + <AdditionalDependencies>$(ConanLibraries)%(AdditionalDependencies)</AdditionalDependencies> + <AdditionalOptions>$(ConanLinkerFlags) %(AdditionalOptions)</AdditionalOptions> </Link> - <Lib> - <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> - <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies> - </Lib> <Midl> <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> </Midl>
{"golden_diff": "diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py\n--- a/conans/client/generators/visualstudio.py\n+++ b/conans/client/generators/visualstudio.py\n@@ -18,13 +18,15 @@\n <ItemGroup />\n </Project>'''\n \n- properties_template = ''' <PropertyGroup Label=\"ConanVariables\"{condition}>\n+ properties_template = '''<PropertyGroup Label=\"ConanVariables\"{condition}>\n <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>\n+ <ConanLinkerFlags>{linker_flags}</ConanLinkerFlags>\n <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>\n <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>\n <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>\n <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n+ <ConanLibraries>{libs}</ConanLibraries>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n@@ -38,13 +40,9 @@\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n- <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n- <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n+ <AdditionalDependencies>$(ConanLibraries)%(AdditionalDependencies)</AdditionalDependencies>\n+ <AdditionalOptions>$(ConanLinkerFlags) %(AdditionalOptions)</AdditionalOptions>\n </Link>\n- <Lib>\n- <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n- <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n- </Lib>\n <Midl>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n", "issue": "[question] How to not include some libs in conanbuildinfo.props\nConan version 1.19.0\r\n\r\nI have a dependency on boost (boost/1.71.0@conan/stable).\r\n\r\nI'm using the visual_studio generator with VS2017\r\n\r\nThis adds *all* of the boost libs to conanbuildinfo.props which causes my build to fail. I don't completely understand why this used to be OK with 1.68 and now fails with that too after moving my cache to a different drive (perhaps it was due to installing vs2019 alongside vs2017). \r\n\r\nBut regardless it would be good if I could either cherrypick which libs to include or remove specific ones from the list.\r\n\r\nFor example I don't want to include any of the stacktrace libs which result in duplicate symbols.\r\n\r\nI can't figure out how to do it. If I specify any of the 'without_xxx' options then it rebuilds the whole of the boost dependency which I would rather avoid - I just want to control which libs it puts in the vs props file!\r\n\r\nI was trying to see if there was some place I could remove them from deps_cpp_info but it doesn't look like there is a place to put that (i.e. 
an event just before the generators are run).\r\n\r\nPerhaps overriding the generator could do the trick but it seems like that would be a bit of a sledgehammer and not all that clean.\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport re\n\nfrom conans.model import Generator\nfrom conans.paths import BUILD_INFO_VISUAL_STUDIO\nfrom conans.client.tools.files import VALID_LIB_EXTENSIONS\n\n\nclass VisualStudioGenerator(Generator):\n\n template = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ImportGroup Label=\"PropertySheets\" />\n <PropertyGroup Label=\"UserMacros\" />\n <PropertyGroup Label=\"Conan-RootDirs\">{item_properties}\n </PropertyGroup>\n {properties}\n <ItemGroup />\n</Project>'''\n\n properties_template = ''' <PropertyGroup Label=\"ConanVariables\"{condition}>\n <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>\n <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>\n <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>\n <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>\n <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>\n </PropertyGroup>\n <ItemDefinitionGroup{condition}>\n <ClCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n </Link>\n <Lib>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n </Lib>\n <Midl>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n <ResourceCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ResourceCompile>\n </ItemDefinitionGroup>'''\n\n item_template = '''\n <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''\n\n def _format_items(self):\n sections = []\n for dep_name, cpp_info in self._deps_build_info.dependencies:\n fields = {\n 'root_dir': cpp_info.rootpath,\n 'name': dep_name.replace(\".\", \"-\")\n }\n section = self.item_template.format(**fields)\n sections.append(section)\n return \"\".join(sections)\n\n @property\n def filename(self):\n return BUILD_INFO_VISUAL_STUDIO\n\n def _format_properties(self, build_info, condition):\n def has_valid_ext(lib):\n ext = os.path.splitext(lib)[1]\n return ext in VALID_LIB_EXTENSIONS\n\n fields = {\n 'condition': 
condition,\n 'bin_dirs': \"\".join(\"%s;\" % p for p in build_info.bin_paths),\n 'res_dirs': \"\".join(\"%s;\" % p for p in build_info.res_paths),\n 'include_dirs': \"\".join(\"%s;\" % p for p in build_info.include_paths),\n 'lib_dirs': \"\".join(\"%s;\" % p for p in build_info.lib_paths),\n 'libs': \"\".join(['%s.lib;' % lib if not has_valid_ext(lib)\n else '%s;' % lib for lib in build_info.libs]),\n 'definitions': \"\".join(\"%s;\" % d for d in build_info.defines),\n 'compiler_flags': \" \".join(build_info.cxxflags + build_info.cflags),\n 'linker_flags': \" \".join(build_info.sharedlinkflags),\n 'exe_flags': \" \".join(build_info.exelinkflags)\n }\n formatted_template = self.properties_template.format(**fields)\n return formatted_template\n\n @property\n def content(self):\n per_item_props = self._format_items()\n\n properties = [self._format_properties(self._deps_build_info, condition='')]\n for config, cpp_info in self._deps_build_info.configs.items():\n condition = \" Condition=\\\"'$(Configuration)' == '%s'\\\"\" % config\n properties.append(self._format_properties(cpp_info, condition=condition))\n\n fields = {\n 'item_properties': per_item_props,\n 'properties': '\\n'.join(properties)\n }\n formatted_template = self.template.format(**fields)\n\n userprofile = os.getenv(\"USERPROFILE\")\n if userprofile:\n userprofile = userprofile.replace(\"\\\\\", \"\\\\\\\\\")\n formatted_template = re.sub(userprofile, \"$(USERPROFILE)\", formatted_template,\n flags=re.I)\n return formatted_template\n", "path": "conans/client/generators/visualstudio.py"}]}
2,245
462
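As a side note on the generator code in the record above, the `libs` entry that ends up in `AdditionalDependencies` is built with a small suffixing rule; a stand-alone sketch of that rule (the extension list is reproduced here by assumption and may differ between Conan versions):

```python
import os

# Assumed to mirror conans.client.tools.files.VALID_LIB_EXTENSIONS
VALID_LIB_EXTENSIONS = (".so", ".lib", ".a", ".dylib", ".bc")

def format_libs(libs):
    """Bare library names get a '.lib' suffix; full file names pass through."""
    def has_valid_ext(lib):
        return os.path.splitext(lib)[1] in VALID_LIB_EXTENSIONS
    return "".join(f"{lib};" if has_valid_ext(lib) else f"{lib}.lib;" for lib in libs)

print(format_libs(["boost_system", "libboost_stacktrace_basic.a"]))
# -> boost_system.lib;libboost_stacktrace_basic.a;
```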
gh_patches_debug_32036
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-851
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve errors logging Right now errors are being caught by GraphQL core and sent into the `errors` fields of the GraphQL response. Unfortunately that makes things harder to debug locally; we should print better exceptions locally, by using one of the following options: ```python def process_result( self, request: HttpRequest, result: ExecutionResult ) -> GraphQLHTTPResponse: data: GraphQLHTTPResponse = {"data": result.data} for error in result.errors: actual_error = error.original_error or error traceback.print_exception( type(actual_error), actual_error, actual_error.__traceback__ ) return data ``` ```python def process_result(self, request, result): data = super().process_result(request, result) # Log errors if result.errors: for error in result.errors: try: if isinstance(error, GraphQLError) and error.original_error: raise error.original_error else: raise error except Exception as error: logger.exception(error) return data ``` </issue> <code> [start of strawberry/schema/schema.py] 1 from typing import Any, Dict, List, Optional, Sequence, Type, Union 2 3 from graphql import ( 4 ExecutionContext as GraphQLExecutionContext, 5 GraphQLSchema, 6 get_introspection_query, 7 parse, 8 validate_schema, 9 ) 10 from graphql.subscription import subscribe 11 from graphql.type.directives import specified_directives 12 13 from strawberry.custom_scalar import ScalarDefinition 14 from strawberry.enum import EnumDefinition 15 from strawberry.extensions import Extension 16 from strawberry.schema.schema_converter import GraphQLCoreConverter 17 from strawberry.types import ExecutionResult 18 from strawberry.types.types import TypeDefinition 19 from strawberry.union import StrawberryUnion 20 21 from ..middleware import DirectivesMiddleware, Middleware 22 from ..printer import print_schema 23 from .execute import execute, execute_sync 24 25 26 class Schema: 27 def __init__( 28 self, 29 # TODO: can we make sure we only allow to pass something that has been decorated? 30 query: Type, 31 mutation: Optional[Type] = None, 32 subscription: Optional[Type] = None, 33 directives=(), 34 types=(), 35 extensions: Sequence[Type[Extension]] = (), 36 execution_context_class: Optional[Type[GraphQLExecutionContext]] = None, 37 ): 38 self.extensions = extensions 39 self.execution_context_class = execution_context_class 40 self.schema_converter = GraphQLCoreConverter() 41 42 query_type = self.schema_converter.from_object_type(query) 43 mutation_type = ( 44 self.schema_converter.from_object_type(mutation) if mutation else None 45 ) 46 subscription_type = ( 47 self.schema_converter.from_object_type(subscription) 48 if subscription 49 else None 50 ) 51 52 self.middleware: List[Middleware] = [DirectivesMiddleware(directives)] 53 54 directives = [ 55 self.schema_converter.from_directive(directive.directive_definition) 56 for directive in directives 57 ] 58 59 self._schema = GraphQLSchema( 60 query=query_type, 61 mutation=mutation_type, 62 subscription=subscription_type if subscription else None, 63 directives=specified_directives + directives, 64 types=list(map(self.schema_converter.from_object_type, types)), 65 ) 66 67 # Validate schema early because we want developers to know about 68 # possible issues as soon as possible 69 errors = validate_schema(self._schema) 70 if errors: 71 formatted_errors = "\n\n".join(f"❌ {error.message}" for error in errors) 72 raise ValueError(f"Invalid Schema. 
Errors:\n\n{formatted_errors}") 73 74 self.query = self.schema_converter.type_map[query_type.name] 75 76 def get_type_by_name( 77 self, name: str 78 ) -> Optional[ 79 Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion] 80 ]: 81 if name in self.schema_converter.type_map: 82 return self.schema_converter.type_map[name].definition 83 84 return None 85 86 async def execute( 87 self, 88 query: str, 89 variable_values: Optional[Dict[str, Any]] = None, 90 context_value: Optional[Any] = None, 91 root_value: Optional[Any] = None, 92 operation_name: Optional[str] = None, 93 validate_queries: bool = True, 94 ) -> ExecutionResult: 95 result = await execute( 96 self._schema, 97 query, 98 variable_values=variable_values, 99 root_value=root_value, 100 context_value=context_value, 101 operation_name=operation_name, 102 additional_middlewares=self.middleware, 103 extensions=self.extensions, 104 execution_context_class=self.execution_context_class, 105 validate_queries=validate_queries, 106 ) 107 108 return ExecutionResult( 109 data=result.data, 110 errors=result.errors, 111 extensions=result.extensions, 112 ) 113 114 def execute_sync( 115 self, 116 query: str, 117 variable_values: Optional[Dict[str, Any]] = None, 118 context_value: Optional[Any] = None, 119 root_value: Optional[Any] = None, 120 operation_name: Optional[str] = None, 121 validate_queries: bool = True, 122 ) -> ExecutionResult: 123 result = execute_sync( 124 self._schema, 125 query, 126 variable_values=variable_values, 127 root_value=root_value, 128 context_value=context_value, 129 operation_name=operation_name, 130 additional_middlewares=self.middleware, 131 extensions=self.extensions, 132 execution_context_class=self.execution_context_class, 133 validate_queries=validate_queries, 134 ) 135 136 return ExecutionResult( 137 data=result.data, 138 errors=result.errors, 139 extensions=result.extensions, 140 ) 141 142 async def subscribe( 143 self, 144 query: str, 145 variable_values: Optional[Dict[str, Any]] = None, 146 context_value: Optional[Any] = None, 147 root_value: Optional[Any] = None, 148 operation_name: Optional[str] = None, 149 ): 150 return await subscribe( 151 self._schema, 152 parse(query), 153 root_value=root_value, 154 context_value=context_value, 155 variable_values=variable_values, 156 operation_name=operation_name, 157 ) 158 159 def as_str(self) -> str: 160 return print_schema(self) 161 162 __str__ = as_str 163 164 def introspect(self) -> Dict[str, Any]: 165 """Return the introspection query result for the current schema 166 167 Raises: 168 ValueError: If the introspection query fails due to an invalid schema 169 """ 170 introspection = self.execute_sync(get_introspection_query()) 171 if introspection.errors or not introspection.data: 172 raise ValueError(f"Invalid Schema. Errors {introspection.errors!r}") 173 174 return introspection.data 175 [end of strawberry/schema/schema.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py --- a/strawberry/schema/schema.py +++ b/strawberry/schema/schema.py @@ -1,3 +1,4 @@ +import logging from typing import Any, Dict, List, Optional, Sequence, Type, Union from graphql import ( @@ -7,6 +8,7 @@ parse, validate_schema, ) +from graphql.error import GraphQLError from graphql.subscription import subscribe from graphql.type.directives import specified_directives @@ -23,6 +25,9 @@ from .execute import execute, execute_sync +logger = logging.getLogger("strawberry.execution") + + class Schema: def __init__( self, @@ -83,6 +88,11 @@ return None + def process_errors(self, errors: List[GraphQLError]) -> None: + for error in errors: + actual_error = error.original_error or error + logger.error(actual_error, exc_info=actual_error) + async def execute( self, query: str, @@ -105,6 +115,9 @@ validate_queries=validate_queries, ) + if result.errors: + self.process_errors(result.errors) + return ExecutionResult( data=result.data, errors=result.errors, @@ -133,6 +146,9 @@ validate_queries=validate_queries, ) + if result.errors: + self.process_errors(result.errors) + return ExecutionResult( data=result.data, errors=result.errors,
{"golden_diff": "diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py\n--- a/strawberry/schema/schema.py\n+++ b/strawberry/schema/schema.py\n@@ -1,3 +1,4 @@\n+import logging\n from typing import Any, Dict, List, Optional, Sequence, Type, Union\n \n from graphql import (\n@@ -7,6 +8,7 @@\n parse,\n validate_schema,\n )\n+from graphql.error import GraphQLError\n from graphql.subscription import subscribe\n from graphql.type.directives import specified_directives\n \n@@ -23,6 +25,9 @@\n from .execute import execute, execute_sync\n \n \n+logger = logging.getLogger(\"strawberry.execution\")\n+\n+\n class Schema:\n def __init__(\n self,\n@@ -83,6 +88,11 @@\n \n return None\n \n+ def process_errors(self, errors: List[GraphQLError]) -> None:\n+ for error in errors:\n+ actual_error = error.original_error or error\n+ logger.error(actual_error, exc_info=actual_error)\n+\n async def execute(\n self,\n query: str,\n@@ -105,6 +115,9 @@\n validate_queries=validate_queries,\n )\n \n+ if result.errors:\n+ self.process_errors(result.errors)\n+\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n@@ -133,6 +146,9 @@\n validate_queries=validate_queries,\n )\n \n+ if result.errors:\n+ self.process_errors(result.errors)\n+\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n", "issue": "Improve errors logging\nRight now errors are being caught by GraphQL core and sent into the `errors` fields of the GraphQL response. Unfortunately that makes things harder to debug locally; we should print better exceptions locally, by using one of the following options:\r\n\r\n```python\r\n def process_result(\r\n self, request: HttpRequest, result: ExecutionResult\r\n ) -> GraphQLHTTPResponse:\r\n data: GraphQLHTTPResponse = {\"data\": result.data}\r\n\r\n for error in result.errors:\r\n actual_error = error.original_error or error\r\n\r\n traceback.print_exception(\r\n type(actual_error), actual_error, actual_error.__traceback__\r\n )\r\n\r\n return data\r\n```\r\n\r\n```python\r\n def process_result(self, request, result):\r\n data = super().process_result(request, result)\r\n\r\n # Log errors\r\n if result.errors:\r\n for error in result.errors:\r\n try:\r\n if isinstance(error, GraphQLError) and error.original_error:\r\n raise error.original_error\r\n else:\r\n raise error\r\n except Exception as error:\r\n logger.exception(error)\r\n\r\n return data\r\n```\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Sequence, Type, Union\n\nfrom graphql import (\n ExecutionContext as GraphQLExecutionContext,\n GraphQLSchema,\n get_introspection_query,\n parse,\n validate_schema,\n)\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.extensions import Extension\nfrom strawberry.schema.schema_converter import GraphQLCoreConverter\nfrom strawberry.types import ExecutionResult\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.union import StrawberryUnion\n\nfrom ..middleware import DirectivesMiddleware, Middleware\nfrom ..printer import print_schema\nfrom .execute import execute, execute_sync\n\n\nclass Schema:\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives=(),\n types=(),\n extensions: 
Sequence[Type[Extension]] = (),\n execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,\n ):\n self.extensions = extensions\n self.execution_context_class = execution_context_class\n self.schema_converter = GraphQLCoreConverter()\n\n query_type = self.schema_converter.from_object_type(query)\n mutation_type = (\n self.schema_converter.from_object_type(mutation) if mutation else None\n )\n subscription_type = (\n self.schema_converter.from_object_type(subscription)\n if subscription\n else None\n )\n\n self.middleware: List[Middleware] = [DirectivesMiddleware(directives)]\n\n directives = [\n self.schema_converter.from_directive(directive.directive_definition)\n for directive in directives\n ]\n\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + directives,\n types=list(map(self.schema_converter.from_object_type, types)),\n )\n\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n if errors:\n formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n raise ValueError(f\"Invalid Schema. Errors:\\n\\n{formatted_errors}\")\n\n self.query = self.schema_converter.type_map[query_type.name]\n\n def get_type_by_name(\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n if name in self.schema_converter.type_map:\n return self.schema_converter.type_map[name].definition\n\n return None\n\n async def execute(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n validate_queries: bool = True,\n ) -> ExecutionResult:\n result = await execute(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n extensions=self.extensions,\n execution_context_class=self.execution_context_class,\n validate_queries=validate_queries,\n )\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n def execute_sync(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n validate_queries: bool = True,\n ) -> ExecutionResult:\n result = execute_sync(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n extensions=self.extensions,\n execution_context_class=self.execution_context_class,\n validate_queries=validate_queries,\n )\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n async def subscribe(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n __str__ = as_str\n\n 
def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}]}
2,296
353
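A short usage sketch of what the added `process_errors` hook buys, assuming a Strawberry build that includes the patch above; the query and resolver are illustrative:

```python
import logging
import strawberry

logging.basicConfig(level=logging.ERROR)  # surfaces "strawberry.execution" output

@strawberry.type
class Query:
    @strawberry.field
    def boom(self) -> int:
        raise RuntimeError("something broke")

schema = strawberry.Schema(query=Query)
result = schema.execute_sync("{ boom }")
# result.errors still carries the GraphQLError for the response, while the
# original RuntimeError traceback is now logged via the "strawberry.execution"
# logger, making local debugging possible without inspecting the payload.
```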
gh_patches_debug_7799
rasdani/github-patches
git_diff
e2nIEE__pandapower-1329
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 'plot_gens' unexpected in simple_plot() It seems that `plot_gens` is an argument in `simple_plot()`, but is not documented. When attempting to set `plot_gens` when calling `simple_plot()`, I get the following error: > TypeError: simple_plot() got an unexpected keyword argument 'plot_gens' Version: 2.7.0 </issue> <code> [start of pandapower/plotting/simple_plot.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics 4 # and Energy System Technology (IEE), Kassel. All rights reserved. 5 6 7 import matplotlib.pyplot as plt 8 9 from pandapower.plotting.plotting_toolbox import get_collection_sizes 10 from pandapower.plotting.collections import create_bus_collection, create_line_collection, \ 11 create_trafo_collection, create_trafo3w_collection, \ 12 create_line_switch_collection, draw_collections, create_bus_bus_switch_collection, create_sgen_collection, \ 13 create_gen_collection, create_load_collection 14 from pandapower.plotting.generic_geodata import create_generic_coordinates 15 16 try: 17 import pplog as logging 18 except ImportError: 19 import logging 20 21 logger = logging.getLogger(__name__) 22 23 24 def simple_plot(net, respect_switches=False, line_width=1.0, bus_size=1.0, ext_grid_size=1.0, 25 trafo_size=1.0, plot_loads=False, plot_gens=False, plot_sgens=False, load_size=1.0, gen_size=1.0, sgen_size=1.0, 26 switch_size=2.0, switch_distance=1.0, plot_line_switches=False, scale_size=True, 27 bus_color="b", line_color='grey', trafo_color='k', ext_grid_color='y', 28 switch_color='k', library="igraph", show_plot=True, ax=None): 29 """ 30 Plots a pandapower network as simple as possible. If no geodata is available, artificial 31 geodata is generated. For advanced plotting see the tutorial 32 33 INPUT: 34 **net** - The pandapower format network. 35 36 OPTIONAL: 37 **respect_switches** (bool, False) - Respect switches if artificial geodata is created. 38 This Flag is ignored if plot_line_switches is True 39 40 **line_width** (float, 1.0) - width of lines 41 42 **bus_size** (float, 1.0) - Relative size of buses to plot. 43 The value bus_size is multiplied with mean_distance_between_buses, which equals the 44 distance between 45 the max geoocord and the min divided by 200. 46 mean_distance_between_buses = sum((net['bus_geodata'].max() - net['bus_geodata'].min()) / 200) 47 48 **ext_grid_size** (float, 1.0) - Relative size of ext_grids to plot. See bus sizes for details. 49 Note: ext_grids are plottet as rectangles 50 51 **trafo_size** (float, 1.0) - Relative size of trafos to plot. 52 53 **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn. 54 55 **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn. 56 57 **load_size** (float, 1.0) - Relative size of loads to plot. 58 59 **sgen_size** (float, 1.0) - Relative size of sgens to plot. 60 61 **switch_size** (float, 2.0) - Relative size of switches to plot. See bus size for details 62 63 **switch_distance** (float, 1.0) - Relative distance of the switch to its corresponding \ 64 bus. See bus size for details 65 66 **plot_line_switches** (bool, False) - Flag if line switches are plotted 67 68 **scale_size** (bool, True) - Flag if bus_size, ext_grid_size, bus_size- and distance \ 69 will be scaled with respect to grid mean distances 70 71 **bus_color** (String, colors[0]) - Bus Color. 
Init as first value of color palette. Usually colors[0] = "b". 72 73 **line_color** (String, 'grey') - Line Color. Init is grey 74 75 **trafo_color** (String, 'k') - Trafo Color. Init is black 76 77 **ext_grid_color** (String, 'y') - External Grid Color. Init is yellow 78 79 **switch_color** (String, 'k') - Switch Color. Init is black 80 81 **library** (String, "igraph") - library name to create generic coordinates (case of 82 missing geodata). "igraph" to use igraph package or "networkx" to use networkx package. 83 84 **show_plot** (bool, True) - Shows plot at the end of plotting 85 86 **ax** (object, None) - matplotlib axis to plot to 87 88 OUTPUT: 89 **ax** - axes of figure 90 """ 91 # don't hide lines if switches are plotted 92 if plot_line_switches: 93 respect_switches = False 94 95 # create geocoord if none are available 96 if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0: 97 logger.warning("No or insufficient geodata available --> Creating artificial coordinates." + 98 " This may take some time") 99 create_generic_coordinates(net, respect_switches=respect_switches, library=library) 100 101 if scale_size: 102 # if scale_size -> calc size from distance between min and max geocoord 103 sizes = get_collection_sizes(net, bus_size, ext_grid_size, trafo_size, 104 load_size, sgen_size, switch_size, switch_distance) 105 bus_size = sizes["bus"] 106 ext_grid_size = sizes["ext_grid"] 107 trafo_size = sizes["trafo"] 108 sgen_size = sizes["sgen"] 109 load_size = sizes["load"] 110 switch_size = sizes["switch"] 111 switch_distance = sizes["switch_distance"] 112 113 # create bus collections to plot 114 bc = create_bus_collection(net, net.bus.index, size=bus_size, color=bus_color, zorder=10) 115 116 # if bus geodata is available, but no line geodata 117 use_bus_geodata = len(net.line_geodata) == 0 118 in_service_lines = net.line[net.line.in_service].index 119 nogolines = set(net.switch.element[(net.switch.et == "l") & (net.switch.closed == 0)]) \ 120 if respect_switches else set() 121 plot_lines = in_service_lines.difference(nogolines) 122 123 # create line collections 124 lc = create_line_collection(net, plot_lines, color=line_color, linewidths=line_width, 125 use_bus_geodata=use_bus_geodata) 126 collections = [bc, lc] 127 128 # create ext_grid collections 129 eg_buses_with_geo_coordinates = set(net.ext_grid.bus.values) & set(net.bus_geodata.index) 130 if len(eg_buses_with_geo_coordinates) > 0: 131 sc = create_bus_collection(net, eg_buses_with_geo_coordinates, patch_type="rect", 132 size=ext_grid_size, color=ext_grid_color, zorder=11) 133 collections.append(sc) 134 135 # create trafo collection if trafo is available 136 trafo_buses_with_geo_coordinates = [t for t, trafo in net.trafo.iterrows() 137 if trafo.hv_bus in net.bus_geodata.index and 138 trafo.lv_bus in net.bus_geodata.index] 139 if len(trafo_buses_with_geo_coordinates) > 0: 140 tc = create_trafo_collection(net, trafo_buses_with_geo_coordinates, 141 color=trafo_color, size=trafo_size) 142 collections.append(tc) 143 144 # create trafo3w collection if trafo3w is available 145 trafo3w_buses_with_geo_coordinates = [ 146 t for t, trafo3w in net.trafo3w.iterrows() if trafo3w.hv_bus in net.bus_geodata.index and 147 trafo3w.mv_bus in net.bus_geodata.index and trafo3w.lv_bus in net.bus_geodata.index] 148 if len(trafo3w_buses_with_geo_coordinates) > 0: 149 tc = create_trafo3w_collection(net, trafo3w_buses_with_geo_coordinates, 150 color=trafo_color) 151 collections.append(tc) 152 153 if plot_line_switches and len(net.switch): 154 sc = 
create_line_switch_collection( 155 net, size=switch_size, distance_to_bus=switch_distance, 156 use_line_geodata=not use_bus_geodata, zorder=12, color=switch_color) 157 collections.append(sc) 158 159 if plot_sgens and len(net.sgen): 160 sgc = create_sgen_collection(net, size=sgen_size) 161 collections.append(sgc) 162 if plot_gens and len(net.gen): 163 gc = create_gen_collection(net, size=gen_size) 164 collections.append(gc) 165 if plot_loads and len(net.load): 166 lc = create_load_collection(net, size=load_size) 167 collections.append(lc) 168 169 if len(net.switch): 170 bsc = create_bus_bus_switch_collection(net, size=switch_size) 171 collections.append(bsc) 172 173 ax = draw_collections(collections, ax=ax) 174 if show_plot: 175 plt.show() 176 return ax 177 178 179 if __name__ == "__main__": 180 import pandapower.networks as nw 181 182 net = nw.case145() 183 # net = nw.create_cigre_network_mv() 184 # net = nw.mv_oberrhein() 185 simple_plot(net, bus_size=0.4) 186 [end of pandapower/plotting/simple_plot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pandapower/plotting/simple_plot.py b/pandapower/plotting/simple_plot.py --- a/pandapower/plotting/simple_plot.py +++ b/pandapower/plotting/simple_plot.py @@ -51,6 +51,8 @@ **trafo_size** (float, 1.0) - Relative size of trafos to plot. **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn. + + **plot_gens** (bool, False) - Flag to decide whether gen symbols should be drawn. **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.
{"golden_diff": "diff --git a/pandapower/plotting/simple_plot.py b/pandapower/plotting/simple_plot.py\n--- a/pandapower/plotting/simple_plot.py\n+++ b/pandapower/plotting/simple_plot.py\n@@ -51,6 +51,8 @@\n **trafo_size** (float, 1.0) - Relative size of trafos to plot.\n \n **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn.\n+ \n+ **plot_gens** (bool, False) - Flag to decide whether gen symbols should be drawn.\n \n **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.\n", "issue": "'plot_gens' unexpected in simple_plot()\nIt seems that `plot_gens` is an argument in `simple_plot()`, but is not documented. When attempting to set `plot_gens` when calling `simple_plot()`, I get the following error:\r\n\r\n> TypeError: simple_plot() got an unexpected keyword argument 'plot_gens'\r\n\r\nVersion: 2.7.0\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport matplotlib.pyplot as plt\n\nfrom pandapower.plotting.plotting_toolbox import get_collection_sizes\nfrom pandapower.plotting.collections import create_bus_collection, create_line_collection, \\\n create_trafo_collection, create_trafo3w_collection, \\\n create_line_switch_collection, draw_collections, create_bus_bus_switch_collection, create_sgen_collection, \\\n create_gen_collection, create_load_collection\nfrom pandapower.plotting.generic_geodata import create_generic_coordinates\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef simple_plot(net, respect_switches=False, line_width=1.0, bus_size=1.0, ext_grid_size=1.0,\n trafo_size=1.0, plot_loads=False, plot_gens=False, plot_sgens=False, load_size=1.0, gen_size=1.0, sgen_size=1.0,\n switch_size=2.0, switch_distance=1.0, plot_line_switches=False, scale_size=True,\n bus_color=\"b\", line_color='grey', trafo_color='k', ext_grid_color='y',\n switch_color='k', library=\"igraph\", show_plot=True, ax=None):\n \"\"\"\n Plots a pandapower network as simple as possible. If no geodata is available, artificial\n geodata is generated. For advanced plotting see the tutorial\n\n INPUT:\n **net** - The pandapower format network.\n\n OPTIONAL:\n **respect_switches** (bool, False) - Respect switches if artificial geodata is created.\n This Flag is ignored if plot_line_switches is True\n\n **line_width** (float, 1.0) - width of lines\n\n **bus_size** (float, 1.0) - Relative size of buses to plot.\n The value bus_size is multiplied with mean_distance_between_buses, which equals the\n distance between\n the max geoocord and the min divided by 200.\n mean_distance_between_buses = sum((net['bus_geodata'].max() - net['bus_geodata'].min()) / 200)\n\n **ext_grid_size** (float, 1.0) - Relative size of ext_grids to plot. See bus sizes for details.\n Note: ext_grids are plottet as rectangles\n\n **trafo_size** (float, 1.0) - Relative size of trafos to plot.\n\n **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn.\n\n **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.\n\n **load_size** (float, 1.0) - Relative size of loads to plot.\n\n **sgen_size** (float, 1.0) - Relative size of sgens to plot.\n\n **switch_size** (float, 2.0) - Relative size of switches to plot. 
See bus size for details\n\n **switch_distance** (float, 1.0) - Relative distance of the switch to its corresponding \\\n bus. See bus size for details\n\n **plot_line_switches** (bool, False) - Flag if line switches are plotted\n\n **scale_size** (bool, True) - Flag if bus_size, ext_grid_size, bus_size- and distance \\\n will be scaled with respect to grid mean distances\n\n **bus_color** (String, colors[0]) - Bus Color. Init as first value of color palette. Usually colors[0] = \"b\".\n\n **line_color** (String, 'grey') - Line Color. Init is grey\n\n **trafo_color** (String, 'k') - Trafo Color. Init is black\n\n **ext_grid_color** (String, 'y') - External Grid Color. Init is yellow\n\n **switch_color** (String, 'k') - Switch Color. Init is black\n\n **library** (String, \"igraph\") - library name to create generic coordinates (case of\n missing geodata). \"igraph\" to use igraph package or \"networkx\" to use networkx package.\n\n **show_plot** (bool, True) - Shows plot at the end of plotting\n\n **ax** (object, None) - matplotlib axis to plot to\n\n OUTPUT:\n **ax** - axes of figure\n \"\"\"\n # don't hide lines if switches are plotted\n if plot_line_switches:\n respect_switches = False\n\n # create geocoord if none are available\n if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:\n logger.warning(\"No or insufficient geodata available --> Creating artificial coordinates.\" +\n \" This may take some time\")\n create_generic_coordinates(net, respect_switches=respect_switches, library=library)\n\n if scale_size:\n # if scale_size -> calc size from distance between min and max geocoord\n sizes = get_collection_sizes(net, bus_size, ext_grid_size, trafo_size,\n load_size, sgen_size, switch_size, switch_distance)\n bus_size = sizes[\"bus\"]\n ext_grid_size = sizes[\"ext_grid\"]\n trafo_size = sizes[\"trafo\"]\n sgen_size = sizes[\"sgen\"]\n load_size = sizes[\"load\"]\n switch_size = sizes[\"switch\"]\n switch_distance = sizes[\"switch_distance\"]\n\n # create bus collections to plot\n bc = create_bus_collection(net, net.bus.index, size=bus_size, color=bus_color, zorder=10)\n\n # if bus geodata is available, but no line geodata\n use_bus_geodata = len(net.line_geodata) == 0\n in_service_lines = net.line[net.line.in_service].index\n nogolines = set(net.switch.element[(net.switch.et == \"l\") & (net.switch.closed == 0)]) \\\n if respect_switches else set()\n plot_lines = in_service_lines.difference(nogolines)\n\n # create line collections\n lc = create_line_collection(net, plot_lines, color=line_color, linewidths=line_width,\n use_bus_geodata=use_bus_geodata)\n collections = [bc, lc]\n\n # create ext_grid collections\n eg_buses_with_geo_coordinates = set(net.ext_grid.bus.values) & set(net.bus_geodata.index)\n if len(eg_buses_with_geo_coordinates) > 0:\n sc = create_bus_collection(net, eg_buses_with_geo_coordinates, patch_type=\"rect\",\n size=ext_grid_size, color=ext_grid_color, zorder=11)\n collections.append(sc)\n\n # create trafo collection if trafo is available\n trafo_buses_with_geo_coordinates = [t for t, trafo in net.trafo.iterrows()\n if trafo.hv_bus in net.bus_geodata.index and\n trafo.lv_bus in net.bus_geodata.index]\n if len(trafo_buses_with_geo_coordinates) > 0:\n tc = create_trafo_collection(net, trafo_buses_with_geo_coordinates,\n color=trafo_color, size=trafo_size)\n collections.append(tc)\n\n # create trafo3w collection if trafo3w is available\n trafo3w_buses_with_geo_coordinates = [\n t for t, trafo3w in net.trafo3w.iterrows() if trafo3w.hv_bus in 
net.bus_geodata.index and\n trafo3w.mv_bus in net.bus_geodata.index and trafo3w.lv_bus in net.bus_geodata.index]\n if len(trafo3w_buses_with_geo_coordinates) > 0:\n tc = create_trafo3w_collection(net, trafo3w_buses_with_geo_coordinates,\n color=trafo_color)\n collections.append(tc)\n\n if plot_line_switches and len(net.switch):\n sc = create_line_switch_collection(\n net, size=switch_size, distance_to_bus=switch_distance,\n use_line_geodata=not use_bus_geodata, zorder=12, color=switch_color)\n collections.append(sc)\n\n if plot_sgens and len(net.sgen):\n sgc = create_sgen_collection(net, size=sgen_size)\n collections.append(sgc)\n if plot_gens and len(net.gen):\n gc = create_gen_collection(net, size=gen_size)\n collections.append(gc)\n if plot_loads and len(net.load):\n lc = create_load_collection(net, size=load_size)\n collections.append(lc)\n\n if len(net.switch):\n bsc = create_bus_bus_switch_collection(net, size=switch_size)\n collections.append(bsc)\n\n ax = draw_collections(collections, ax=ax)\n if show_plot:\n plt.show()\n return ax\n\n\nif __name__ == \"__main__\":\n import pandapower.networks as nw\n\n net = nw.case145()\n # net = nw.create_cigre_network_mv()\n # net = nw.mv_oberrhein()\n simple_plot(net, bus_size=0.4)\n", "path": "pandapower/plotting/simple_plot.py"}]}
3119
159
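A minimal usage sketch for the `plot_gens` flag documented by the diff above, assuming the `case145` example grid already referenced in the module's `__main__` block (the grid choice and sizes are illustrative, not prescribed by the patch):

```python
import pandapower.networks as nw
from pandapower.plotting import simple_plot

# case145 is the same example network used in simple_plot.py's __main__ block
net = nw.case145()

# plot_gens and gen_size are accepted by simple_plot() as shown in its signature above
simple_plot(net, bus_size=0.4, plot_gens=True, gen_size=1.5)
```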
gh_patches_debug_13840
rasdani/github-patches
git_diff
conan-io__conan-center-index-636
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] libx264/20190605: build fail compiler.version setting required for vcvars not defined ### Package and Environment Details * Package Name/Version: **libx264/20190605** * Operating System+version: **Windows 10** * Compiler+version: **Mingw GCC 8.1** * Docker image: **conanio/gcc8.1** * Conan version: **conan 1.21.0** * Python version: **Python 3.7.5** ### Conan profile ``` Configuration: [settings] arch=x86_64 arch_build=x86_64 build_type=Debug compiler=gcc compiler.cppstd=14 compiler.exception=seh compiler.libcxx=libstdc++11 compiler.threads=posix compiler.version=8.1 os=Windows os_build=Windows [options] [build_requires] [env] CONAN_CMAKE_GENERATOR=MinGW Makefiles ``` ``` [requires] libx264/20190605 ``` ### Steps to reproduce Change `libx264/20190605@bincrafters/stable` to `libx264/20190605` build fail. Failed in ``` ConanException: compiler.version setting required for vcvars not defined ``` Because of new `conanfile.py` without first determining whether it is `_is_msvc`. In no `Visual Studio` environment, `tools.vcvars` due to the error. ### Fixed method in line 94, change to: ``` def build(self): if self._is_msvc: with tools.vcvars(self.settings): autotools = self._configure_autotools() autotools.make() else: autotools = self._configure_autotools() autotools.make() def package(self): if self._is_msvc: with tools.vcvars(self.settings): autotools = self._configure_autotools() autotools.install() else: autotools = self._configure_autotools() autotools.install() self.copy(pattern="COPYING", src=self._source_subfolder, dst='licenses') tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig')) ``` ### Logs <details><summary>Click to expand log</summary> ``` libx264/20190605: Applying build-requirement: nasm/2.14 libx264/20190605: Applying build-requirement: msys2/20190524 libx264/20190605: WARN: Build folder is dirty, removing it: C:\Users\Home\.conan\data\libx264\20190605\_\_\build\1a9b746db3232276ada4cf64b16512ba901fcf09 libx264/20190605: Copying sources to build folder libx264/20190605: Building your package in C:\Users\Home\.conan\data\libx264\20190605\_\_\build\1a9b746db3232276ada4cf64b16512ba901fcf09 libx264/20190605: Generator txt created conanbuildinfo.txt libx264/20190605: Calling build() libx264/20190605: libx264/20190605: ERROR: Package '1a9b746db3232276ada4cf64b16512ba901fcf09' build failed libx264/20190605: WARN: Build folder C:\Users\Home\.conan\data\libx264\20190605\_\_\build\1a9b746db3232276ada4cf64b16512ba901fcf09 ERROR: libx264/20190605: Error in build() method, line 95 with tools.vcvars(self.settings): ConanException: compiler.version setting required for vcvars not defined ``` </details> </issue> <code> [start of recipes/libx264/all/conanfile.py] 1 from conans import ConanFile, tools, AutoToolsBuildEnvironment 2 import os 3 4 5 class LibX264Conan(ConanFile): 6 name = "libx264" 7 url = "https://github.com/conan-io/conan-center-index" 8 homepage = "https://www.videolan.org/developers/x264.html" 9 description = "x264 is a free software library and application for encoding video streams into the " \ 10 "H.264/MPEG-4 AVC compression format" 11 topics = ("conan", "libx264", "video", "encoding") 12 license = "GPL-2.0" 13 settings = "os", "arch", "compiler", "build_type" 14 options = {"shared": [True, False], "fPIC": [True, False], "bit_depth": [8, 10, "all"]} 15 default_options = {'shared': False, 'fPIC': True, 'bit_depth': 'all'} 16 build_requires = 
"nasm/2.13.02" 17 _override_env = {} 18 _autotools = None 19 20 @property 21 def _is_mingw(self): 22 return self.settings.os == "Windows" and self.settings.compiler == 'gcc' 23 24 @property 25 def _is_msvc(self): 26 return self.settings.compiler == 'Visual Studio' 27 28 @property 29 def _source_subfolder(self): 30 return "source_subfolder" 31 32 def build_requirements(self): 33 if "CONAN_BASH_PATH" not in os.environ and tools.os_info.is_windows: 34 self.build_requires("msys2/20190524") 35 36 def config_options(self): 37 if self.settings.os == 'Windows': 38 del self.options.fPIC 39 40 def configure(self): 41 del self.settings.compiler.libcxx 42 del self.settings.compiler.cppstd 43 44 def source(self): 45 tools.get(**self.conan_data["sources"][self.version]) 46 extracted_dir = 'x264-snapshot-%s-2245' % self.version 47 os.rename(extracted_dir, self._source_subfolder) 48 49 @property 50 def env(self): 51 ret = super(LibX264Conan, self).env 52 ret.update(self._override_env) 53 return ret 54 55 def _configure_autotools(self): 56 if not self._autotools: 57 prefix = tools.unix_path(self.package_folder) 58 args = ['--disable-cli', '--prefix={}'.format(prefix)] 59 if self.options.shared: 60 args.append('--enable-shared') 61 else: 62 args.append('--enable-static') 63 if self.settings.os != 'Windows' and self.options.fPIC: 64 args.append('--enable-pic') 65 if self.settings.build_type == 'Debug': 66 args.append('--enable-debug') 67 args.append('--bit-depth=%s' % str(self.options.bit_depth)) 68 69 if tools.cross_building(self.settings): 70 if self.settings.os == "Android": 71 # the as of ndk does not work well for building libx264 72 self._override_env["AS"] = os.environ["CC"] 73 ndk_root = tools.unix_path(os.environ["NDK_ROOT"]) 74 arch = {'armv7': 'arm', 75 'armv8': 'aarch64', 76 'x86': 'i686', 77 'x86_64': 'x86_64'}.get(str(self.settings.arch)) 78 abi = 'androideabi' if self.settings.arch == 'armv7' else 'android' 79 cross_prefix = "%s/bin/%s-linux-%s-" % (ndk_root, arch, abi) 80 args.append('--cross-prefix=%s' % cross_prefix) 81 82 if self._is_msvc: 83 self._override_env['CC'] = 'cl' 84 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) 85 if self._is_msvc: 86 self._autotools.flags.append('-%s' % str(self.settings.compiler.runtime)) 87 # cannot open program database ... 
if multiple CL.EXE write to the same .PDB file, please use /FS 88 self._autotools.flags.append('-FS') 89 self._autotools.configure(args=args, build=False, vars=self._override_env, configure_dir=self._source_subfolder) 90 return self._autotools 91 92 def build(self): 93 with tools.vcvars(self.settings): 94 autotools = self._configure_autotools() 95 autotools.make() 96 97 def package(self): 98 with tools.vcvars(self.settings): 99 autotools = self._configure_autotools() 100 autotools.install() 101 self.copy(pattern="COPYING", src=self._source_subfolder, dst='licenses') 102 tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig')) 103 104 def package_info(self): 105 if self._is_msvc: 106 self.cpp_info.libs = ['libx264.dll.lib' if self.options.shared else 'libx264'] 107 if self.options.shared: 108 self.cpp_info.defines.append("X264_API_IMPORTS") 109 elif self._is_mingw: 110 self.cpp_info.libs = ['x264.dll' if self.options.shared else 'x264'] 111 else: 112 self.cpp_info.libs = ['x264'] 113 if self.settings.os == "Linux": 114 self.cpp_info.system_libs.extend(['dl', 'pthread', 'm']) 115 elif self.settings.os == "Android": 116 self.cpp_info.system_libs.extend(['dl', 'm']) 117 self.cpp_info.names['pkg_config'] = 'x264' 118 [end of recipes/libx264/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/libx264/all/conanfile.py b/recipes/libx264/all/conanfile.py --- a/recipes/libx264/all/conanfile.py +++ b/recipes/libx264/all/conanfile.py @@ -90,12 +90,12 @@ return self._autotools def build(self): - with tools.vcvars(self.settings): + with tools.vcvars(self.settings) if self._is_msvc else tools.no_op(): autotools = self._configure_autotools() autotools.make() def package(self): - with tools.vcvars(self.settings): + with tools.vcvars(self.settings) if self._is_msvc else tools.no_op(): autotools = self._configure_autotools() autotools.install() self.copy(pattern="COPYING", src=self._source_subfolder, dst='licenses')
{"golden_diff": "diff --git a/recipes/libx264/all/conanfile.py b/recipes/libx264/all/conanfile.py\n--- a/recipes/libx264/all/conanfile.py\n+++ b/recipes/libx264/all/conanfile.py\n@@ -90,12 +90,12 @@\n return self._autotools\n \n def build(self):\n- with tools.vcvars(self.settings):\n+ with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n autotools = self._configure_autotools()\n autotools.make()\n \n def package(self):\n- with tools.vcvars(self.settings):\n+ with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n autotools = self._configure_autotools()\n autotools.install()\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst='licenses')\n", "issue": "[package] libx264/20190605: build fail compiler.version setting required for vcvars not defined\n\r\n### Package and Environment Details\r\n * Package Name/Version: **libx264/20190605**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **Mingw GCC 8.1**\r\n * Docker image: **conanio/gcc8.1**\r\n * Conan version: **conan 1.21.0**\r\n * Python version: **Python 3.7.5**\r\n\r\n\r\n### Conan profile\r\n```\r\nConfiguration:\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Debug\r\ncompiler=gcc\r\ncompiler.cppstd=14\r\ncompiler.exception=seh\r\ncompiler.libcxx=libstdc++11\r\ncompiler.threads=posix\r\ncompiler.version=8.1\r\nos=Windows\r\nos_build=Windows\r\n[options]\r\n[build_requires]\r\n[env]\r\nCONAN_CMAKE_GENERATOR=MinGW Makefiles\r\n```\r\n\r\n```\r\n[requires]\r\nlibx264/20190605\r\n```\r\n\r\n\r\n### Steps to reproduce\r\nChange `libx264/20190605@bincrafters/stable` to `libx264/20190605` build fail.\r\nFailed in\r\n\r\n```\r\nConanException: compiler.version setting required for vcvars not defined\r\n```\r\nBecause of new `conanfile.py` without first determining whether it is `_is_msvc`.\r\nIn no `Visual Studio` environment, `tools.vcvars` due to the error.\r\n\r\n\r\n### Fixed method\r\nin line 94, change to:\r\n```\r\n def build(self):\r\n if self._is_msvc:\r\n with tools.vcvars(self.settings):\r\n autotools = self._configure_autotools()\r\n autotools.make()\r\n else:\r\n autotools = self._configure_autotools()\r\n autotools.make()\r\n\r\n def package(self):\r\n if self._is_msvc:\r\n with tools.vcvars(self.settings):\r\n autotools = self._configure_autotools()\r\n autotools.install()\r\n else:\r\n autotools = self._configure_autotools()\r\n autotools.install()\r\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst='licenses')\r\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\r\n```\r\n\r\n\r\n### Logs\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nlibx264/20190605: Applying build-requirement: nasm/2.14\r\nlibx264/20190605: Applying build-requirement: msys2/20190524\r\nlibx264/20190605: WARN: Build folder is dirty, removing it: C:\\Users\\Home\\.conan\\data\\libx264\\20190605\\_\\_\\build\\1a9b746db3232276ada4cf64b16512ba901fcf09\r\nlibx264/20190605: Copying sources to build folder\r\nlibx264/20190605: Building your package in C:\\Users\\Home\\.conan\\data\\libx264\\20190605\\_\\_\\build\\1a9b746db3232276ada4cf64b16512ba901fcf09\r\nlibx264/20190605: Generator txt created conanbuildinfo.txt\r\nlibx264/20190605: Calling build()\r\nlibx264/20190605: \r\nlibx264/20190605: ERROR: Package '1a9b746db3232276ada4cf64b16512ba901fcf09' build failed\r\nlibx264/20190605: WARN: Build folder C:\\Users\\Home\\.conan\\data\\libx264\\20190605\\_\\_\\build\\1a9b746db3232276ada4cf64b16512ba901fcf09\r\nERROR: libx264/20190605: 
Error in build() method, line 95\r\n\twith tools.vcvars(self.settings):\r\n\tConanException: compiler.version setting required for vcvars not defined\r\n\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nimport os\n\n\nclass LibX264Conan(ConanFile):\n name = \"libx264\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.videolan.org/developers/x264.html\"\n description = \"x264 is a free software library and application for encoding video streams into the \" \\\n \"H.264/MPEG-4 AVC compression format\"\n topics = (\"conan\", \"libx264\", \"video\", \"encoding\")\n license = \"GPL-2.0\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"bit_depth\": [8, 10, \"all\"]}\n default_options = {'shared': False, 'fPIC': True, 'bit_depth': 'all'}\n build_requires = \"nasm/2.13.02\"\n _override_env = {}\n _autotools = None\n\n @property\n def _is_mingw(self):\n return self.settings.os == \"Windows\" and self.settings.compiler == 'gcc'\n\n @property\n def _is_msvc(self):\n return self.settings.compiler == 'Visual Studio'\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def build_requirements(self):\n if \"CONAN_BASH_PATH\" not in os.environ and tools.os_info.is_windows:\n self.build_requires(\"msys2/20190524\")\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = 'x264-snapshot-%s-2245' % self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n @property\n def env(self):\n ret = super(LibX264Conan, self).env\n ret.update(self._override_env)\n return ret\n\n def _configure_autotools(self):\n if not self._autotools:\n prefix = tools.unix_path(self.package_folder)\n args = ['--disable-cli', '--prefix={}'.format(prefix)]\n if self.options.shared:\n args.append('--enable-shared')\n else:\n args.append('--enable-static')\n if self.settings.os != 'Windows' and self.options.fPIC:\n args.append('--enable-pic')\n if self.settings.build_type == 'Debug':\n args.append('--enable-debug')\n args.append('--bit-depth=%s' % str(self.options.bit_depth))\n\n if tools.cross_building(self.settings):\n if self.settings.os == \"Android\":\n # the as of ndk does not work well for building libx264\n self._override_env[\"AS\"] = os.environ[\"CC\"]\n ndk_root = tools.unix_path(os.environ[\"NDK_ROOT\"])\n arch = {'armv7': 'arm',\n 'armv8': 'aarch64',\n 'x86': 'i686',\n 'x86_64': 'x86_64'}.get(str(self.settings.arch))\n abi = 'androideabi' if self.settings.arch == 'armv7' else 'android'\n cross_prefix = \"%s/bin/%s-linux-%s-\" % (ndk_root, arch, abi)\n args.append('--cross-prefix=%s' % cross_prefix)\n\n if self._is_msvc:\n self._override_env['CC'] = 'cl'\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n if self._is_msvc:\n self._autotools.flags.append('-%s' % str(self.settings.compiler.runtime))\n # cannot open program database ... 
if multiple CL.EXE write to the same .PDB file, please use /FS\n self._autotools.flags.append('-FS')\n self._autotools.configure(args=args, build=False, vars=self._override_env, configure_dir=self._source_subfolder)\n return self._autotools\n\n def build(self):\n with tools.vcvars(self.settings):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n with tools.vcvars(self.settings):\n autotools = self._configure_autotools()\n autotools.install()\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst='licenses')\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\n\n def package_info(self):\n if self._is_msvc:\n self.cpp_info.libs = ['libx264.dll.lib' if self.options.shared else 'libx264']\n if self.options.shared:\n self.cpp_info.defines.append(\"X264_API_IMPORTS\")\n elif self._is_mingw:\n self.cpp_info.libs = ['x264.dll' if self.options.shared else 'x264']\n else:\n self.cpp_info.libs = ['x264']\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend(['dl', 'pthread', 'm'])\n elif self.settings.os == \"Android\":\n self.cpp_info.system_libs.extend(['dl', 'm'])\n self.cpp_info.names['pkg_config'] = 'x264'\n", "path": "recipes/libx264/all/conanfile.py"}]}
3107
210
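A hedged sketch of why the `tools.vcvars(self.settings) if self._is_msvc else tools.no_op()` pattern in the diff above works: `no_op()` is just an empty context manager, so the environment wrapper can be picked inline at runtime. The `fake_vcvars` and `build_steps` names below are illustrative stand-ins, not Conan APIs:

```python
from contextlib import contextmanager


@contextmanager
def no_op():
    # stand-in for conans.tools.no_op(): does nothing on enter or exit
    yield


@contextmanager
def fake_vcvars():
    # stand-in for conans.tools.vcvars(settings), which requires MSVC settings
    print("activating MSVC environment")
    try:
        yield
    finally:
        print("restoring environment")


def build_steps(is_msvc: bool) -> None:
    # choose the context manager inline, as the patched recipe does
    with fake_vcvars() if is_msvc else no_op():
        print("running configure and make")


build_steps(is_msvc=False)  # MinGW path: no vcvars, no compiler.version needed
build_steps(is_msvc=True)   # Visual Studio path: vcvars wraps the build
```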
gh_patches_debug_1871
rasdani/github-patches
git_diff
fidals__shopelectro-200
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SE yml fix delivery 3k -> 5k [origin trello task](https://trello.com/c/LyLVDakS/298-se-%D0%BF%D1%80%D0%B0%D0%B2%D1%8C-yml) Внезапно обноружили, что через фид передается( в теге <sales_notes>) неверная инфа о доставке. Исправь на 5к. </issue> <code> [start of shopelectro/settings/base.py] 1 """ 2 Django settings for shopelectro project. 3 4 Generated by 'django-admin startproject' using Django 1.9.5. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.9/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.9/ref/settings/ 11 """ 12 13 import os 14 from datetime import datetime 15 16 import dj_database_url 17 18 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 19 BASE_DIR = os.path.dirname(os.path.dirname( 20 os.path.dirname(os.path.abspath(__file__)))) 21 22 # Quick-start development settings - unsuitable for production 23 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ 24 25 # SECURITY WARNING: keep the secret key used in production secret! 26 SECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key') 27 28 # SECURITY WARNING: don't run with debug turned on in production! 29 DEBUG = True 30 31 # http://bit.ly/sorl-thumbnail-docs 32 THUMBNAIL_DEBUG = False 33 34 ALLOWED_HOSTS = ['*'] 35 36 if os.environ.get('TEST_ENV', False): 37 # disable https in CI 38 # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header 39 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http') 40 41 # Enable in frame loading for Ya.Metric 42 # https://docs.djangoproject.com/es/1.10/ref/clickjacking/ 43 # https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page 44 X_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com' 45 46 # Application definition 47 INSTALLED_APPS = [ 48 # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover 49 'django.contrib.contenttypes', 50 'django.contrib.auth', 51 'django.contrib.messages', 52 'django.contrib.redirects', 53 'django.contrib.sessions', 54 'django.contrib.sitemaps', 55 'django.contrib.sites', 56 'django.contrib.staticfiles', 57 'django.contrib.humanize', 58 'django_user_agents', 59 'generic_admin', 60 'django.contrib.admin.apps.SimpleAdminConfig', 61 'debug_toolbar', 62 'mptt', 63 'widget_tweaks', 64 'sorl.thumbnail', 65 'images', 66 'pages', 67 'catalog', 68 'ecommerce', 69 'shopelectro', 70 ] 71 72 MIDDLEWARE = [ 73 'django.middleware.security.SecurityMiddleware', 74 'django.contrib.sessions.middleware.SessionMiddleware', 75 'django.middleware.common.CommonMiddleware', 76 'django.middleware.csrf.CsrfViewMiddleware', 77 'django.contrib.auth.middleware.AuthenticationMiddleware', 78 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 79 'django.contrib.messages.middleware.MessageMiddleware', 80 'django.middleware.clickjacking.XFrameOptionsMiddleware', 81 'django.contrib.redirects.middleware.RedirectFallbackMiddleware', 82 'django.middleware.locale.LocaleMiddleware', 83 'django_user_agents.middleware.UserAgentMiddleware', 84 'debug_toolbar.middleware.DebugToolbarMiddleware', 85 ] 86 87 ROOT_URLCONF = 'shopelectro.urls' 88 89 TEMPLATES = [ 90 { 91 'BACKEND': 'django.template.backends.django.DjangoTemplates', 92 'DIRS': [os.path.join(BASE_DIR, 'templates')], 93 'APP_DIRS': True, 94 'OPTIONS': { 95 'context_processors': [ 96 
'django.template.context_processors.debug', 97 'django.template.context_processors.media', 98 'django.template.context_processors.request', 99 'django.template.context_processors.static', 100 'django.contrib.auth.context_processors.auth', 101 'django.contrib.messages.context_processors.messages', 102 'ecommerce.context_processors.cart', 103 'shopelectro.context_processors.shop', 104 ], 105 }, 106 }, 107 ] 108 109 WSGI_APPLICATION = 'shopelectro.wsgi.application' 110 111 # Password validation 112 # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators 113 114 AUTH_PASSWORD_VALIDATORS = [ 115 { 116 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 117 }, 118 { 119 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 120 }, 121 { 122 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 123 }, 124 { 125 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 126 }, 127 ] 128 129 # Internationalization 130 # https://docs.djangoproject.com/en/1.9/topics/i18n/ 131 132 LOCALE_NAME = 'en_US' 133 TIME_ZONE = 'UTC' 134 135 USE_I18N = True 136 USE_L10N = True 137 USE_TZ = True 138 139 LOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')] 140 FORMAT_MODULE_PATH = [ 141 'shopelectro.formats', 142 ] 143 144 # Static files (CSS, JavaScript, Images) 145 # https://docs.djangoproject.com/en/1.9/howto/static-files/ 146 STATIC_URL = '/static/' 147 STATIC_ROOT = os.path.join(BASE_DIR, 'static') 148 ASSETS_DIR = os.path.join(BASE_DIR, 'assets') 149 150 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' 151 152 STATICFILES_DIRS = [ 153 os.path.join(BASE_DIR, 'front/build'), 154 ASSETS_DIR, 155 ] 156 157 MEDIA_URL = '/media/' 158 MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 159 160 # It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env` 161 DATABASE_URL = 'postgres://user:pass@db_name/table' 162 DATABASES = { 163 'default': dj_database_url.config( 164 env='DATABASE_URL', 165 default=DATABASE_URL, 166 ) 167 } 168 169 LOGGING = { 170 'version': 1, 171 'disable_existing_loggers': False, 172 'handlers': { 173 'console': { 174 'class': 'logging.StreamHandler', 175 }, 176 }, 177 'loggers': { 178 'django': { 179 'handlers': ['console'], 180 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), 181 }, 182 }, 183 } 184 185 SITE_CREATED = datetime(2013, 1, 1) 186 187 LOCALHOST = 'http://127.0.0.1:8000/' 188 BASE_URL = 'https://www.shopelectro.ru' 189 190 PLACEHOLDER_IMAGE = 'images/logo.png' 191 PLACEHOLDER_ALT = 'Логотип компании Shopelectro' 192 193 # Autocomplete and search settings 194 SEARCH_SEE_ALL_LABEL = 'Смотреть все результаты' 195 196 # For sitemaps and sites framework 197 SITE_ID = 1 198 SITE_DOMAIN_NAME = 'www.shopelectro.ru' 199 200 # Used to retrieve instances in ecommerce.Cart 201 CART_ID = 'cart' 202 203 # Used to define choices attr in definition of Order.payment_type field 204 PAYMENT_OPTIONS = ( 205 ('cash', 'Наличные'), 206 ('cashless', 'Безналичные и денежные переводы'), 207 ('AC', 'Банковская карта'), 208 ('PC', 'Яндекс.Деньги'), 209 ('GP', 'Связной (терминал)'), 210 ('AB', 'Альфа-Клик'), 211 ) 212 213 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env` 214 YANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass') 215 216 # Used for order's email in ecommerce app 217 FAKE_ORDER_NUMBER = 6000 218 219 # Subjects for different types of emails sent from SE. 
220 EMAIL_SUBJECTS = { 221 'call': 'Обратный звонок', 222 'order': 'Заказ №{0.fake_order_number}', 223 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса', 224 'one_click': 'Заказ в один клик №{0.fake_order_number}', 225 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете', 226 } 227 228 # Email configs 229 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env` 230 EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass') 231 EMAIL_HOST_USER = '[email protected]' 232 EMAIL_USE_TLS = True 233 EMAIL_HOST = 'smtp.yandex.ru' 234 EMAIL_PORT = 587 235 EMAIL_SENDER = '[email protected]' 236 EMAIL_RECIPIENT = '[email protected]' 237 SHOP_EMAIL = '[email protected]' 238 239 # FTP configs 240 FTP_USER = os.environ.get('FTP_USER', 'user') 241 FTP_PASS = os.environ.get('FTP_PASS', 'pass') 242 FTP_IP = os.environ.get('FTP_IP', '0.0.0.0') 243 244 # Used in admin image uploads 245 MODEL_TYPES = { 246 'Product': { 247 'app_name': 'shopelectro', 248 'dir_name': 'products', 249 }, 250 'Category': { 251 'app_name': 'shopelectro', 252 'dir_name': 'categories', 253 } 254 } 255 256 # This need for using {% debug %} variable in templates. 257 INTERNAL_IPS = ( 258 '127.0.0.1', 259 ) 260 261 TOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764] 262 263 SHOP = { 264 'id': '69886', 265 'scid': '64788', 266 'success_url': BASE_URL + '/shop/order-success/', 267 'fail_url': BASE_URL + '/', 268 'cps_phone': '+78124163200', 269 'cps_email': '[email protected]', 270 'local_delivery_cost': 300, 271 'local_delivery_cost_threshold': 3000, 272 } 273 274 # used in data-migrations and tests 275 CUSTOM_PAGES = { 276 'index': { 277 'slug': '', 278 'name': 'Интернет-магазин элементов питания "ShopElectro"', 279 'menu_title': 'Главная', 280 'title': 'Интернет-магазин Элементов питания с доставкой по России', 281 }, 282 'sitemap': { 283 'slug': 'sitemap', 284 'h1': 'Карта сайта', 285 'name': 'Карта сайта', 286 }, 287 'order': { 288 'slug': 'order', 289 'name': 'Оформление заказа', 290 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург', 291 }, 292 'search': { 293 'slug': 'search', 294 'name': 'Результаты поиска', 295 }, 296 'catalog': { 297 'slug': 'catalog', 298 'name': 'Каталог товаров', 299 'menu_title': 'Каталог', 300 }, 301 'order_success': { 302 'slug': 'order-success', 303 'name': 'Заказ принят', 304 } 305 } 306 [end of shopelectro/settings/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py --- a/shopelectro/settings/base.py +++ b/shopelectro/settings/base.py @@ -268,7 +268,7 @@ 'cps_phone': '+78124163200', 'cps_email': '[email protected]', 'local_delivery_cost': 300, - 'local_delivery_cost_threshold': 3000, + 'local_delivery_cost_threshold': 5000, } # used in data-migrations and tests
{"golden_diff": "diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py\n--- a/shopelectro/settings/base.py\n+++ b/shopelectro/settings/base.py\n@@ -268,7 +268,7 @@\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n- 'local_delivery_cost_threshold': 3000,\n+ 'local_delivery_cost_threshold': 5000,\n }\n \n # used in data-migrations and tests\n", "issue": "SE yml fix delivery 3k -> 5k\n[origin trello task](https://trello.com/c/LyLVDakS/298-se-%D0%BF%D1%80%D0%B0%D0%B2%D1%8C-yml)\r\n\r\n\u0412\u043d\u0435\u0437\u0430\u043f\u043d\u043e \u043e\u0431\u043d\u043e\u0440\u0443\u0436\u0438\u043b\u0438, \u0447\u0442\u043e \u0447\u0435\u0440\u0435\u0437 \u0444\u0438\u0434 \u043f\u0435\u0440\u0435\u0434\u0430\u0435\u0442\u0441\u044f( \u0432 \u0442\u0435\u0433\u0435 <sales_notes>) \u043d\u0435\u0432\u0435\u0440\u043d\u0430\u044f \u0438\u043d\u0444\u0430 \u043e \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0435.\r\n\u0418\u0441\u043f\u0440\u0430\u0432\u044c \u043d\u0430 5\u043a.\n", "before_files": [{"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 
'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. 
Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = '\u0421\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0441\u0435 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'),\n ('cashless', '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'),\n ('AC', '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'),\n ('PC', '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'),\n ('GP', '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'),\n ('AB', '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a',\n 'order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number}',\n 'yandex_order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number} | \u042f\u043d\u0434\u0435\u043a\u0441.\u041a\u0430\u0441\u0441\u0430',\n 'one_click': '\u0417\u0430\u043a\u0430\u0437 \u0432 \u043e\u0434\u0438\u043d \u043a\u043b\u0438\u043a \u2116{0.fake_order_number}',\n 'ya_feedback_request': '\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u0441 \u043d\u0430 \u042f\u043d\u0434\u0435\u043a\u0441.\u041c\u0430\u0440\u043a\u0435\u0442\u0435',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 3000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \"ShopElectro\"',\n 'menu_title': '\u0413\u043b\u0430\u0432\u043d\u0430\u044f',\n 'title': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u042d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \u0441 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u043e\u0439 \u043f\u043e \u0420\u043e\u0441\u0441\u0438\u0438',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n 'name': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n },\n 'order': {\n 'slug': 'order',\n 'name': '\u041e\u0444\u043e\u0440\u043c\u043b\u0435\u043d\u0438\u0435 \u0437\u0430\u043a\u0430\u0437\u0430',\n 'title': '\u041a\u043e\u0440\u0437\u0438\u043d\u0430 \u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d shopelectro.ru \u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433',\n },\n 'search': {\n 'slug': 'search',\n 'name': '\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u043f\u043e\u0438\u0441\u043a\u0430',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n 'menu_title': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442',\n }\n}\n", "path": "shopelectro/settings/base.py"}]}
3830
136
gh_patches_debug_10792
rasdani/github-patches
git_diff
AnalogJ__lexicon-1100
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> transip provider needs requests>=2.27.0 in dependency spec The update of the transip provider from #1086 included in 3.9.2 uses `from requests.exceptions import JSONDecodeError` which was first included in requests 2.27.0 but the requirements spec only requires `>=2`: https://github.com/AnalogJ/lexicon/blob/d8b26bcfffd484f34444c91ceb3ced4a2583bd18/pyproject.toml#L43 This is passed directly through to the requirement spec in the wheel: ``` Requires-Dist: requests (>=2) ``` And means that someone who upgrades dns-lexicon from a previous version using pip will get an import error. </issue> <code> [start of lexicon/providers/transip.py] 1 """Module provider for TransIP""" 2 import binascii 3 import json 4 import logging 5 import uuid 6 from base64 import b64decode, b64encode 7 from typing import Any, Dict, List, Optional 8 9 import requests 10 from cryptography.hazmat.backends import default_backend 11 from cryptography.hazmat.primitives import hashes 12 from cryptography.hazmat.primitives.asymmetric import padding 13 from cryptography.hazmat.primitives.serialization import load_pem_private_key 14 from requests.exceptions import JSONDecodeError 15 16 from lexicon.exceptions import LexiconError 17 from lexicon.providers.base import Provider as BaseProvider 18 19 LOGGER = logging.getLogger(__name__) 20 21 NAMESERVER_DOMAINS: List[str] = [] 22 23 API_BASE_URL = "https://api.transip.nl/v6" 24 25 26 def provider_parser(subparser): 27 """Configure provider parser for TransIP""" 28 subparser.add_argument( 29 "--auth-username", help="specify username for authentication" 30 ) 31 subparser.add_argument( 32 "--auth-api-key", 33 help="specify the private key to use for API authentication, in PEM format: can be either " 34 "the path of the key file (eg. /tmp/key.pem) or the base64 encoded content of this " 35 "file prefixed by 'base64::' (eg. base64::eyJhbGciOyJ...)", 36 ) 37 subparser.add_argument( 38 "--auth-key-is-global", 39 action="store_true", 40 help="set this flag is the private key used is a global key with no IP whitelist restriction", 41 ) 42 43 44 class Provider(BaseProvider): 45 """ 46 Provider class for TransIP 47 48 provider_options can be overwritten by a Provider to setup custom defaults. 49 They will be overwritten by any options set via the CLI or Env. 
50 order is: 51 52 """ 53 54 def __init__(self, config): 55 super(Provider, self).__init__(config) 56 self.provider_name = "transip" 57 self.domain_id = None 58 59 private_key_conf = self._get_provider_option("auth_api_key") 60 if private_key_conf.startswith("base64::"): 61 private_key_bytes = b64decode(private_key_conf.replace("base64::", "")) 62 else: 63 with open( 64 private_key_conf, 65 "rb", 66 ) as file: 67 private_key_bytes = file.read() 68 69 self.private_key = load_pem_private_key(private_key_bytes, password=None) 70 self.token: str 71 72 def _authenticate(self): 73 request_body = { 74 "login": self._get_provider_option("auth_username"), 75 "nonce": uuid.uuid4().hex, 76 "global_key": self._get_provider_option("auth_key_is_global") or False, 77 } 78 79 request_body_bytes = json.dumps(request_body).encode() 80 81 signature = self.private_key.sign( 82 request_body_bytes, 83 padding.PKCS1v15(), 84 hashes.SHA512(), 85 ) 86 87 headers = {"Signature": b64encode(signature).decode()} 88 89 response = requests.request( 90 "POST", f"{API_BASE_URL}/auth", json=request_body, headers=headers 91 ) 92 response.raise_for_status() 93 94 self.token = response.json()["token"] 95 96 data = self._get(f"/domains/{self.domain}") 97 98 self.domain_id = data["domain"]["authCode"] 99 100 def _create_record(self, rtype: str, name: str, content: str) -> bool: 101 if not rtype or not name or not content: 102 raise Exception( 103 "Error, rtype, name and content are mandatory to create a record." 104 ) 105 106 identifier = Provider._identifier( 107 {"type": rtype, "name": self._full_name(name), "content": content} 108 ) 109 110 if any( 111 record 112 for record in self._list_records(rtype=rtype, name=name, content=content) 113 if record["id"] == identifier 114 ): 115 LOGGER.debug("create_record (ignored, duplicate): %s", identifier) 116 return True 117 118 data = { 119 "dnsEntry": { 120 "type": rtype, 121 "name": self._relative_name(name), 122 "content": content, 123 "expire": self._get_lexicon_option("ttl"), 124 }, 125 } 126 127 self._post(f"/domains/{self.domain}/dns", data=data) 128 129 LOGGER.debug("create_record: %s", identifier) 130 131 return True 132 133 def _list_records( 134 self, 135 rtype: Optional[str] = None, 136 name: Optional[str] = None, 137 content: Optional[str] = None, 138 ) -> List[Dict[str, Any]]: 139 data = self._get(f"/domains/{self.domain}/dns") 140 141 records = [] 142 for entry in data["dnsEntries"]: 143 record = { 144 "type": entry["type"], 145 "name": self._full_name(entry["name"]), 146 "ttl": entry["expire"], 147 "content": entry["content"], 148 } 149 record["id"] = Provider._identifier(record) 150 records.append(record) 151 152 if rtype: 153 records = [record for record in records if record["type"] == rtype] 154 if name: 155 records = [ 156 record for record in records if record["name"] == self._full_name(name) 157 ] 158 if content: 159 records = [record for record in records if record["content"] == content] 160 161 LOGGER.debug("list_records: %s", records) 162 163 return records 164 165 def _update_record( 166 self, 167 identifier: Optional[str] = None, 168 rtype: Optional[str] = None, 169 name: Optional[str] = None, 170 content: Optional[str] = None, 171 ) -> bool: 172 if not identifier and (not rtype or not name): 173 raise Exception("Error, identifier or rtype+name parameters are required.") 174 175 if identifier: 176 records = self._list_records() 177 records_to_update = [ 178 record for record in records if record["id"] == identifier 179 ] 180 else: 181 records_to_update = 
self._list_records(rtype=rtype, name=name) 182 183 if not records_to_update: 184 raise Exception( 185 f"Error, could not find a record for given identifier: {identifier}" 186 ) 187 188 if len(records_to_update) > 1: 189 LOGGER.warning( 190 "Warning, multiple records found for given parameters, " 191 "only first one will be updated: %s", 192 records_to_update, 193 ) 194 195 record = records_to_update[0] 196 197 # TransIP API is not designed to update one record out of several records 198 # matching the same type+name (eg. multi-valued TXT entries). 199 # To circumvent the limitation, we remove first the record to update, then 200 # recreate it with the updated content. 201 202 data = { 203 "dnsEntry": { 204 "type": record["type"], 205 "name": self._relative_name(record["name"]), 206 "content": record["content"], 207 "expire": record["ttl"], 208 }, 209 } 210 211 self._request("DELETE", f"/domains/{self.domain}/dns", data=data) 212 213 data["dnsEntry"]["content"] = content 214 215 self._post(f"/domains/{self.domain}/dns", data=data) 216 217 LOGGER.debug("update_record: %s", record["id"]) 218 219 return True 220 221 def _delete_record( 222 self, 223 identifier: Optional[str] = None, 224 rtype: Optional[str] = None, 225 name: Optional[str] = None, 226 content: Optional[str] = None, 227 ) -> bool: 228 if identifier: 229 records = self._list_records() 230 records = [record for record in records if record["id"] == identifier] 231 232 if not records: 233 raise LexiconError( 234 f"Could not find a record matching the identifier provider: {identifier}" 235 ) 236 else: 237 records = self._list_records(rtype, name, content) 238 239 for record in records: 240 data = { 241 "dnsEntry": { 242 "type": record["type"], 243 "name": self._relative_name(record["name"]), 244 "content": record["content"], 245 "expire": record["ttl"], 246 }, 247 } 248 249 self._request("DELETE", f"/domains/{self.domain}/dns", data=data) 250 251 LOGGER.debug("delete_records: %s %s %s %s", identifier, rtype, name, content) 252 253 return True 254 255 def _request( 256 self, 257 action: str = "GET", 258 url: str = "/", 259 data: Optional[Dict] = None, 260 query_params: Optional[Dict] = None, 261 ) -> Optional[Dict[str, Any]]: 262 response = requests.request( 263 action, 264 f"{API_BASE_URL}{url}", 265 params=query_params, 266 json=data, 267 headers={"Authorization": f"Bearer {self.token}"}, 268 ) 269 270 response.raise_for_status() 271 272 try: 273 return response.json() 274 except JSONDecodeError: 275 return None 276 277 @staticmethod 278 def _identifier(record): 279 digest = hashes.Hash(hashes.SHA256(), backend=default_backend()) 280 digest.update(("type=" + record.get("type", "") + ",").encode("utf-8")) 281 digest.update(("name=" + record.get("name", "") + ",").encode("utf-8")) 282 digest.update(("content=" + record.get("content", "") + ",").encode("utf-8")) 283 284 return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7] 285 [end of lexicon/providers/transip.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lexicon/providers/transip.py b/lexicon/providers/transip.py
--- a/lexicon/providers/transip.py
+++ b/lexicon/providers/transip.py
@@ -11,7 +11,11 @@
 from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.primitives.asymmetric import padding
 from cryptography.hazmat.primitives.serialization import load_pem_private_key
-from requests.exceptions import JSONDecodeError
+
+try:
+    from simplejson import JSONDecodeError
+except ImportError:
+    from json import JSONDecodeError  # type: ignore[misc]
 
 from lexicon.exceptions import LexiconError
 from lexicon.providers.base import Provider as BaseProvider
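For context, a minimal standalone sketch of the version-tolerant import the patch introduces; the helper function, URL handling and module usage here are illustrative assumptions, not code from the repository:

```python
# Prefer simplejson's exception when that package is present (requests defers
# to it), otherwise fall back to the stdlib one. This catches JSON decode
# failures on any requests >= 2 release, old or new.
try:
    from simplejson import JSONDecodeError
except ImportError:
    from json import JSONDecodeError

import requests


def fetch_json_or_none(url: str):
    """Return the decoded JSON body, or None when the body is not JSON."""
    response = requests.get(url)
    response.raise_for_status()
    try:
        return response.json()
    except JSONDecodeError:
        return None
```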
{"golden_diff": "diff --git a/lexicon/providers/transip.py b/lexicon/providers/transip.py\n--- a/lexicon/providers/transip.py\n+++ b/lexicon/providers/transip.py\n@@ -11,7 +11,11 @@\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.primitives.asymmetric import padding\n from cryptography.hazmat.primitives.serialization import load_pem_private_key\n-from requests.exceptions import JSONDecodeError\n+\n+try:\n+ from simplejson import JSONDecodeError\n+except ImportError:\n+ from json import JSONDecodeError # type: ignore[misc]\n \n from lexicon.exceptions import LexiconError\n from lexicon.providers.base import Provider as BaseProvider\n", "issue": "transip provider needs requests>=2.27.0 in dependency spec\nThe update of the transip provider from #1086 included in 3.9.2 uses `from requests.exceptions import JSONDecodeError` which was first included in requests 2.27.0 but the requirements spec only requires `>=2`:\r\nhttps://github.com/AnalogJ/lexicon/blob/d8b26bcfffd484f34444c91ceb3ced4a2583bd18/pyproject.toml#L43\r\n\r\nThis is passed directly through to the requirement spec in the wheel:\r\n```\r\nRequires-Dist: requests (>=2)\r\n```\r\n\r\nAnd means that someone who upgrades dns-lexicon from a previous version using pip will get an import error.\n", "before_files": [{"content": "\"\"\"Module provider for TransIP\"\"\"\nimport binascii\nimport json\nimport logging\nimport uuid\nfrom base64 import b64decode, b64encode\nfrom typing import Any, Dict, List, Optional\n\nimport requests\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.serialization import load_pem_private_key\nfrom requests.exceptions import JSONDecodeError\n\nfrom lexicon.exceptions import LexiconError\nfrom lexicon.providers.base import Provider as BaseProvider\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS: List[str] = []\n\nAPI_BASE_URL = \"https://api.transip.nl/v6\"\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for TransIP\"\"\"\n subparser.add_argument(\n \"--auth-username\", help=\"specify username for authentication\"\n )\n subparser.add_argument(\n \"--auth-api-key\",\n help=\"specify the private key to use for API authentication, in PEM format: can be either \"\n \"the path of the key file (eg. /tmp/key.pem) or the base64 encoded content of this \"\n \"file prefixed by 'base64::' (eg. 
base64::eyJhbGciOyJ...)\",\n )\n subparser.add_argument(\n \"--auth-key-is-global\",\n action=\"store_true\",\n help=\"set this flag is the private key used is a global key with no IP whitelist restriction\",\n )\n\n\nclass Provider(BaseProvider):\n \"\"\"\n Provider class for TransIP\n\n provider_options can be overwritten by a Provider to setup custom defaults.\n They will be overwritten by any options set via the CLI or Env.\n order is:\n\n \"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.provider_name = \"transip\"\n self.domain_id = None\n\n private_key_conf = self._get_provider_option(\"auth_api_key\")\n if private_key_conf.startswith(\"base64::\"):\n private_key_bytes = b64decode(private_key_conf.replace(\"base64::\", \"\"))\n else:\n with open(\n private_key_conf,\n \"rb\",\n ) as file:\n private_key_bytes = file.read()\n\n self.private_key = load_pem_private_key(private_key_bytes, password=None)\n self.token: str\n\n def _authenticate(self):\n request_body = {\n \"login\": self._get_provider_option(\"auth_username\"),\n \"nonce\": uuid.uuid4().hex,\n \"global_key\": self._get_provider_option(\"auth_key_is_global\") or False,\n }\n\n request_body_bytes = json.dumps(request_body).encode()\n\n signature = self.private_key.sign(\n request_body_bytes,\n padding.PKCS1v15(),\n hashes.SHA512(),\n )\n\n headers = {\"Signature\": b64encode(signature).decode()}\n\n response = requests.request(\n \"POST\", f\"{API_BASE_URL}/auth\", json=request_body, headers=headers\n )\n response.raise_for_status()\n\n self.token = response.json()[\"token\"]\n\n data = self._get(f\"/domains/{self.domain}\")\n\n self.domain_id = data[\"domain\"][\"authCode\"]\n\n def _create_record(self, rtype: str, name: str, content: str) -> bool:\n if not rtype or not name or not content:\n raise Exception(\n \"Error, rtype, name and content are mandatory to create a record.\"\n )\n\n identifier = Provider._identifier(\n {\"type\": rtype, \"name\": self._full_name(name), \"content\": content}\n )\n\n if any(\n record\n for record in self._list_records(rtype=rtype, name=name, content=content)\n if record[\"id\"] == identifier\n ):\n LOGGER.debug(\"create_record (ignored, duplicate): %s\", identifier)\n return True\n\n data = {\n \"dnsEntry\": {\n \"type\": rtype,\n \"name\": self._relative_name(name),\n \"content\": content,\n \"expire\": self._get_lexicon_option(\"ttl\"),\n },\n }\n\n self._post(f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"create_record: %s\", identifier)\n\n return True\n\n def _list_records(\n self,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = None,\n ) -> List[Dict[str, Any]]:\n data = self._get(f\"/domains/{self.domain}/dns\")\n\n records = []\n for entry in data[\"dnsEntries\"]:\n record = {\n \"type\": entry[\"type\"],\n \"name\": self._full_name(entry[\"name\"]),\n \"ttl\": entry[\"expire\"],\n \"content\": entry[\"content\"],\n }\n record[\"id\"] = Provider._identifier(record)\n records.append(record)\n\n if rtype:\n records = [record for record in records if record[\"type\"] == rtype]\n if name:\n records = [\n record for record in records if record[\"name\"] == self._full_name(name)\n ]\n if content:\n records = [record for record in records if record[\"content\"] == content]\n\n LOGGER.debug(\"list_records: %s\", records)\n\n return records\n\n def _update_record(\n self,\n identifier: Optional[str] = None,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = 
None,\n ) -> bool:\n if not identifier and (not rtype or not name):\n raise Exception(\"Error, identifier or rtype+name parameters are required.\")\n\n if identifier:\n records = self._list_records()\n records_to_update = [\n record for record in records if record[\"id\"] == identifier\n ]\n else:\n records_to_update = self._list_records(rtype=rtype, name=name)\n\n if not records_to_update:\n raise Exception(\n f\"Error, could not find a record for given identifier: {identifier}\"\n )\n\n if len(records_to_update) > 1:\n LOGGER.warning(\n \"Warning, multiple records found for given parameters, \"\n \"only first one will be updated: %s\",\n records_to_update,\n )\n\n record = records_to_update[0]\n\n # TransIP API is not designed to update one record out of several records\n # matching the same type+name (eg. multi-valued TXT entries).\n # To circumvent the limitation, we remove first the record to update, then\n # recreate it with the updated content.\n\n data = {\n \"dnsEntry\": {\n \"type\": record[\"type\"],\n \"name\": self._relative_name(record[\"name\"]),\n \"content\": record[\"content\"],\n \"expire\": record[\"ttl\"],\n },\n }\n\n self._request(\"DELETE\", f\"/domains/{self.domain}/dns\", data=data)\n\n data[\"dnsEntry\"][\"content\"] = content\n\n self._post(f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"update_record: %s\", record[\"id\"])\n\n return True\n\n def _delete_record(\n self,\n identifier: Optional[str] = None,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = None,\n ) -> bool:\n if identifier:\n records = self._list_records()\n records = [record for record in records if record[\"id\"] == identifier]\n\n if not records:\n raise LexiconError(\n f\"Could not find a record matching the identifier provider: {identifier}\"\n )\n else:\n records = self._list_records(rtype, name, content)\n\n for record in records:\n data = {\n \"dnsEntry\": {\n \"type\": record[\"type\"],\n \"name\": self._relative_name(record[\"name\"]),\n \"content\": record[\"content\"],\n \"expire\": record[\"ttl\"],\n },\n }\n\n self._request(\"DELETE\", f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"delete_records: %s %s %s %s\", identifier, rtype, name, content)\n\n return True\n\n def _request(\n self,\n action: str = \"GET\",\n url: str = \"/\",\n data: Optional[Dict] = None,\n query_params: Optional[Dict] = None,\n ) -> Optional[Dict[str, Any]]:\n response = requests.request(\n action,\n f\"{API_BASE_URL}{url}\",\n params=query_params,\n json=data,\n headers={\"Authorization\": f\"Bearer {self.token}\"},\n )\n\n response.raise_for_status()\n\n try:\n return response.json()\n except JSONDecodeError:\n return None\n\n @staticmethod\n def _identifier(record):\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update((\"type=\" + record.get(\"type\", \"\") + \",\").encode(\"utf-8\"))\n digest.update((\"name=\" + record.get(\"name\", \"\") + \",\").encode(\"utf-8\"))\n digest.update((\"content=\" + record.get(\"content\", \"\") + \",\").encode(\"utf-8\"))\n\n return binascii.hexlify(digest.finalize()).decode(\"utf-8\")[0:7]\n", "path": "lexicon/providers/transip.py"}]}
3,508
153
gh_patches_debug_33279
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-29682
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> One seemingly random "Unsupported URL", while others works just fine ## Checklist - [x] I'm reporting a broken site support - [x] I've verified that I'm running youtube-dl version **2021.06.06** - [x] I've checked that all provided URLs are alive and playable in a browser - [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped - [x] I've searched the bugtracker for similar issues including closed ones ## Verbose log ``` youtube-dl -v -F https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/ [debug] System config: [] [debug] User config: [] [debug] Custom config: [] [debug] Command-line args: ['-v', '-F', 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/'] [debug] Encodings: locale cp65001, fs mbcs, out cp65001, pref cp65001 [debug] youtube-dl version 2021.06.06 [debug] Python version 3.4.4 (CPython) - Windows-10-10.0.19041 [debug] exe versions: ffmpeg 4.2.3, ffprobe git-2019-12-17-bd83191 [debug] Proxy map: {} [generic] 9999a34: Requesting header WARNING: Falling back on generic information extractor. [generic] 9999a34: Downloading webpage [generic] 9999a34: Extracting information ERROR: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/ Traceback (most recent call last): File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 815, in wrapper File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 836, in __extract_info File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\extractor\common.py", line 534, in extract File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\extractor\generic.py", line 3520, in _real_extract youtube_dl.utils.UnsupportedError: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/ ``` ## Description I'm getting " Unsupported URL" for this single link. Every other url I've tried from the nhk site (https://www3.nhk.or.jp/nhkworld/), formatted exactly like this one, downloads just fine. 
</issue> <code> [start of youtube_dl/extractor/nhk.py] 1 from __future__ import unicode_literals 2 3 import re 4 5 from .common import InfoExtractor 6 from ..utils import urljoin 7 8 9 class NhkBaseIE(InfoExtractor): 10 _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json' 11 _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand' 12 _TYPE_REGEX = r'/(?P<type>video|audio)/' 13 14 def _call_api(self, m_id, lang, is_video, is_episode, is_clip): 15 return self._download_json( 16 self._API_URL_TEMPLATE % ( 17 'v' if is_video else 'r', 18 'clip' if is_clip else 'esd', 19 'episode' if is_episode else 'program', 20 m_id, lang, '/all' if is_video else ''), 21 m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or [] 22 23 def _extract_episode_info(self, url, episode=None): 24 fetch_episode = episode is None 25 lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups() 26 if episode_id.isdigit(): 27 episode_id = episode_id[:4] + '-' + episode_id[4:] 28 29 is_video = m_type == 'video' 30 if fetch_episode: 31 episode = self._call_api( 32 episode_id, lang, is_video, True, episode_id[:4] == '9999')[0] 33 title = episode.get('sub_title_clean') or episode['sub_title'] 34 35 def get_clean_field(key): 36 return episode.get(key + '_clean') or episode.get(key) 37 38 series = get_clean_field('title') 39 40 thumbnails = [] 41 for s, w, h in [('', 640, 360), ('_l', 1280, 720)]: 42 img_path = episode.get('image' + s) 43 if not img_path: 44 continue 45 thumbnails.append({ 46 'id': '%dp' % h, 47 'height': h, 48 'width': w, 49 'url': 'https://www3.nhk.or.jp' + img_path, 50 }) 51 52 info = { 53 'id': episode_id + '-' + lang, 54 'title': '%s - %s' % (series, title) if series and title else title, 55 'description': get_clean_field('description'), 56 'thumbnails': thumbnails, 57 'series': series, 58 'episode': title, 59 } 60 if is_video: 61 vod_id = episode['vod_id'] 62 info.update({ 63 '_type': 'url_transparent', 64 'ie_key': 'Piksel', 65 'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id, 66 'id': vod_id, 67 }) 68 else: 69 if fetch_episode: 70 audio_path = episode['audio']['audio'] 71 info['formats'] = self._extract_m3u8_formats( 72 'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path, 73 episode_id, 'm4a', entry_protocol='m3u8_native', 74 m3u8_id='hls', fatal=False) 75 for f in info['formats']: 76 f['language'] = lang 77 else: 78 info.update({ 79 '_type': 'url_transparent', 80 'ie_key': NhkVodIE.ie_key(), 81 'url': url, 82 }) 83 return info 84 85 86 class NhkVodIE(NhkBaseIE): 87 _VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX) 88 # Content available only for a limited period of time. Visit 89 # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples. 
90 _TESTS = [{ 91 # video clip 92 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/', 93 'md5': '7a90abcfe610ec22a6bfe15bd46b30ca', 94 'info_dict': { 95 'id': 'a95j5iza', 96 'ext': 'mp4', 97 'title': "Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU", 98 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5', 99 'timestamp': 1565965194, 100 'upload_date': '20190816', 101 }, 102 }, { 103 # audio clip 104 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/', 105 'info_dict': { 106 'id': 'r_inventions-20201104-1-en', 107 'ext': 'm4a', 108 'title': "Japan's Top Inventions - Miniature Video Cameras", 109 'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b', 110 }, 111 'params': { 112 # m3u8 download 113 'skip_download': True, 114 }, 115 }, { 116 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/', 117 'only_matching': True, 118 }, { 119 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/', 120 'only_matching': True, 121 }, { 122 'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/', 123 'only_matching': True, 124 }, { 125 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/', 126 'only_matching': True, 127 }] 128 129 def _real_extract(self, url): 130 return self._extract_episode_info(url) 131 132 133 class NhkVodProgramIE(NhkBaseIE): 134 _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX) 135 _TESTS = [{ 136 # video program episodes 137 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway', 138 'info_dict': { 139 'id': 'japanrailway', 140 'title': 'Japan Railway Journal', 141 }, 142 'playlist_mincount': 1, 143 }, { 144 # video program clips 145 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip', 146 'info_dict': { 147 'id': 'japanrailway', 148 'title': 'Japan Railway Journal', 149 }, 150 'playlist_mincount': 5, 151 }, { 152 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/', 153 'only_matching': True, 154 }, { 155 # audio program 156 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/', 157 'only_matching': True, 158 }] 159 160 def _real_extract(self, url): 161 lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups() 162 163 episodes = self._call_api( 164 program_id, lang, m_type == 'video', False, episode_type == 'clip') 165 166 entries = [] 167 for episode in episodes: 168 episode_path = episode.get('url') 169 if not episode_path: 170 continue 171 entries.append(self._extract_episode_info( 172 urljoin(url, episode_path), episode)) 173 174 program_title = None 175 if entries: 176 program_title = entries[0].get('series') 177 178 return self.playlist_result(entries, program_id, program_title) 179 [end of youtube_dl/extractor/nhk.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/youtube_dl/extractor/nhk.py b/youtube_dl/extractor/nhk.py
--- a/youtube_dl/extractor/nhk.py
+++ b/youtube_dl/extractor/nhk.py
@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -23,7 +24,7 @@
     def _extract_episode_info(self, url, episode=None):
         fetch_episode = episode is None
         lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()
-        if episode_id.isdigit():
+        if len(episode_id) == 7:
             episode_id = episode_id[:4] + '-' + episode_id[4:]
 
         is_video = m_type == 'video'
@@ -84,7 +85,8 @@
 
 
 class NhkVodIE(NhkBaseIE):
-    _VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
+    # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], eg
+    _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
     # Content available only for a limited period of time. Visit
     # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
     _TESTS = [{
@@ -124,6 +126,19 @@
     }, {
         'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',
         'only_matching': True,
+    }, {
+        # video, alphabetic character in ID #29670
+        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',
+        'only_matching': True,
+        'info_dict': {
+            'id': 'qfjay6cg',
+            'ext': 'mp4',
+            'title': 'DESIGN TALKS plus - Fishermen’s Finery',
+            'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',
+            'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\.jpg\?w=1920&h=1080$',
+            'upload_date': '20210615',
+            'timestamp': 1623722008,
+        }
     }]
 
     def _real_extract(self, url):
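To see why the original pattern rejected the reported URL, here is a small self-contained check; the ID fragments are taken from the diff above, while the surrounding `_BASE_URL_REGEX`/`_TYPE_REGEX` prefix is omitted for brevity:

```python
import re

# Simplified 7-character ID fragments of the old and new _VALID_URL patterns.
OLD_ID = r'\d{7}'        # digits only
NEW_ID = r'[0-9a-z]{7}'  # digits or lowercase letters

failing_id = '9999a34'  # ID from the reported URL; note the letter 'a'

print(re.fullmatch(OLD_ID, failing_id))  # None -> generic "Unsupported URL" fallback
print(re.fullmatch(NEW_ID, failing_id))  # <re.Match object ...> -> URL now handled
```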
{"golden_diff": "diff --git a/youtube_dl/extractor/nhk.py b/youtube_dl/extractor/nhk.py\n--- a/youtube_dl/extractor/nhk.py\n+++ b/youtube_dl/extractor/nhk.py\n@@ -1,3 +1,4 @@\n+# coding: utf-8\n from __future__ import unicode_literals\n \n import re\n@@ -23,7 +24,7 @@\n def _extract_episode_info(self, url, episode=None):\n fetch_episode = episode is None\n lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()\n- if episode_id.isdigit():\n+ if len(episode_id) == 7:\n episode_id = episode_id[:4] + '-' + episode_id[4:]\n \n is_video = m_type == 'video'\n@@ -84,7 +85,8 @@\n \n \n class NhkVodIE(NhkBaseIE):\n- _VALID_URL = r'%s%s(?P<id>\\d{7}|[^/]+?-\\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n+ # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], eg\n+ _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n # Content available only for a limited period of time. Visit\n # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.\n _TESTS = [{\n@@ -124,6 +126,19 @@\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',\n 'only_matching': True,\n+ }, {\n+ # video, alphabetic character in ID #29670\n+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',\n+ 'only_matching': True,\n+ 'info_dict': {\n+ 'id': 'qfjay6cg',\n+ 'ext': 'mp4',\n+ 'title': 'DESIGN TALKS plus - Fishermen\u2019s Finery',\n+ 'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',\n+ 'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\\.jpg\\?w=1920&h=1080$',\n+ 'upload_date': '20210615',\n+ 'timestamp': 1623722008,\n+ }\n }]\n \n def _real_extract(self, url):\n", "issue": "One seemingly random \"Unsupported URL\", while others works just fine\n\r\n## Checklist\r\n\r\n- [x] I'm reporting a broken site support\r\n- [x] I've verified that I'm running youtube-dl version **2021.06.06**\r\n- [x] I've checked that all provided URLs are alive and playable in a browser\r\n- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped\r\n- [x] I've searched the bugtracker for similar issues including closed ones\r\n\r\n\r\n## Verbose log\r\n\r\n\r\n```\r\nyoutube-dl -v -F https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['-v', '-F', 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/']\r\n[debug] Encodings: locale cp65001, fs mbcs, out cp65001, pref cp65001\r\n[debug] youtube-dl version 2021.06.06\r\n[debug] Python version 3.4.4 (CPython) - Windows-10-10.0.19041\r\n[debug] exe versions: ffmpeg 4.2.3, ffprobe git-2019-12-17-bd83191\r\n[debug] Proxy map: {}\r\n[generic] 9999a34: Requesting header\r\nWARNING: Falling back on generic information extractor.\r\n[generic] 9999a34: Downloading webpage\r\n[generic] 9999a34: Extracting information\r\nERROR: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\YoutubeDL.py\", line 815, in wrapper\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\YoutubeDL.py\", line 836, in __extract_info\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build 
archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\extractor\\common.py\", line 534, in extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\extractor\\generic.py\", line 3520, in _real_extract\r\nyoutube_dl.utils.UnsupportedError: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/\r\n\r\n```\r\n\r\n\r\n## Description\r\n\r\nI'm getting \" Unsupported URL\" for this single link. Every other url I've tried from the nhk site (https://www3.nhk.or.jp/nhkworld/), formatted exactly like this one, downloads just fine.\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import urljoin\n\n\nclass NhkBaseIE(InfoExtractor):\n _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'\n _BASE_URL_REGEX = r'https?://www3\\.nhk\\.or\\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'\n _TYPE_REGEX = r'/(?P<type>video|audio)/'\n\n def _call_api(self, m_id, lang, is_video, is_episode, is_clip):\n return self._download_json(\n self._API_URL_TEMPLATE % (\n 'v' if is_video else 'r',\n 'clip' if is_clip else 'esd',\n 'episode' if is_episode else 'program',\n m_id, lang, '/all' if is_video else ''),\n m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or []\n\n def _extract_episode_info(self, url, episode=None):\n fetch_episode = episode is None\n lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()\n if episode_id.isdigit():\n episode_id = episode_id[:4] + '-' + episode_id[4:]\n\n is_video = m_type == 'video'\n if fetch_episode:\n episode = self._call_api(\n episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]\n title = episode.get('sub_title_clean') or episode['sub_title']\n\n def get_clean_field(key):\n return episode.get(key + '_clean') or episode.get(key)\n\n series = get_clean_field('title')\n\n thumbnails = []\n for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:\n img_path = episode.get('image' + s)\n if not img_path:\n continue\n thumbnails.append({\n 'id': '%dp' % h,\n 'height': h,\n 'width': w,\n 'url': 'https://www3.nhk.or.jp' + img_path,\n })\n\n info = {\n 'id': episode_id + '-' + lang,\n 'title': '%s - %s' % (series, title) if series and title else title,\n 'description': get_clean_field('description'),\n 'thumbnails': thumbnails,\n 'series': series,\n 'episode': title,\n }\n if is_video:\n vod_id = episode['vod_id']\n info.update({\n '_type': 'url_transparent',\n 'ie_key': 'Piksel',\n 'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id,\n 'id': vod_id,\n })\n else:\n if fetch_episode:\n audio_path = episode['audio']['audio']\n info['formats'] = self._extract_m3u8_formats(\n 'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,\n episode_id, 'm4a', entry_protocol='m3u8_native',\n m3u8_id='hls', fatal=False)\n for f in info['formats']:\n f['language'] = lang\n else:\n info.update({\n '_type': 'url_transparent',\n 'ie_key': NhkVodIE.ie_key(),\n 'url': url,\n })\n return info\n\n\nclass NhkVodIE(NhkBaseIE):\n _VALID_URL = r'%s%s(?P<id>\\d{7}|[^/]+?-\\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n # Content available only for a limited period of time. 
Visit\n # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.\n _TESTS = [{\n # video clip\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',\n 'md5': '7a90abcfe610ec22a6bfe15bd46b30ca',\n 'info_dict': {\n 'id': 'a95j5iza',\n 'ext': 'mp4',\n 'title': \"Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU\",\n 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',\n 'timestamp': 1565965194,\n 'upload_date': '20190816',\n },\n }, {\n # audio clip\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/',\n 'info_dict': {\n 'id': 'r_inventions-20201104-1-en',\n 'ext': 'm4a',\n 'title': \"Japan's Top Inventions - Miniature Video Cameras\",\n 'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n return self._extract_episode_info(url)\n\n\nclass NhkVodProgramIE(NhkBaseIE):\n _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n _TESTS = [{\n # video program episodes\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',\n 'info_dict': {\n 'id': 'japanrailway',\n 'title': 'Japan Railway Journal',\n },\n 'playlist_mincount': 1,\n }, {\n # video program clips\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',\n 'info_dict': {\n 'id': 'japanrailway',\n 'title': 'Japan Railway Journal',\n },\n 'playlist_mincount': 5,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',\n 'only_matching': True,\n }, {\n # audio program\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups()\n\n episodes = self._call_api(\n program_id, lang, m_type == 'video', False, episode_type == 'clip')\n\n entries = []\n for episode in episodes:\n episode_path = episode.get('url')\n if not episode_path:\n continue\n entries.append(self._extract_episode_info(\n urljoin(url, episode_path), episode))\n\n program_title = None\n if entries:\n program_title = entries[0].get('series')\n\n return self.playlist_result(entries, program_id, program_title)\n", "path": "youtube_dl/extractor/nhk.py"}]}
3,642
680
gh_patches_debug_17155
rasdani/github-patches
git_diff
cloudtools__troposphere-1157
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> RDS VPC example instance classes are out of date https://github.com/cloudtools/troposphere/blob/565247a50bd27a8f45c3e408b466b193473cb9a6/examples/RDS_VPC.py#L67-L76 The current list of instance classes is documented here: https://aws.amazon.com/rds/instance-types/ </issue> <code> [start of examples/RDS_VPC.py] 1 # Converted from RDS_VPC.template located at: 2 # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ 3 4 from troposphere import GetAtt, Join, Output, Parameter, Ref, Template 5 from troposphere.ec2 import SecurityGroup 6 from troposphere.rds import DBInstance, DBSubnetGroup 7 8 9 t = Template() 10 11 t.add_description( 12 "AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template " 13 "showing how to create an RDS DBInstance in an existing Virtual Private " 14 "Cloud (VPC). **WARNING** This template creates an Amazon Relational " 15 "Database Service database instance. You will be billed for the AWS " 16 "resources used if you create a stack from this template.") 17 18 vpcid = t.add_parameter(Parameter( 19 "VpcId", 20 Type="String", 21 Description="VpcId of your existing Virtual Private Cloud (VPC)" 22 )) 23 24 subnet = t.add_parameter(Parameter( 25 "Subnets", 26 Type="CommaDelimitedList", 27 Description=( 28 "The list of SubnetIds, for at least two Availability Zones in the " 29 "region in your Virtual Private Cloud (VPC)") 30 )) 31 32 dbname = t.add_parameter(Parameter( 33 "DBName", 34 Default="MyDatabase", 35 Description="The database name", 36 Type="String", 37 MinLength="1", 38 MaxLength="64", 39 AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", 40 ConstraintDescription=("must begin with a letter and contain only" 41 " alphanumeric characters.") 42 )) 43 44 dbuser = t.add_parameter(Parameter( 45 "DBUser", 46 NoEcho=True, 47 Description="The database admin account username", 48 Type="String", 49 MinLength="1", 50 MaxLength="16", 51 AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", 52 ConstraintDescription=("must begin with a letter and contain only" 53 " alphanumeric characters.") 54 )) 55 56 dbpassword = t.add_parameter(Parameter( 57 "DBPassword", 58 NoEcho=True, 59 Description="The database admin account password", 60 Type="String", 61 MinLength="1", 62 MaxLength="41", 63 AllowedPattern="[a-zA-Z0-9]*", 64 ConstraintDescription="must contain only alphanumeric characters." 
65 )) 66 67 dbclass = t.add_parameter(Parameter( 68 "DBClass", 69 Default="db.m1.small", 70 Description="Database instance class", 71 Type="String", 72 AllowedValues=[ 73 "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", 74 "db.m2.2xlarge", "db.m2.4xlarge"], 75 ConstraintDescription="must select a valid database instance type.", 76 )) 77 78 dballocatedstorage = t.add_parameter(Parameter( 79 "DBAllocatedStorage", 80 Default="5", 81 Description="The size of the database (Gb)", 82 Type="Number", 83 MinValue="5", 84 MaxValue="1024", 85 ConstraintDescription="must be between 5 and 1024Gb.", 86 )) 87 88 89 mydbsubnetgroup = t.add_resource(DBSubnetGroup( 90 "MyDBSubnetGroup", 91 DBSubnetGroupDescription="Subnets available for the RDS DB Instance", 92 SubnetIds=Ref(subnet), 93 )) 94 95 myvpcsecuritygroup = t.add_resource(SecurityGroup( 96 "myVPCSecurityGroup", 97 GroupDescription="Security group for RDS DB Instance.", 98 VpcId=Ref(vpcid) 99 )) 100 101 mydb = t.add_resource(DBInstance( 102 "MyDB", 103 DBName=Ref(dbname), 104 AllocatedStorage=Ref(dballocatedstorage), 105 DBInstanceClass=Ref(dbclass), 106 Engine="MySQL", 107 EngineVersion="5.5", 108 MasterUsername=Ref(dbuser), 109 MasterUserPassword=Ref(dbpassword), 110 DBSubnetGroupName=Ref(mydbsubnetgroup), 111 VPCSecurityGroups=[Ref(myvpcsecuritygroup)], 112 )) 113 114 t.add_output(Output( 115 "JDBCConnectionString", 116 Description="JDBC connection string for database", 117 Value=Join("", [ 118 "jdbc:mysql://", 119 GetAtt("MyDB", "Endpoint.Address"), 120 GetAtt("MyDB", "Endpoint.Port"), 121 "/", 122 Ref(dbname) 123 ]) 124 )) 125 126 print(t.to_json()) 127 [end of examples/RDS_VPC.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/RDS_VPC.py b/examples/RDS_VPC.py
--- a/examples/RDS_VPC.py
+++ b/examples/RDS_VPC.py
@@ -70,8 +70,17 @@
     Description="Database instance class",
     Type="String",
     AllowedValues=[
-        "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge",
-        "db.m2.2xlarge", "db.m2.4xlarge"],
+        "db.m5.large", "db.m5.xlarge", "db.m5.2xlarge", "db.m5.4xlarge",
+        "db.m5.12xlarge", "db.m5.24xlarge", "db.m4.large", "db.m4.xlarge",
+        "db.m4.2xlarge", "db.m4.4xlarge", "db.m4.10xlarge", "db.m4.16xlarge",
+        "db.r4.large", "db.r4.xlarge", "db.r4.2xlarge", "db.r4.4xlarge",
+        "db.r4.8xlarge", "db.r4.16xlarge", "db.x1e.xlarge", "db.x1e.2xlarge",
+        "db.x1e.4xlarge", "db.x1e.8xlarge", "db.x1e.16xlarge", "db.x1e.32xlarge",
+        "db.x1.16xlarge", "db.x1.32xlarge", "db.r3.large", "db.r3.xlarge",
+        "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.t2.micro",
+        "db.t2.small", "db.t2.medium", "db.t2.large", "db.t2.xlarge",
+        "db.t2.2xlarge"
+    ],
     ConstraintDescription="must select a valid database instance type.",
 ))
 
{"golden_diff": "diff --git a/examples/RDS_VPC.py b/examples/RDS_VPC.py\n--- a/examples/RDS_VPC.py\n+++ b/examples/RDS_VPC.py\n@@ -70,8 +70,17 @@\n Description=\"Database instance class\",\n Type=\"String\",\n AllowedValues=[\n- \"db.m1.small\", \"db.m1.large\", \"db.m1.xlarge\", \"db.m2.xlarge\",\n- \"db.m2.2xlarge\", \"db.m2.4xlarge\"],\n+ \"db.m5.large\", \"db.m5.xlarge\", \"db.m5.2xlarge\", \"db.m5.4xlarge\",\n+ \"db.m5.12xlarge\", \"db.m5.24xlarge\", \"db.m4.large\", \"db.m4.xlarge\",\n+ \"db.m4.2xlarge\", \"db.m4.4xlarge\", \"db.m4.10xlarge\", \"db.m4.16xlarge\",\n+ \"db.r4.large\", \"db.r4.xlarge\", \"db.r4.2xlarge\", \"db.r4.4xlarge\",\n+ \"db.r4.8xlarge\", \"db.r4.16xlarge\", \"db.x1e.xlarge\", \"db.x1e.2xlarge\",\n+ \"db.x1e.4xlarge\", \"db.x1e.8xlarge\", \"db.x1e.16xlarge\", \"db.x1e.32xlarge\",\n+ \"db.x1.16xlarge\", \"db.x1.32xlarge\", \"db.r3.large\", \"db.r3.xlarge\",\n+ \"db.r3.2xlarge\", \"db.r3.4xlarge\", \"db.r3.8xlarge\", \"db.t2.micro\",\n+ \"db.t2.small\", \"db.t2.medium\", \"db.t2.large\", \"db.t2.xlarge\",\n+ \"db.t2.2xlarge\"\n+ ],\n ConstraintDescription=\"must select a valid database instance type.\",\n ))\n", "issue": "RDS VPC example instance classes are out of date\nhttps://github.com/cloudtools/troposphere/blob/565247a50bd27a8f45c3e408b466b193473cb9a6/examples/RDS_VPC.py#L67-L76\r\n\r\nThe current list of instance classes is documented here: https://aws.amazon.com/rds/instance-types/\n", "before_files": [{"content": "# Converted from RDS_VPC.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import GetAtt, Join, Output, Parameter, Ref, Template\nfrom troposphere.ec2 import SecurityGroup\nfrom troposphere.rds import DBInstance, DBSubnetGroup\n\n\nt = Template()\n\nt.add_description(\n \"AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template \"\n \"showing how to create an RDS DBInstance in an existing Virtual Private \"\n \"Cloud (VPC). **WARNING** This template creates an Amazon Relational \"\n \"Database Service database instance. 
You will be billed for the AWS \"\n \"resources used if you create a stack from this template.\")\n\nvpcid = t.add_parameter(Parameter(\n \"VpcId\",\n Type=\"String\",\n Description=\"VpcId of your existing Virtual Private Cloud (VPC)\"\n))\n\nsubnet = t.add_parameter(Parameter(\n \"Subnets\",\n Type=\"CommaDelimitedList\",\n Description=(\n \"The list of SubnetIds, for at least two Availability Zones in the \"\n \"region in your Virtual Private Cloud (VPC)\")\n))\n\ndbname = t.add_parameter(Parameter(\n \"DBName\",\n Default=\"MyDatabase\",\n Description=\"The database name\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"64\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbuser = t.add_parameter(Parameter(\n \"DBUser\",\n NoEcho=True,\n Description=\"The database admin account username\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"16\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbpassword = t.add_parameter(Parameter(\n \"DBPassword\",\n NoEcho=True,\n Description=\"The database admin account password\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"41\",\n AllowedPattern=\"[a-zA-Z0-9]*\",\n ConstraintDescription=\"must contain only alphanumeric characters.\"\n))\n\ndbclass = t.add_parameter(Parameter(\n \"DBClass\",\n Default=\"db.m1.small\",\n Description=\"Database instance class\",\n Type=\"String\",\n AllowedValues=[\n \"db.m1.small\", \"db.m1.large\", \"db.m1.xlarge\", \"db.m2.xlarge\",\n \"db.m2.2xlarge\", \"db.m2.4xlarge\"],\n ConstraintDescription=\"must select a valid database instance type.\",\n))\n\ndballocatedstorage = t.add_parameter(Parameter(\n \"DBAllocatedStorage\",\n Default=\"5\",\n Description=\"The size of the database (Gb)\",\n Type=\"Number\",\n MinValue=\"5\",\n MaxValue=\"1024\",\n ConstraintDescription=\"must be between 5 and 1024Gb.\",\n))\n\n\nmydbsubnetgroup = t.add_resource(DBSubnetGroup(\n \"MyDBSubnetGroup\",\n DBSubnetGroupDescription=\"Subnets available for the RDS DB Instance\",\n SubnetIds=Ref(subnet),\n))\n\nmyvpcsecuritygroup = t.add_resource(SecurityGroup(\n \"myVPCSecurityGroup\",\n GroupDescription=\"Security group for RDS DB Instance.\",\n VpcId=Ref(vpcid)\n))\n\nmydb = t.add_resource(DBInstance(\n \"MyDB\",\n DBName=Ref(dbname),\n AllocatedStorage=Ref(dballocatedstorage),\n DBInstanceClass=Ref(dbclass),\n Engine=\"MySQL\",\n EngineVersion=\"5.5\",\n MasterUsername=Ref(dbuser),\n MasterUserPassword=Ref(dbpassword),\n DBSubnetGroupName=Ref(mydbsubnetgroup),\n VPCSecurityGroups=[Ref(myvpcsecuritygroup)],\n))\n\nt.add_output(Output(\n \"JDBCConnectionString\",\n Description=\"JDBC connection string for database\",\n Value=Join(\"\", [\n \"jdbc:mysql://\",\n GetAtt(\"MyDB\", \"Endpoint.Address\"),\n GetAtt(\"MyDB\", \"Endpoint.Port\"),\n \"/\",\n Ref(dbname)\n ])\n))\n\nprint(t.to_json())\n", "path": "examples/RDS_VPC.py"}]}
1,823
461
gh_patches_debug_7269
rasdani/github-patches
git_diff
urllib3__urllib3-2711
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `format` session fail to run locally on Window platform ### Subject When I ran `nox -rs format` to format the files, the format process terminates due to `FileNotFoundError` reported at line 139 at file `noxfile.py`: ![image](https://user-images.githubusercontent.com/23013350/184066564-3a322445-c7b8-48ba-8ee9-4e747e964037.png) ### Environment Window 10 ### Steps to Reproduce Just ran `nox -rs format` on Window machine. I suspect the created `process` could not find the files properly, and my workaround is to using `session.run()` instead: ```python @nox.session() def format(session: nox.Session) -> None: """Run code formatters.""" session.install("pre-commit") session.run("pre-commit", "--version") # process = subprocess.run( # ["pre-commit", "run", "--all-files"], # env=session.env, # text=True, # stdout=subprocess.PIPE, # stderr=subprocess.STDOUT, # ) # # Ensure that pre-commit itself ran successfully # assert process.returncode in (0, 1) session.run("pre-commit", "run", "--all-files") # my workaround lint(session) ``` Perhaps this failure is due to my improper environment setup for the repo. If so, please correct me. Thanks. </issue> <code> [start of noxfile.py] 1 import os 2 import shutil 3 import subprocess 4 5 import nox 6 7 SOURCE_FILES = [ 8 "docs/", 9 "dummyserver/", 10 "src/", 11 "test/", 12 "noxfile.py", 13 "setup.py", 14 ] 15 16 17 def tests_impl( 18 session: nox.Session, 19 extras: str = "socks,secure,brotli,zstd", 20 byte_string_comparisons: bool = True, 21 ) -> None: 22 # Install deps and the package itself. 23 session.install("-r", "dev-requirements.txt") 24 session.install(f".[{extras}]") 25 26 # Show the pip version. 27 session.run("pip", "--version") 28 # Print the Python version and bytesize. 29 session.run("python", "--version") 30 session.run("python", "-c", "import struct; print(struct.calcsize('P') * 8)") 31 # Print OpenSSL information. 32 session.run("python", "-m", "OpenSSL.debug") 33 34 # Inspired from https://hynek.me/articles/ditch-codecov-python/ 35 # We use parallel mode and then combine in a later CI step 36 session.run( 37 "python", 38 *(("-bb",) if byte_string_comparisons else ()), 39 "-m", 40 "coverage", 41 "run", 42 "--parallel-mode", 43 "-m", 44 "pytest", 45 "-r", 46 "a", 47 f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}", 48 "--tb=native", 49 "--no-success-flaky-report", 50 *(session.posargs or ("test/",)), 51 env={"PYTHONWARNINGS": "always::DeprecationWarning"}, 52 ) 53 54 55 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy"]) 56 def test(session: nox.Session) -> None: 57 tests_impl(session) 58 59 60 @nox.session(python=["2.7"]) 61 def unsupported_setup_py(session: nox.Session) -> None: 62 # Can't check both returncode and output with session.run 63 process = subprocess.run( 64 ["python", "setup.py", "install"], 65 env={**session.env}, 66 text=True, 67 capture_output=True, 68 ) 69 assert process.returncode == 1 70 print(process.stderr) 71 assert "Please use `python -m pip install .` instead." in process.stderr 72 73 74 @nox.session(python=["3"]) 75 def test_brotlipy(session: nox.Session) -> None: 76 """Check that if 'brotlipy' is installed instead of 'brotli' or 77 'brotlicffi' that we still don't blow up. 
78 """ 79 session.install("brotlipy") 80 tests_impl(session, extras="socks,secure", byte_string_comparisons=False) 81 82 83 def git_clone(session: nox.Session, git_url: str) -> None: 84 session.run("git", "clone", "--depth", "1", git_url, external=True) 85 86 87 @nox.session() 88 def downstream_botocore(session: nox.Session) -> None: 89 root = os.getcwd() 90 tmp_dir = session.create_tmp() 91 92 session.cd(tmp_dir) 93 git_clone(session, "https://github.com/boto/botocore") 94 session.chdir("botocore") 95 session.run( 96 "git", 97 "apply", 98 f"{root}/ci/0001-Mark-100-Continue-tests-as-failing.patch", 99 external=True, 100 ) 101 session.run("git", "rev-parse", "HEAD", external=True) 102 session.run("python", "scripts/ci/install") 103 104 session.cd(root) 105 session.install(".", silent=False) 106 session.cd(f"{tmp_dir}/botocore") 107 108 session.run("python", "-c", "import urllib3; print(urllib3.__version__)") 109 session.run("python", "scripts/ci/run-tests") 110 111 112 @nox.session() 113 def downstream_requests(session: nox.Session) -> None: 114 root = os.getcwd() 115 tmp_dir = session.create_tmp() 116 117 session.cd(tmp_dir) 118 git_clone(session, "https://github.com/psf/requests") 119 session.chdir("requests") 120 session.run("git", "apply", f"{root}/ci/requests.patch", external=True) 121 session.run("git", "rev-parse", "HEAD", external=True) 122 session.install(".[socks]", silent=False) 123 session.install("-r", "requirements-dev.txt", silent=False) 124 125 session.cd(root) 126 session.install(".", silent=False) 127 session.cd(f"{tmp_dir}/requests") 128 129 session.run("python", "-c", "import urllib3; print(urllib3.__version__)") 130 session.run("pytest", "tests") 131 132 133 @nox.session() 134 def format(session: nox.Session) -> None: 135 """Run code formatters.""" 136 session.install("pre-commit") 137 session.run("pre-commit", "--version") 138 139 process = subprocess.run( 140 ["pre-commit", "run", "--all-files"], 141 env=session.env, 142 text=True, 143 stdout=subprocess.PIPE, 144 stderr=subprocess.STDOUT, 145 ) 146 # Ensure that pre-commit itself ran successfully 147 assert process.returncode in (0, 1) 148 149 lint(session) 150 151 152 @nox.session 153 def lint(session: nox.Session) -> None: 154 session.install("pre-commit") 155 session.run("pre-commit", "run", "--all-files") 156 157 mypy(session) 158 159 160 @nox.session(python="3.8") 161 def mypy(session: nox.Session) -> None: 162 """Run mypy.""" 163 session.install("-r", "mypy-requirements.txt") 164 session.run("mypy", "--version") 165 session.run( 166 "mypy", 167 "dummyserver", 168 "noxfile.py", 169 "src/urllib3", 170 "test", 171 ) 172 173 174 @nox.session 175 def docs(session: nox.Session) -> None: 176 session.install("-r", "docs/requirements.txt") 177 session.install(".[socks,secure,brotli,zstd]") 178 179 session.chdir("docs") 180 if os.path.exists("_build"): 181 shutil.rmtree("_build") 182 session.run("sphinx-build", "-b", "html", "-W", ".", "_build/html") 183 [end of noxfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -133,19 +133,6 @@
 @nox.session()
 def format(session: nox.Session) -> None:
     """Run code formatters."""
-    session.install("pre-commit")
-    session.run("pre-commit", "--version")
-
-    process = subprocess.run(
-        ["pre-commit", "run", "--all-files"],
-        env=session.env,
-        text=True,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.STDOUT,
-    )
-    # Ensure that pre-commit itself ran successfully
-    assert process.returncode in (0, 1)
-
     lint(session)
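The net effect of the hunk above is easiest to see as the resulting sessions. This is a sketch of the post-patch `format` and `lint` sessions, assuming the rest of the file is unchanged (the real `lint` session also calls `mypy(session)` afterwards):

```python
import nox


@nox.session()
def format(session: nox.Session) -> None:
    """Run code formatters."""
    # The Windows-breaking subprocess.run() call is gone; pre-commit now runs
    # only through the lint session, which uses session.run() and therefore
    # resolves the pre-commit executable inside the nox virtualenv on any OS.
    lint(session)


@nox.session
def lint(session: nox.Session) -> None:
    session.install("pre-commit")
    session.run("pre-commit", "run", "--all-files")
```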
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -133,19 +133,6 @@\n @nox.session()\n def format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n- session.install(\"pre-commit\")\n- session.run(\"pre-commit\", \"--version\")\n-\n- process = subprocess.run(\n- [\"pre-commit\", \"run\", \"--all-files\"],\n- env=session.env,\n- text=True,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.STDOUT,\n- )\n- # Ensure that pre-commit itself ran successfully\n- assert process.returncode in (0, 1)\n-\n lint(session)\n", "issue": "`format` session fail to run locally on Window platform\n### Subject\r\nWhen I ran `nox -rs format` to format the files, the format process terminates due to `FileNotFoundError` reported at line 139 at file `noxfile.py`:\r\n\r\n![image](https://user-images.githubusercontent.com/23013350/184066564-3a322445-c7b8-48ba-8ee9-4e747e964037.png)\r\n\r\n\r\n### Environment\r\nWindow 10\r\n\r\n### Steps to Reproduce\r\nJust ran `nox -rs format` on Window machine.\r\n\r\n\r\nI suspect the created `process` could not find the files properly, and my workaround is to using `session.run()` instead:\r\n```python\r\[email protected]()\r\ndef format(session: nox.Session) -> None:\r\n \"\"\"Run code formatters.\"\"\"\r\n session.install(\"pre-commit\")\r\n session.run(\"pre-commit\", \"--version\")\r\n\r\n # process = subprocess.run(\r\n # [\"pre-commit\", \"run\", \"--all-files\"],\r\n # env=session.env,\r\n # text=True,\r\n # stdout=subprocess.PIPE,\r\n # stderr=subprocess.STDOUT,\r\n # )\r\n # # Ensure that pre-commit itself ran successfully\r\n # assert process.returncode in (0, 1)\r\n\r\n session.run(\"pre-commit\", \"run\", \"--all-files\") # my workaround\r\n\r\n lint(session)\r\n```\r\n\r\nPerhaps this failure is due to my improper environment setup for the repo. 
If so, please correct me.\r\n\r\nThanks.\n", "before_files": [{"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\n \"git\",\n \"apply\",\n f\"{root}/ci/0001-Mark-100-Continue-tests-as-failing.patch\",\n external=True,\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", 
silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}]}
2,662
162
gh_patches_debug_30972
rasdani/github-patches
git_diff
saleor__saleor-3639
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Expose in API list of supported payment gateways In API we expose the `GatewaysEnum` enum which contains all the available payment gateways, but there is no way for the Storefront 2.0 to determine which of them are actually enabled (currently Braintree is hardcoded over there). We should expose a field that would provide this information, e.g: ``` type Shop { enabledGateways: [GatewaysEnum] } ``` A resolver function should take into account gateways enabled in `CHECKOUT_PAYMENT_GATEWAYS` setting. </issue> <code> [start of saleor/graphql/checkout/types.py] 1 import graphene 2 import graphene_django_optimizer as gql_optimizer 3 4 from ...checkout import models 5 from ...core.utils.taxes import get_taxes_for_address 6 from ..core.connection import CountableDjangoObjectType 7 from ..core.types.money import TaxedMoney 8 from ..order.utils import applicable_shipping_methods 9 from ..shipping.types import ShippingMethod 10 11 12 class CheckoutLine(CountableDjangoObjectType): 13 total_price = graphene.Field( 14 TaxedMoney, 15 description=( 16 'The sum of the checkout line price, taxes and discounts.')) 17 requires_shipping = graphene.Boolean( 18 description='Indicates whether the item need to be delivered.') 19 20 class Meta: 21 exclude_fields = ['cart', 'data'] 22 description = 'Represents an item in the checkout.' 23 interfaces = [graphene.relay.Node] 24 model = models.CartLine 25 filter_fields = ['id'] 26 27 def resolve_total_price(self, info): 28 taxes = get_taxes_for_address(self.cart.shipping_address) 29 return self.get_total(taxes=taxes) 30 31 def resolve_requires_shipping(self, info): 32 return self.is_shipping_required() 33 34 35 class Checkout(CountableDjangoObjectType): 36 available_shipping_methods = graphene.List( 37 ShippingMethod, required=False, 38 description='Shipping methods that can be used with this order.') 39 is_shipping_required = graphene.Boolean( 40 description='Returns True, if checkout requires shipping.', 41 required=True) 42 lines = gql_optimizer.field( 43 graphene.List( 44 CheckoutLine, description=( 45 'A list of checkout lines, each containing information about ' 46 'an item in the checkout.')), 47 model_field='lines') 48 shipping_price = graphene.Field( 49 TaxedMoney, 50 description='The price of the shipping, with all the taxes included.') 51 subtotal_price = graphene.Field( 52 TaxedMoney, 53 description=( 54 'The price of the checkout before shipping, with taxes included.')) 55 total_price = graphene.Field( 56 TaxedMoney, 57 description=( 58 'The sum of the the checkout line prices, with all the taxes,' 59 'shipping costs, and discounts included.')) 60 61 class Meta: 62 exclude_fields = ['payments'] 63 description = 'Checkout object' 64 model = models.Cart 65 interfaces = [graphene.relay.Node] 66 filter_fields = ['token'] 67 68 def resolve_total_price(self, info): 69 taxes = get_taxes_for_address(self.shipping_address) 70 return self.get_total(taxes=taxes) 71 72 def resolve_subtotal_price(self, info): 73 taxes = get_taxes_for_address(self.shipping_address) 74 return self.get_subtotal(taxes=taxes) 75 76 def resolve_shipping_price(self, info): 77 taxes = get_taxes_for_address(self.shipping_address) 78 return self.get_shipping_price(taxes=taxes) 79 80 def resolve_lines(self, info): 81 return self.lines.prefetch_related('variant') 82 83 def resolve_available_shipping_methods(self, info): 84 taxes = get_taxes_for_address(self.shipping_address) 85 price = 
self.get_subtotal( 86 taxes=taxes, discounts=info.context.discounts) 87 return applicable_shipping_methods(self, info, price.gross.amount) 88 89 def resolve_is_shipping_required(self, info): 90 return self.is_shipping_required() 91 [end of saleor/graphql/checkout/types.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/checkout/types.py b/saleor/graphql/checkout/types.py --- a/saleor/graphql/checkout/types.py +++ b/saleor/graphql/checkout/types.py @@ -1,11 +1,13 @@ import graphene import graphene_django_optimizer as gql_optimizer +from django.conf import settings from ...checkout import models from ...core.utils.taxes import get_taxes_for_address from ..core.connection import CountableDjangoObjectType from ..core.types.money import TaxedMoney from ..order.utils import applicable_shipping_methods +from ..payment.enums import PaymentGatewayEnum from ..shipping.types import ShippingMethod @@ -36,6 +38,9 @@ available_shipping_methods = graphene.List( ShippingMethod, required=False, description='Shipping methods that can be used with this order.') + available_payment_gateways = graphene.List( + PaymentGatewayEnum, description='List of available payment gateways.', + required=True) is_shipping_required = graphene.Boolean( description='Returns True, if checkout requires shipping.', required=True) @@ -86,5 +91,8 @@ taxes=taxes, discounts=info.context.discounts) return applicable_shipping_methods(self, info, price.gross.amount) + def resolve_available_payment_gateways(self, info): + return settings.CHECKOUT_PAYMENT_GATEWAYS.keys() + def resolve_is_shipping_required(self, info): return self.is_shipping_required()
{"golden_diff": "diff --git a/saleor/graphql/checkout/types.py b/saleor/graphql/checkout/types.py\n--- a/saleor/graphql/checkout/types.py\n+++ b/saleor/graphql/checkout/types.py\n@@ -1,11 +1,13 @@\n import graphene\n import graphene_django_optimizer as gql_optimizer\n+from django.conf import settings\n \n from ...checkout import models\n from ...core.utils.taxes import get_taxes_for_address\n from ..core.connection import CountableDjangoObjectType\n from ..core.types.money import TaxedMoney\n from ..order.utils import applicable_shipping_methods\n+from ..payment.enums import PaymentGatewayEnum\n from ..shipping.types import ShippingMethod\n \n \n@@ -36,6 +38,9 @@\n available_shipping_methods = graphene.List(\n ShippingMethod, required=False,\n description='Shipping methods that can be used with this order.')\n+ available_payment_gateways = graphene.List(\n+ PaymentGatewayEnum, description='List of available payment gateways.',\n+ required=True)\n is_shipping_required = graphene.Boolean(\n description='Returns True, if checkout requires shipping.',\n required=True)\n@@ -86,5 +91,8 @@\n taxes=taxes, discounts=info.context.discounts)\n return applicable_shipping_methods(self, info, price.gross.amount)\n \n+ def resolve_available_payment_gateways(self, info):\n+ return settings.CHECKOUT_PAYMENT_GATEWAYS.keys()\n+\n def resolve_is_shipping_required(self, info):\n return self.is_shipping_required()\n", "issue": "Expose in API list of supported payment gateways\nIn API we expose the `GatewaysEnum` enum which contains all the available payment gateways, but there is no way for the Storefront 2.0 to determine which of them are actually enabled (currently Braintree is hardcoded over there). We should expose a field that would provide this information, e.g:\r\n```\r\ntype Shop {\r\n enabledGateways: [GatewaysEnum]\r\n}\r\n```\r\n\r\nA resolver function should take into account gateways enabled in `CHECKOUT_PAYMENT_GATEWAYS` setting.\n", "before_files": [{"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\n\nfrom ...checkout import models\nfrom ...core.utils.taxes import get_taxes_for_address\nfrom ..core.connection import CountableDjangoObjectType\nfrom ..core.types.money import TaxedMoney\nfrom ..order.utils import applicable_shipping_methods\nfrom ..shipping.types import ShippingMethod\n\n\nclass CheckoutLine(CountableDjangoObjectType):\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the checkout line price, taxes and discounts.'))\n requires_shipping = graphene.Boolean(\n description='Indicates whether the item need to be delivered.')\n\n class Meta:\n exclude_fields = ['cart', 'data']\n description = 'Represents an item in the checkout.'\n interfaces = [graphene.relay.Node]\n model = models.CartLine\n filter_fields = ['id']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.cart.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_requires_shipping(self, info):\n return self.is_shipping_required()\n\n\nclass Checkout(CountableDjangoObjectType):\n available_shipping_methods = graphene.List(\n ShippingMethod, required=False,\n description='Shipping methods that can be used with this order.')\n is_shipping_required = graphene.Boolean(\n description='Returns True, if checkout requires shipping.',\n required=True)\n lines = gql_optimizer.field(\n graphene.List(\n CheckoutLine, description=(\n 'A list of checkout lines, each containing information about '\n 'an item in the checkout.')),\n 
model_field='lines')\n shipping_price = graphene.Field(\n TaxedMoney,\n description='The price of the shipping, with all the taxes included.')\n subtotal_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The price of the checkout before shipping, with taxes included.'))\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the the checkout line prices, with all the taxes,'\n 'shipping costs, and discounts included.'))\n\n class Meta:\n exclude_fields = ['payments']\n description = 'Checkout object'\n model = models.Cart\n interfaces = [graphene.relay.Node]\n filter_fields = ['token']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_subtotal_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_subtotal(taxes=taxes)\n\n def resolve_shipping_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_shipping_price(taxes=taxes)\n\n def resolve_lines(self, info):\n return self.lines.prefetch_related('variant')\n\n def resolve_available_shipping_methods(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n price = self.get_subtotal(\n taxes=taxes, discounts=info.context.discounts)\n return applicable_shipping_methods(self, info, price.gross.amount)\n\n def resolve_is_shipping_required(self, info):\n return self.is_shipping_required()\n", "path": "saleor/graphql/checkout/types.py"}]}
1,508
318
gh_patches_debug_17205
rasdani/github-patches
git_diff
fidals__shopelectro-532
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tracking.es6:30-33: Send `purchase` event to YA and GA... The puzzle `504-30170dc3` from #504 has to be resolved: https://github.com/fidals/shopelectro/blob/4e607fb856842b43192511b1b4eaff19d2052b9a/front/js/shared/tracking.es6#L30-L33 The puzzle was created by Artemiy on 17-Aug-18. Estimate: 60 minutes, If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html). </issue> <code> [start of shopelectro/views/ecommerce.py] 1 from django.conf import settings 2 from django.http import HttpResponse, JsonResponse 3 from django.shortcuts import get_object_or_404, render 4 from django.views.decorators.http import require_POST 5 6 from ecommerce import mailer, views as ec_views 7 from pages.models import CustomPage 8 9 from shopelectro.cart import SECart 10 from shopelectro.forms import OrderForm 11 from shopelectro.models import Product, Order 12 13 14 # ECOMMERCE VIEWS 15 class OrderPage(ec_views.OrderPage): 16 order_form = OrderForm 17 cart = SECart 18 19 def get_context_data(self, request, **kwargs): 20 return { 21 **super(OrderPage, self).get_context_data(request, **kwargs), 22 'page': CustomPage.objects.get(slug='order'), 23 } 24 25 26 class AddToCart(ec_views.AddToCart): 27 cart = SECart 28 product_model = Product 29 order_form = OrderForm 30 31 32 class RemoveFromCart(ec_views.RemoveFromCart): 33 cart = SECart 34 product_model = Product 35 order_form = OrderForm 36 37 38 class ChangeCount(ec_views.ChangeCount): 39 cart = SECart 40 product_model = Product 41 order_form = OrderForm 42 43 44 class FlushCart(ec_views.FlushCart): 45 product_model = Product 46 order_form = OrderForm 47 48 49 class OrderSuccess(ec_views.OrderSuccess): 50 order = Order 51 52 53 @require_POST 54 def one_click_buy(request): 55 """ 56 Handle one-click-buy. 57 58 Accept XHR, save Order to DB, send mail about it 59 and return 200 OK. 
60 """ 61 SECart(request.session).clear() 62 63 cart = SECart(request.session) 64 product = get_object_or_404(Product, id=request.POST['product']) 65 cart.add(product, int(request.POST['quantity'])) 66 order = Order(phone=request.POST['phone']) 67 order.set_positions(cart) 68 ec_views.save_order_to_session(request.session, order) 69 mailer.send_order( 70 subject=settings.EMAIL_SUBJECTS['one_click'], 71 order=order, 72 to_customer=False, 73 ) 74 return HttpResponse('ok') 75 76 77 @require_POST 78 def order_call(request): 79 """Send email about ordered call.""" 80 phone, time, url = ec_views.get_keys_from_post( 81 request, 'phone', 'time', 'url') 82 83 mailer.send_backcall( 84 subject=settings.EMAIL_SUBJECTS['call'], 85 phone=phone, 86 time=time, 87 url=url, 88 ) 89 90 return HttpResponse('ok') 91 92 93 class YandexOrder(OrderPage): 94 95 def post(self, request): 96 cart = self.cart(request.session) 97 form = self.order_form(request.POST.dict()) 98 99 if not form.is_valid(): 100 return render(request, self.template, {'cart': cart, 'form': form}) 101 102 order = form.save() 103 order.set_positions(cart) 104 ec_views.save_order_to_session(request.session, order) 105 106 # Took form fields from Yandex docs https://goo.gl/afKfsz 107 response_data = { 108 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required 109 'shopId': settings.SHOP['id'], # Required 110 'scid': settings.SHOP['scid'], # Required 111 'shopSuccessURL': settings.SHOP['success_url'], 112 'shopFailURL': settings.SHOP['fail_url'], 113 'customerNumber': order.id, # Required 114 'sum': order.total_price, # Required 115 'orderNumber': order.fake_order_number, 116 'cps_phone': order.phone, 117 'cps_email': order.email, 118 'paymentType': request.POST.get('payment_type'), 119 } 120 121 return JsonResponse(response_data) 122 [end of shopelectro/views/ecommerce.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py --- a/shopelectro/views/ecommerce.py +++ b/shopelectro/views/ecommerce.py @@ -1,4 +1,5 @@ from django.conf import settings +from django.core import serializers from django.http import HttpResponse, JsonResponse from django.shortcuts import get_object_or_404, render from django.views.decorators.http import require_POST @@ -47,7 +48,20 @@ class OrderSuccess(ec_views.OrderSuccess): - order = Order + order = Order.objects.all().prefetch_related('positions') + + def get_context_data(self, **kwargs): + context = super().get_context_data(**kwargs) + positions_json = serializers.serialize( + 'json', + context['order'].positions.all(), + fields=['name', 'quantity', 'price'], + ) + + return { + **context, + 'positions_json': positions_json, + } @require_POST
{"golden_diff": "diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py\n--- a/shopelectro/views/ecommerce.py\n+++ b/shopelectro/views/ecommerce.py\n@@ -1,4 +1,5 @@\n from django.conf import settings\n+from django.core import serializers\n from django.http import HttpResponse, JsonResponse\n from django.shortcuts import get_object_or_404, render\n from django.views.decorators.http import require_POST\n@@ -47,7 +48,20 @@\n \n \n class OrderSuccess(ec_views.OrderSuccess):\n- order = Order\n+ order = Order.objects.all().prefetch_related('positions')\n+\n+ def get_context_data(self, **kwargs):\n+ context = super().get_context_data(**kwargs)\n+ positions_json = serializers.serialize(\n+ 'json',\n+ context['order'].positions.all(),\n+ fields=['name', 'quantity', 'price'],\n+ )\n+\n+ return {\n+ **context,\n+ 'positions_json': positions_json,\n+ }\n \n \n @require_POST\n", "issue": "tracking.es6:30-33: Send `purchase` event to YA and GA...\nThe puzzle `504-30170dc3` from #504 has to be resolved:\n\nhttps://github.com/fidals/shopelectro/blob/4e607fb856842b43192511b1b4eaff19d2052b9a/front/js/shared/tracking.es6#L30-L33\n\nThe puzzle was created by Artemiy on 17-Aug-18. \n\nEstimate: 60 minutes, \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \"done\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.decorators.http import require_POST\n\nfrom ecommerce import mailer, views as ec_views\nfrom pages.models import CustomPage\n\nfrom shopelectro.cart import SECart\nfrom shopelectro.forms import OrderForm\nfrom shopelectro.models import Product, Order\n\n\n# ECOMMERCE VIEWS\nclass OrderPage(ec_views.OrderPage):\n order_form = OrderForm\n cart = SECart\n\n def get_context_data(self, request, **kwargs):\n return {\n **super(OrderPage, self).get_context_data(request, **kwargs),\n 'page': CustomPage.objects.get(slug='order'),\n }\n\n\nclass AddToCart(ec_views.AddToCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass RemoveFromCart(ec_views.RemoveFromCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass ChangeCount(ec_views.ChangeCount):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass FlushCart(ec_views.FlushCart):\n product_model = Product\n order_form = OrderForm\n\n\nclass OrderSuccess(ec_views.OrderSuccess):\n order = Order\n\n\n@require_POST\ndef one_click_buy(request):\n \"\"\"\n Handle one-click-buy.\n\n Accept XHR, save Order to DB, send mail about it\n and return 200 OK.\n \"\"\"\n SECart(request.session).clear()\n\n cart = SECart(request.session)\n product = get_object_or_404(Product, id=request.POST['product'])\n cart.add(product, int(request.POST['quantity']))\n order = Order(phone=request.POST['phone'])\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['one_click'],\n order=order,\n to_customer=False,\n )\n return HttpResponse('ok')\n\n\n@require_POST\ndef order_call(request):\n \"\"\"Send email about ordered call.\"\"\"\n phone, time, url = ec_views.get_keys_from_post(\n 
request, 'phone', 'time', 'url')\n\n mailer.send_backcall(\n subject=settings.EMAIL_SUBJECTS['call'],\n phone=phone,\n time=time,\n url=url,\n )\n\n return HttpResponse('ok')\n\n\nclass YandexOrder(OrderPage):\n\n def post(self, request):\n cart = self.cart(request.session)\n form = self.order_form(request.POST.dict())\n\n if not form.is_valid():\n return render(request, self.template, {'cart': cart, 'form': form})\n\n order = form.save()\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n\n # Took form fields from Yandex docs https://goo.gl/afKfsz\n response_data = {\n 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required\n 'shopId': settings.SHOP['id'], # Required\n 'scid': settings.SHOP['scid'], # Required\n 'shopSuccessURL': settings.SHOP['success_url'],\n 'shopFailURL': settings.SHOP['fail_url'],\n 'customerNumber': order.id, # Required\n 'sum': order.total_price, # Required\n 'orderNumber': order.fake_order_number,\n 'cps_phone': order.phone,\n 'cps_email': order.email,\n 'paymentType': request.POST.get('payment_type'),\n }\n\n return JsonResponse(response_data)\n", "path": "shopelectro/views/ecommerce.py"}]}
1,839
232
gh_patches_debug_32456
rasdani/github-patches
git_diff
PlasmaPy__PlasmaPy-217
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create physics exceptions and warnings Several of the exceptions and warnings that come up in PlasmaPy result from when we try to violate the laws of physics. Most of the time these violations cause a `UserWarning` or raise a `ValueError`, but these are pretty generic and don't give much insight into the cause of the problem. To help with exception handling, I propose that we create new exceptions and warnings, such as `PhysicsError` and `PhysicsWarning`. We could additionally be more specific and also have `RelativityError` and/or `RelativityWarning`. One possibility would be to put these in a new file called `plasmapy/utils/exceptions.py`. This would be a good first contribution for someone who is somewhat familiar with Python. Here's documentation on [user-defined exceptions](https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions. Thank you! </issue> <code> [start of plasmapy/utils/exceptions.py] 1 """ 2 Custom Error and Warning names to improve readability 3 """ 4 5 6 # ---------- 7 # Exceptions: 8 # ---------- 9 10 class PlasmaPyError(Exception): 11 """ 12 Base class of PlasmaPy custom errors. 13 14 All custom exceptions raised by PlasmaPy should inherit from this class 15 and be defined in this module. 16 17 Custom exceptions can inherit from other exception types too. Thus, if code 18 already knows how to handle a ValueError, it won't need any specific 19 modification. 20 """ 21 22 23 class PhysicsError(PlasmaPyError, ValueError): 24 """Error for use of a physics value outside PlasmaPy theoretical bounds""" 25 26 27 class RelativityError(PhysicsError): 28 """Error for use of a speed greater than or equal to the speed of light""" 29 30 31 # ---------- 32 # Warnings: 33 # ---------- 34 35 class PlasmaPyWarning(Warning): 36 """Base class of PlasmaPy custom warnings. 37 38 All PlasmaPy custom warnings should inherit from this class and be defined 39 in this module. 40 41 Warnings should be issued using warnings.warn, which will not break 42 execution if unhandled. 43 """ 44 45 46 class PhysicsWarning(PlasmaPyWarning): 47 """Warning for using a mildly worrisome physics value""" 48 49 50 class RelativityWarning(PhysicsWarning): 51 """Warning for use of a speed quantity approaching the speed of light""" 52 [end of plasmapy/utils/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plasmapy/utils/exceptions.py b/plasmapy/utils/exceptions.py --- a/plasmapy/utils/exceptions.py +++ b/plasmapy/utils/exceptions.py @@ -18,14 +18,51 @@ already knows how to handle a ValueError, it won't need any specific modification. """ + pass class PhysicsError(PlasmaPyError, ValueError): """Error for use of a physics value outside PlasmaPy theoretical bounds""" + pass class RelativityError(PhysicsError): """Error for use of a speed greater than or equal to the speed of light""" + pass + + +class AtomicError(PlasmaPyError): + """Error for use by an atomic subpackage""" + pass + + +class MissingAtomicDataError(AtomicError): + """Error for use when atomic data is missing.""" + pass + + +class NoChargeInfoError(AtomicError): + """Error for use when charge information is needed but missing.""" + + +class IonError(NoChargeInfoError): + """Error for use when an ion is invalid.""" + pass + + +class IsotopeError(AtomicError): + """Error for use when an isotope is invalid.""" + pass + + +class ElementError(IsotopeError, IonError): + """Error for use when an element is invalid.""" + pass + + +class ParticleError(ElementError): + """Error for use when a particle is invalid.""" + pass # ---------- @@ -41,11 +78,24 @@ Warnings should be issued using warnings.warn, which will not break execution if unhandled. """ + pass class PhysicsWarning(PlasmaPyWarning): """Warning for using a mildly worrisome physics value""" + pass class RelativityWarning(PhysicsWarning): """Warning for use of a speed quantity approaching the speed of light""" + pass + + +class AtomicWarning(PlasmaPyWarning): + """Warnings for use in the atomic subpackage.""" + pass + + +class MissingAtomicDataWarning(AtomicWarning): + """Warning for use when atomic data is missing.""" + pass
{"golden_diff": "diff --git a/plasmapy/utils/exceptions.py b/plasmapy/utils/exceptions.py\n--- a/plasmapy/utils/exceptions.py\n+++ b/plasmapy/utils/exceptions.py\n@@ -18,14 +18,51 @@\n already knows how to handle a ValueError, it won't need any specific\n modification.\n \"\"\"\n+ pass\n \n \n class PhysicsError(PlasmaPyError, ValueError):\n \"\"\"Error for use of a physics value outside PlasmaPy theoretical bounds\"\"\"\n+ pass\n \n \n class RelativityError(PhysicsError):\n \"\"\"Error for use of a speed greater than or equal to the speed of light\"\"\"\n+ pass\n+\n+\n+class AtomicError(PlasmaPyError):\n+ \"\"\"Error for use by an atomic subpackage\"\"\"\n+ pass\n+\n+\n+class MissingAtomicDataError(AtomicError):\n+ \"\"\"Error for use when atomic data is missing.\"\"\"\n+ pass\n+\n+\n+class NoChargeInfoError(AtomicError):\n+ \"\"\"Error for use when charge information is needed but missing.\"\"\"\n+\n+\n+class IonError(NoChargeInfoError):\n+ \"\"\"Error for use when an ion is invalid.\"\"\"\n+ pass\n+\n+\n+class IsotopeError(AtomicError):\n+ \"\"\"Error for use when an isotope is invalid.\"\"\"\n+ pass\n+\n+\n+class ElementError(IsotopeError, IonError):\n+ \"\"\"Error for use when an element is invalid.\"\"\"\n+ pass\n+\n+\n+class ParticleError(ElementError):\n+ \"\"\"Error for use when a particle is invalid.\"\"\"\n+ pass\n \n \n # ----------\n@@ -41,11 +78,24 @@\n Warnings should be issued using warnings.warn, which will not break\n execution if unhandled.\n \"\"\"\n+ pass\n \n \n class PhysicsWarning(PlasmaPyWarning):\n \"\"\"Warning for using a mildly worrisome physics value\"\"\"\n+ pass\n \n \n class RelativityWarning(PhysicsWarning):\n \"\"\"Warning for use of a speed quantity approaching the speed of light\"\"\"\n+ pass\n+\n+\n+class AtomicWarning(PlasmaPyWarning):\n+ \"\"\"Warnings for use in the atomic subpackage.\"\"\"\n+ pass\n+\n+\n+class MissingAtomicDataWarning(AtomicWarning):\n+ \"\"\"Warning for use when atomic data is missing.\"\"\"\n+ pass\n", "issue": "Create physics exceptions and warnings\nSeveral of the exceptions and warnings that come up in PlasmaPy result from when we try to violate the laws of physics. Most of the time these violations cause a `UserWarning` or raise a `ValueError`, but these are pretty generic and don't give much insight into the cause of the problem. To help with exception handling, I propose that we create new exceptions and warnings, such as `PhysicsError` and `PhysicsWarning`. We could additionally be more specific and also have `RelativityError` and/or `RelativityWarning`. One possibility would be to put these in a new file called `plasmapy/utils/exceptions.py`.\r\n\r\nThis would be a good first contribution for someone who is somewhat familiar with Python. Here's documentation on [user-defined exceptions](https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions. Thank you!\r\n\n", "before_files": [{"content": "\"\"\"\nCustom Error and Warning names to improve readability\n\"\"\"\n\n\n# ----------\n# Exceptions:\n# ----------\n\nclass PlasmaPyError(Exception):\n \"\"\"\n Base class of PlasmaPy custom errors.\n\n All custom exceptions raised by PlasmaPy should inherit from this class\n and be defined in this module.\n\n Custom exceptions can inherit from other exception types too. 
Thus, if code\n already knows how to handle a ValueError, it won't need any specific\n modification.\n \"\"\"\n\n\nclass PhysicsError(PlasmaPyError, ValueError):\n \"\"\"Error for use of a physics value outside PlasmaPy theoretical bounds\"\"\"\n\n\nclass RelativityError(PhysicsError):\n \"\"\"Error for use of a speed greater than or equal to the speed of light\"\"\"\n\n\n# ----------\n# Warnings:\n# ----------\n\nclass PlasmaPyWarning(Warning):\n \"\"\"Base class of PlasmaPy custom warnings.\n\n All PlasmaPy custom warnings should inherit from this class and be defined\n in this module.\n\n Warnings should be issued using warnings.warn, which will not break\n execution if unhandled.\n \"\"\"\n\n\nclass PhysicsWarning(PlasmaPyWarning):\n \"\"\"Warning for using a mildly worrisome physics value\"\"\"\n\n\nclass RelativityWarning(PhysicsWarning):\n \"\"\"Warning for use of a speed quantity approaching the speed of light\"\"\"\n", "path": "plasmapy/utils/exceptions.py"}]}
1,103
492
gh_patches_debug_916
rasdani/github-patches
git_diff
facebookresearch__ParlAI-581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can we keep a mturk task from outside parlai/mturk/tasks? Hi @JackUrb, I have a few questions regarding the mturk evaluation: 1. This link (http://parl.ai/static/docs/mturk.html#running-a-task) says that > to run an MTurk task, first ensure that the task directory is in `parlai/mturk/tasks/`. Is it by design? I tried to keep my task in some other directory (outside root parlai directory) and tried to import parlai as a package but that doesn't seem to work. Basically I am trying to use ParlAI as one of the dependency for one of my project for Human In the loop evaluation. 2. How easy/hard it is to provide support for keeping the mturk task anywhere? </issue> <code> [start of setup.py] 1 # Copyright (c) 2017-present, Facebook, Inc. 2 # All rights reserved. 3 # This source code is licensed under the BSD-style license found in the 4 # LICENSE file in the root directory of this source tree. An additional grant 5 # of patent rights can be found in the PATENTS file in the same directory. 6 7 8 from setuptools import setup, find_packages 9 import sys 10 11 if sys.version_info < (3,): 12 sys.exit('Sorry, Python3 is required for ParlAI.') 13 14 with open('README.md', encoding="utf8") as f: 15 readme = f.read() 16 17 with open('LICENSE') as f: 18 license = f.read() 19 20 with open('requirements.txt') as f: 21 reqs = f.read() 22 23 setup( 24 name='parlai', 25 version='0.1.0', 26 description='Unified API for accessing dialog datasets.', 27 long_description=readme, 28 url='http://parl.ai/', 29 license=license, 30 packages=find_packages(exclude=( 31 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')), 32 install_requires=reqs.strip().split('\n'), 33 ) 34 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,4 +30,5 @@ packages=find_packages(exclude=( 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')), install_requires=reqs.strip().split('\n'), + include_package_data=True, )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,4 +30,5 @@\n packages=find_packages(exclude=(\n 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),\n install_requires=reqs.strip().split('\\n'),\n+ include_package_data=True,\n )\n", "issue": "Can we keep a mturk task from outside parlai/mturk/tasks?\nHi @JackUrb, I have a few questions regarding the mturk evaluation:\r\n\r\n1. This link (http://parl.ai/static/docs/mturk.html#running-a-task) says that \r\n\r\n> to run an MTurk task, first ensure that the task directory is in `parlai/mturk/tasks/`. \r\n\r\n Is it by design? I tried to keep my task in some other directory (outside root parlai directory) and tried to import parlai as a package but that doesn't seem to work. Basically I am trying to use ParlAI as one of the dependency for one of my project for Human In the loop evaluation.\r\n\r\n2. How easy/hard it is to provide support for keeping the mturk task anywhere?\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\n\nfrom setuptools import setup, find_packages\nimport sys\n\nif sys.version_info < (3,):\n sys.exit('Sorry, Python3 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nwith open('requirements.txt') as f:\n reqs = f.read()\n\nsetup(\n name='parlai',\n version='0.1.0',\n description='Unified API for accessing dialog datasets.',\n long_description=readme,\n url='http://parl.ai/',\n license=license,\n packages=find_packages(exclude=(\n 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),\n install_requires=reqs.strip().split('\\n'),\n)\n", "path": "setup.py"}]}
1,010
77
gh_patches_debug_26439
rasdani/github-patches
git_diff
python-discord__bot-772
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Verify command shouldn't log tracebacks for DM failure. Sentry Issue: [BOT-11](https://sentry.io/organizations/python-discord/issues/1526042507/?referrer=github_integration) It currently uses `log.exception()` for all exceptions. `Forbidden` should be a special case and use a lower log level. A better approach may be to only handle `Forbidden` and re-raise all other exceptions (to defer it to the default command error handler). A `finally` block can be used to still delete the message afterwards. ```py try: await ctx.author.send(WELCOME_MESSAGE) except Exception: # Catch the exception, in case they have DMs off or something log.exception(f"Unable to send welcome message to user {ctx.author}.") ``` ``` Forbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user File "bot/cogs/verification.py", line 129, in accept_command await ctx.author.send(WELCOME_MESSAGE) File "discord/abc.py", line 856, in send data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce) File "discord/http.py", line 216, in request raise Forbidden(r, data) Unable to send welcome message to user <REDACTED>. ``` </issue> <code> [start of bot/cogs/verification.py] 1 import logging 2 from datetime import datetime 3 4 from discord import Colour, Message, NotFound, Object 5 from discord.ext import tasks 6 from discord.ext.commands import Cog, Context, command 7 8 from bot.bot import Bot 9 from bot.cogs.moderation import ModLog 10 from bot.constants import ( 11 Bot as BotConfig, 12 Channels, Colours, Event, 13 Filter, Icons, MODERATION_ROLES, Roles 14 ) 15 from bot.decorators import InChannelCheckFailure, in_channel, without_role 16 from bot.utils.checks import without_role_check 17 18 log = logging.getLogger(__name__) 19 20 WELCOME_MESSAGE = f""" 21 Hello! Welcome to the server, and thanks for verifying yourself! 22 23 For your records, these are the documents you accepted: 24 25 `1)` Our rules, here: <https://pythondiscord.com/pages/rules> 26 `2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \ 27 your information removed here as well. 28 29 Feel free to review them at any point! 30 31 Additionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \ 32 from time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \ 33 **Announcements** role. We'll mention this role every time we make an announcement. 34 35 If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>. 36 """ 37 38 PERIODIC_PING = ( 39 f"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`." 40 f" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel." 
41 ) 42 BOT_MESSAGE_DELETE_DELAY = 10 43 44 45 class Verification(Cog): 46 """User verification and role self-management.""" 47 48 def __init__(self, bot: Bot): 49 self.bot = bot 50 self.periodic_ping.start() 51 52 @property 53 def mod_log(self) -> ModLog: 54 """Get currently loaded ModLog cog instance.""" 55 return self.bot.get_cog("ModLog") 56 57 @Cog.listener() 58 async def on_message(self, message: Message) -> None: 59 """Check new message event for messages to the checkpoint channel & process.""" 60 if message.channel.id != Channels.verification: 61 return # Only listen for #checkpoint messages 62 63 if message.author.bot: 64 # They're a bot, delete their message after the delay. 65 # But not the periodic ping; we like that one. 66 if message.content != PERIODIC_PING: 67 await message.delete(delay=BOT_MESSAGE_DELETE_DELAY) 68 return 69 70 # if a user mentions a role or guild member 71 # alert the mods in mod-alerts channel 72 if message.mentions or message.role_mentions: 73 log.debug( 74 f"{message.author} mentioned one or more users " 75 f"and/or roles in {message.channel.name}" 76 ) 77 78 embed_text = ( 79 f"{message.author.mention} sent a message in " 80 f"{message.channel.mention} that contained user and/or role mentions." 81 f"\n\n**Original message:**\n>>> {message.content}" 82 ) 83 84 # Send pretty mod log embed to mod-alerts 85 await self.mod_log.send_log_message( 86 icon_url=Icons.filtering, 87 colour=Colour(Colours.soft_red), 88 title=f"User/Role mentioned in {message.channel.name}", 89 text=embed_text, 90 thumbnail=message.author.avatar_url_as(static_format="png"), 91 channel_id=Channels.mod_alerts, 92 ping_everyone=Filter.ping_everyone, 93 ) 94 95 ctx = await self.bot.get_context(message) # type: Context 96 97 if ctx.command is not None and ctx.command.name == "accept": 98 return # They used the accept command 99 100 for role in ctx.author.roles: 101 if role.id == Roles.verified: 102 log.warning(f"{ctx.author} posted '{ctx.message.content}' " 103 "in the verification channel, but is already verified.") 104 return # They're already verified 105 106 log.debug(f"{ctx.author} posted '{ctx.message.content}' in the verification " 107 "channel. We are providing instructions how to verify.") 108 await ctx.send( 109 f"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, " 110 f"and gain access to the rest of the server.", 111 delete_after=20 112 ) 113 114 log.trace(f"Deleting the message posted by {ctx.author}") 115 116 try: 117 await ctx.message.delete() 118 except NotFound: 119 log.trace("No message found, it must have been deleted by another bot.") 120 121 @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True) 122 @without_role(Roles.verified) 123 @in_channel(Channels.verification) 124 async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args 125 """Accept our rules and gain access to the rest of the server.""" 126 log.debug(f"{ctx.author} called !accept. 
Assigning the 'Developer' role.") 127 await ctx.author.add_roles(Object(Roles.verified), reason="Accepted the rules") 128 try: 129 await ctx.author.send(WELCOME_MESSAGE) 130 except Exception: 131 # Catch the exception, in case they have DMs off or something 132 log.exception(f"Unable to send welcome message to user {ctx.author}.") 133 134 log.trace(f"Deleting the message posted by {ctx.author}.") 135 136 try: 137 self.mod_log.ignore(Event.message_delete, ctx.message.id) 138 await ctx.message.delete() 139 except NotFound: 140 log.trace("No message found, it must have been deleted by another bot.") 141 142 @command(name='subscribe') 143 @in_channel(Channels.bot) 144 async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args 145 """Subscribe to announcement notifications by assigning yourself the role.""" 146 has_role = False 147 148 for role in ctx.author.roles: 149 if role.id == Roles.announcements: 150 has_role = True 151 break 152 153 if has_role: 154 await ctx.send(f"{ctx.author.mention} You're already subscribed!") 155 return 156 157 log.debug(f"{ctx.author} called !subscribe. Assigning the 'Announcements' role.") 158 await ctx.author.add_roles(Object(Roles.announcements), reason="Subscribed to announcements") 159 160 log.trace(f"Deleting the message posted by {ctx.author}.") 161 162 await ctx.send( 163 f"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.", 164 ) 165 166 @command(name='unsubscribe') 167 @in_channel(Channels.bot) 168 async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args 169 """Unsubscribe from announcement notifications by removing the role from yourself.""" 170 has_role = False 171 172 for role in ctx.author.roles: 173 if role.id == Roles.announcements: 174 has_role = True 175 break 176 177 if not has_role: 178 await ctx.send(f"{ctx.author.mention} You're already unsubscribed!") 179 return 180 181 log.debug(f"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.") 182 await ctx.author.remove_roles(Object(Roles.announcements), reason="Unsubscribed from announcements") 183 184 log.trace(f"Deleting the message posted by {ctx.author}.") 185 186 await ctx.send( 187 f"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications." 188 ) 189 190 # This cannot be static (must have a __func__ attribute). 191 async def cog_command_error(self, ctx: Context, error: Exception) -> None: 192 """Check for & ignore any InChannelCheckFailure.""" 193 if isinstance(error, InChannelCheckFailure): 194 error.handled = True 195 196 @staticmethod 197 def bot_check(ctx: Context) -> bool: 198 """Block any command within the verification channel that is not !accept.""" 199 if ctx.channel.id == Channels.verification and without_role_check(ctx, *MODERATION_ROLES): 200 return ctx.command.name == "accept" 201 else: 202 return True 203 204 @tasks.loop(hours=12) 205 async def periodic_ping(self) -> None: 206 """Every week, mention @everyone to remind them to verify.""" 207 messages = self.bot.get_channel(Channels.verification).history(limit=10) 208 need_to_post = True # True if a new message needs to be sent. 209 210 async for message in messages: 211 if message.author == self.bot.user and message.content == PERIODIC_PING: 212 delta = datetime.utcnow() - message.created_at # Time since last message. 213 if delta.days >= 7: # Message is older than a week. 
214 await message.delete() 215 else: 216 need_to_post = False 217 218 break 219 220 if need_to_post: 221 await self.bot.get_channel(Channels.verification).send(PERIODIC_PING) 222 223 @periodic_ping.before_loop 224 async def before_ping(self) -> None: 225 """Only start the loop when the bot is ready.""" 226 await self.bot.wait_until_ready() 227 228 def cog_unload(self) -> None: 229 """Cancel the periodic ping task when the cog is unloaded.""" 230 self.periodic_ping.cancel() 231 232 233 def setup(bot: Bot) -> None: 234 """Load the Verification cog.""" 235 bot.add_cog(Verification(bot)) 236 [end of bot/cogs/verification.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py --- a/bot/cogs/verification.py +++ b/bot/cogs/verification.py @@ -1,7 +1,8 @@ import logging +from contextlib import suppress from datetime import datetime -from discord import Colour, Message, NotFound, Object +from discord import Colour, Forbidden, Message, NotFound, Object from discord.ext import tasks from discord.ext.commands import Cog, Context, command @@ -127,17 +128,13 @@ await ctx.author.add_roles(Object(Roles.verified), reason="Accepted the rules") try: await ctx.author.send(WELCOME_MESSAGE) - except Exception: - # Catch the exception, in case they have DMs off or something - log.exception(f"Unable to send welcome message to user {ctx.author}.") - - log.trace(f"Deleting the message posted by {ctx.author}.") - - try: - self.mod_log.ignore(Event.message_delete, ctx.message.id) - await ctx.message.delete() - except NotFound: - log.trace("No message found, it must have been deleted by another bot.") + except Forbidden: + log.info(f"Sending welcome message failed for {ctx.author}.") + finally: + log.trace(f"Deleting accept message by {ctx.author}.") + with suppress(NotFound): + self.mod_log.ignore(Event.message_delete, ctx.message.id) + await ctx.message.delete() @command(name='subscribe') @in_channel(Channels.bot)
{"golden_diff": "diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py\n--- a/bot/cogs/verification.py\n+++ b/bot/cogs/verification.py\n@@ -1,7 +1,8 @@\n import logging\n+from contextlib import suppress\n from datetime import datetime\n \n-from discord import Colour, Message, NotFound, Object\n+from discord import Colour, Forbidden, Message, NotFound, Object\n from discord.ext import tasks\n from discord.ext.commands import Cog, Context, command\n \n@@ -127,17 +128,13 @@\n await ctx.author.add_roles(Object(Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n- except Exception:\n- # Catch the exception, in case they have DMs off or something\n- log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\n-\n- log.trace(f\"Deleting the message posted by {ctx.author}.\")\n-\n- try:\n- self.mod_log.ignore(Event.message_delete, ctx.message.id)\n- await ctx.message.delete()\n- except NotFound:\n- log.trace(\"No message found, it must have been deleted by another bot.\")\n+ except Forbidden:\n+ log.info(f\"Sending welcome message failed for {ctx.author}.\")\n+ finally:\n+ log.trace(f\"Deleting accept message by {ctx.author}.\")\n+ with suppress(NotFound):\n+ self.mod_log.ignore(Event.message_delete, ctx.message.id)\n+ await ctx.message.delete()\n \n @command(name='subscribe')\n @in_channel(Channels.bot)\n", "issue": "Verify command shouldn't log tracebacks for DM failure.\nSentry Issue: [BOT-11](https://sentry.io/organizations/python-discord/issues/1526042507/?referrer=github_integration)\r\n\r\nIt currently uses `log.exception()` for all exceptions. `Forbidden` should be a special case and use a lower log level. A better approach may be to only handle `Forbidden` and re-raise all other exceptions (to defer it to the default command error handler). A `finally` block can be used to still delete the message afterwards.\r\n\r\n```py\r\n try:\r\n await ctx.author.send(WELCOME_MESSAGE)\r\n except Exception:\r\n # Catch the exception, in case they have DMs off or something\r\n log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\r\n```\r\n\r\n```\r\nForbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user\r\n File \"bot/cogs/verification.py\", line 129, in accept_command\r\n await ctx.author.send(WELCOME_MESSAGE)\r\n File \"discord/abc.py\", line 856, in send\r\n data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)\r\n File \"discord/http.py\", line 216, in request\r\n raise Forbidden(r, data)\r\n\r\nUnable to send welcome message to user <REDACTED>.\r\n```\n", "before_files": [{"content": "import logging\nfrom datetime import datetime\n\nfrom discord import Colour, Message, NotFound, Object\nfrom discord.ext import tasks\nfrom discord.ext.commands import Cog, Context, command\n\nfrom bot.bot import Bot\nfrom bot.cogs.moderation import ModLog\nfrom bot.constants import (\n Bot as BotConfig,\n Channels, Colours, Event,\n Filter, Icons, MODERATION_ROLES, Roles\n)\nfrom bot.decorators import InChannelCheckFailure, in_channel, without_role\nfrom bot.utils.checks import without_role_check\n\nlog = logging.getLogger(__name__)\n\nWELCOME_MESSAGE = f\"\"\"\nHello! 
Welcome to the server, and thanks for verifying yourself!\n\nFor your records, these are the documents you accepted:\n\n`1)` Our rules, here: <https://pythondiscord.com/pages/rules>\n`2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \\\nyour information removed here as well.\n\nFeel free to review them at any point!\n\nAdditionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \\\nfrom time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \\\n**Announcements** role. We'll mention this role every time we make an announcement.\n\nIf you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.\n\"\"\"\n\nPERIODIC_PING = (\n f\"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`.\"\n f\" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel.\"\n)\nBOT_MESSAGE_DELETE_DELAY = 10\n\n\nclass Verification(Cog):\n \"\"\"User verification and role self-management.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.periodic_ping.start()\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Get currently loaded ModLog cog instance.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Check new message event for messages to the checkpoint channel & process.\"\"\"\n if message.channel.id != Channels.verification:\n return # Only listen for #checkpoint messages\n\n if message.author.bot:\n # They're a bot, delete their message after the delay.\n # But not the periodic ping; we like that one.\n if message.content != PERIODIC_PING:\n await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)\n return\n\n # if a user mentions a role or guild member\n # alert the mods in mod-alerts channel\n if message.mentions or message.role_mentions:\n log.debug(\n f\"{message.author} mentioned one or more users \"\n f\"and/or roles in {message.channel.name}\"\n )\n\n embed_text = (\n f\"{message.author.mention} sent a message in \"\n f\"{message.channel.mention} that contained user and/or role mentions.\"\n f\"\\n\\n**Original message:**\\n>>> {message.content}\"\n )\n\n # Send pretty mod log embed to mod-alerts\n await self.mod_log.send_log_message(\n icon_url=Icons.filtering,\n colour=Colour(Colours.soft_red),\n title=f\"User/Role mentioned in {message.channel.name}\",\n text=embed_text,\n thumbnail=message.author.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_alerts,\n ping_everyone=Filter.ping_everyone,\n )\n\n ctx = await self.bot.get_context(message) # type: Context\n\n if ctx.command is not None and ctx.command.name == \"accept\":\n return # They used the accept command\n\n for role in ctx.author.roles:\n if role.id == Roles.verified:\n log.warning(f\"{ctx.author} posted '{ctx.message.content}' \"\n \"in the verification channel, but is already verified.\")\n return # They're already verified\n\n log.debug(f\"{ctx.author} posted '{ctx.message.content}' in the verification \"\n \"channel. 
We are providing instructions how to verify.\")\n await ctx.send(\n f\"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, \"\n f\"and gain access to the rest of the server.\",\n delete_after=20\n )\n\n log.trace(f\"Deleting the message posted by {ctx.author}\")\n\n try:\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)\n @without_role(Roles.verified)\n @in_channel(Channels.verification)\n async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Accept our rules and gain access to the rest of the server.\"\"\"\n log.debug(f\"{ctx.author} called !accept. Assigning the 'Developer' role.\")\n await ctx.author.add_roles(Object(Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n except Exception:\n # Catch the exception, in case they have DMs off or something\n log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n try:\n self.mod_log.ignore(Event.message_delete, ctx.message.id)\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='subscribe')\n @in_channel(Channels.bot)\n async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Subscribe to announcement notifications by assigning yourself the role.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if has_role:\n await ctx.send(f\"{ctx.author.mention} You're already subscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !subscribe. Assigning the 'Announcements' role.\")\n await ctx.author.add_roles(Object(Roles.announcements), reason=\"Subscribed to announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.\",\n )\n\n @command(name='unsubscribe')\n @in_channel(Channels.bot)\n async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Unsubscribe from announcement notifications by removing the role from yourself.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if not has_role:\n await ctx.send(f\"{ctx.author.mention} You're already unsubscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !unsubscribe. 
Removing the 'Announcements' role.\")\n await ctx.author.remove_roles(Object(Roles.announcements), reason=\"Unsubscribed from announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications.\"\n )\n\n # This cannot be static (must have a __func__ attribute).\n async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n \"\"\"Check for & ignore any InChannelCheckFailure.\"\"\"\n if isinstance(error, InChannelCheckFailure):\n error.handled = True\n\n @staticmethod\n def bot_check(ctx: Context) -> bool:\n \"\"\"Block any command within the verification channel that is not !accept.\"\"\"\n if ctx.channel.id == Channels.verification and without_role_check(ctx, *MODERATION_ROLES):\n return ctx.command.name == \"accept\"\n else:\n return True\n\n @tasks.loop(hours=12)\n async def periodic_ping(self) -> None:\n \"\"\"Every week, mention @everyone to remind them to verify.\"\"\"\n messages = self.bot.get_channel(Channels.verification).history(limit=10)\n need_to_post = True # True if a new message needs to be sent.\n\n async for message in messages:\n if message.author == self.bot.user and message.content == PERIODIC_PING:\n delta = datetime.utcnow() - message.created_at # Time since last message.\n if delta.days >= 7: # Message is older than a week.\n await message.delete()\n else:\n need_to_post = False\n\n break\n\n if need_to_post:\n await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)\n\n @periodic_ping.before_loop\n async def before_ping(self) -> None:\n \"\"\"Only start the loop when the bot is ready.\"\"\"\n await self.bot.wait_until_ready()\n\n def cog_unload(self) -> None:\n \"\"\"Cancel the periodic ping task when the cog is unloaded.\"\"\"\n self.periodic_ping.cancel()\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Verification cog.\"\"\"\n bot.add_cog(Verification(bot))\n", "path": "bot/cogs/verification.py"}]}
3,495
343
gh_patches_debug_16255
rasdani/github-patches
git_diff
redis__redis-py-3042
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `disable_decoding` not used in async version of `read_response`. **Version**: redis-py version - 5.0.0 (I still see it on main) **Description**: Is there a reason why `disable_decoding` is not being used in async version of `read_response`? It seems like there's no async way to get raw data (`NEVER_DECODE`) from Redis. Are there any plans of adding the `NEVER_DECODE` support for async `read_response`? We want to save pickled data (not compatible with `UTF-8`) in Redis, and for now we will use the workaround (as in https://github.com/redis/redis-py/issues/1006#issuecomment-1100506453 ), but it's not a very clean solution. redis/_parsers/hiredis.py:204 https://github.com/redis/redis-py/blob/d3a3ada03e080f39144807c9fbe44876c40e0548/redis/_parsers/hiredis.py#L192-L217 </issue> <code> [start of redis/_parsers/hiredis.py] 1 import asyncio 2 import socket 3 import sys 4 from typing import Callable, List, Optional, Union 5 6 if sys.version_info.major >= 3 and sys.version_info.minor >= 11: 7 from asyncio import timeout as async_timeout 8 else: 9 from async_timeout import timeout as async_timeout 10 11 from redis.compat import TypedDict 12 13 from ..exceptions import ConnectionError, InvalidResponse, RedisError 14 from ..typing import EncodableT 15 from ..utils import HIREDIS_AVAILABLE 16 from .base import AsyncBaseParser, BaseParser 17 from .socket import ( 18 NONBLOCKING_EXCEPTION_ERROR_NUMBERS, 19 NONBLOCKING_EXCEPTIONS, 20 SENTINEL, 21 SERVER_CLOSED_CONNECTION_ERROR, 22 ) 23 24 25 class _HiredisReaderArgs(TypedDict, total=False): 26 protocolError: Callable[[str], Exception] 27 replyError: Callable[[str], Exception] 28 encoding: Optional[str] 29 errors: Optional[str] 30 31 32 class _HiredisParser(BaseParser): 33 "Parser class for connections using Hiredis" 34 35 def __init__(self, socket_read_size): 36 if not HIREDIS_AVAILABLE: 37 raise RedisError("Hiredis is not installed") 38 self.socket_read_size = socket_read_size 39 self._buffer = bytearray(socket_read_size) 40 41 def __del__(self): 42 try: 43 self.on_disconnect() 44 except Exception: 45 pass 46 47 def on_connect(self, connection, **kwargs): 48 import hiredis 49 50 self._sock = connection._sock 51 self._socket_timeout = connection.socket_timeout 52 kwargs = { 53 "protocolError": InvalidResponse, 54 "replyError": self.parse_error, 55 "errors": connection.encoder.encoding_errors, 56 } 57 58 if connection.encoder.decode_responses: 59 kwargs["encoding"] = connection.encoder.encoding 60 self._reader = hiredis.Reader(**kwargs) 61 self._next_response = False 62 63 def on_disconnect(self): 64 self._sock = None 65 self._reader = None 66 self._next_response = False 67 68 def can_read(self, timeout): 69 if not self._reader: 70 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) 71 72 if self._next_response is False: 73 self._next_response = self._reader.gets() 74 if self._next_response is False: 75 return self.read_from_socket(timeout=timeout, raise_on_timeout=False) 76 return True 77 78 def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True): 79 sock = self._sock 80 custom_timeout = timeout is not SENTINEL 81 try: 82 if custom_timeout: 83 sock.settimeout(timeout) 84 bufflen = self._sock.recv_into(self._buffer) 85 if bufflen == 0: 86 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) 87 self._reader.feed(self._buffer, 0, bufflen) 88 # data was read from the socket and added to the buffer. 
89 # return True to indicate that data was read. 90 return True 91 except socket.timeout: 92 if raise_on_timeout: 93 raise TimeoutError("Timeout reading from socket") 94 return False 95 except NONBLOCKING_EXCEPTIONS as ex: 96 # if we're in nonblocking mode and the recv raises a 97 # blocking error, simply return False indicating that 98 # there's no data to be read. otherwise raise the 99 # original exception. 100 allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1) 101 if not raise_on_timeout and ex.errno == allowed: 102 return False 103 raise ConnectionError(f"Error while reading from socket: {ex.args}") 104 finally: 105 if custom_timeout: 106 sock.settimeout(self._socket_timeout) 107 108 def read_response(self, disable_decoding=False): 109 if not self._reader: 110 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) 111 112 # _next_response might be cached from a can_read() call 113 if self._next_response is not False: 114 response = self._next_response 115 self._next_response = False 116 return response 117 118 if disable_decoding: 119 response = self._reader.gets(False) 120 else: 121 response = self._reader.gets() 122 123 while response is False: 124 self.read_from_socket() 125 if disable_decoding: 126 response = self._reader.gets(False) 127 else: 128 response = self._reader.gets() 129 # if the response is a ConnectionError or the response is a list and 130 # the first item is a ConnectionError, raise it as something bad 131 # happened 132 if isinstance(response, ConnectionError): 133 raise response 134 elif ( 135 isinstance(response, list) 136 and response 137 and isinstance(response[0], ConnectionError) 138 ): 139 raise response[0] 140 return response 141 142 143 class _AsyncHiredisParser(AsyncBaseParser): 144 """Async implementation of parser class for connections using Hiredis""" 145 146 __slots__ = ("_reader",) 147 148 def __init__(self, socket_read_size: int): 149 if not HIREDIS_AVAILABLE: 150 raise RedisError("Hiredis is not available.") 151 super().__init__(socket_read_size=socket_read_size) 152 self._reader = None 153 154 def on_connect(self, connection): 155 import hiredis 156 157 self._stream = connection._reader 158 kwargs: _HiredisReaderArgs = { 159 "protocolError": InvalidResponse, 160 "replyError": self.parse_error, 161 } 162 if connection.encoder.decode_responses: 163 kwargs["encoding"] = connection.encoder.encoding 164 kwargs["errors"] = connection.encoder.encoding_errors 165 166 self._reader = hiredis.Reader(**kwargs) 167 self._connected = True 168 169 def on_disconnect(self): 170 self._connected = False 171 172 async def can_read_destructive(self): 173 if not self._connected: 174 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) 175 if self._reader.gets(): 176 return True 177 try: 178 async with async_timeout(0): 179 return await self.read_from_socket() 180 except asyncio.TimeoutError: 181 return False 182 183 async def read_from_socket(self): 184 buffer = await self._stream.read(self._read_size) 185 if not buffer or not isinstance(buffer, bytes): 186 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None 187 self._reader.feed(buffer) 188 # data was read from the socket and added to the buffer. 189 # return True to indicate that data was read. 190 return True 191 192 async def read_response( 193 self, disable_decoding: bool = False 194 ) -> Union[EncodableT, List[EncodableT]]: 195 # If `on_disconnect()` has been called, prohibit any more reads 196 # even if they could happen because data might be present. 
197 # We still allow reads in progress to finish 198 if not self._connected: 199 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None 200 201 response = self._reader.gets() 202 while response is False: 203 await self.read_from_socket() 204 response = self._reader.gets() 205 206 # if the response is a ConnectionError or the response is a list and 207 # the first item is a ConnectionError, raise it as something bad 208 # happened 209 if isinstance(response, ConnectionError): 210 raise response 211 elif ( 212 isinstance(response, list) 213 and response 214 and isinstance(response[0], ConnectionError) 215 ): 216 raise response[0] 217 return response 218 [end of redis/_parsers/hiredis.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redis/_parsers/hiredis.py b/redis/_parsers/hiredis.py --- a/redis/_parsers/hiredis.py +++ b/redis/_parsers/hiredis.py @@ -198,10 +198,16 @@ if not self._connected: raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None - response = self._reader.gets() + if disable_decoding: + response = self._reader.gets(False) + else: + response = self._reader.gets() while response is False: await self.read_from_socket() - response = self._reader.gets() + if disable_decoding: + response = self._reader.gets(False) + else: + response = self._reader.gets() # if the response is a ConnectionError or the response is a list and # the first item is a ConnectionError, raise it as something bad
{"golden_diff": "diff --git a/redis/_parsers/hiredis.py b/redis/_parsers/hiredis.py\n--- a/redis/_parsers/hiredis.py\n+++ b/redis/_parsers/hiredis.py\n@@ -198,10 +198,16 @@\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n \n- response = self._reader.gets()\n+ if disable_decoding:\n+ response = self._reader.gets(False)\n+ else:\n+ response = self._reader.gets()\n while response is False:\n await self.read_from_socket()\n- response = self._reader.gets()\n+ if disable_decoding:\n+ response = self._reader.gets(False)\n+ else:\n+ response = self._reader.gets()\n \n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n", "issue": "`disable_decoding` not used in async version of `read_response`.\n**Version**: redis-py version - 5.0.0 (I still see it on main)\r\n\r\n**Description**:\r\nIs there a reason why `disable_decoding` is not being used in async version of `read_response`? \r\nIt seems like there's no async way to get raw data (`NEVER_DECODE`) from Redis.\r\nAre there any plans of adding the `NEVER_DECODE` support for async `read_response`?\r\nWe want to save pickled data (not compatible with `UTF-8`) in Redis, and for now we will use the workaround (as in https://github.com/redis/redis-py/issues/1006#issuecomment-1100506453 ), but it's not a very clean solution.\r\n\r\nredis/_parsers/hiredis.py:204\r\nhttps://github.com/redis/redis-py/blob/d3a3ada03e080f39144807c9fbe44876c40e0548/redis/_parsers/hiredis.py#L192-L217\r\n\r\n\n", "before_files": [{"content": "import asyncio\nimport socket\nimport sys\nfrom typing import Callable, List, Optional, Union\n\nif sys.version_info.major >= 3 and sys.version_info.minor >= 11:\n from asyncio import timeout as async_timeout\nelse:\n from async_timeout import timeout as async_timeout\n\nfrom redis.compat import TypedDict\n\nfrom ..exceptions import ConnectionError, InvalidResponse, RedisError\nfrom ..typing import EncodableT\nfrom ..utils import HIREDIS_AVAILABLE\nfrom .base import AsyncBaseParser, BaseParser\nfrom .socket import (\n NONBLOCKING_EXCEPTION_ERROR_NUMBERS,\n NONBLOCKING_EXCEPTIONS,\n SENTINEL,\n SERVER_CLOSED_CONNECTION_ERROR,\n)\n\n\nclass _HiredisReaderArgs(TypedDict, total=False):\n protocolError: Callable[[str], Exception]\n replyError: Callable[[str], Exception]\n encoding: Optional[str]\n errors: Optional[str]\n\n\nclass _HiredisParser(BaseParser):\n \"Parser class for connections using Hiredis\"\n\n def __init__(self, socket_read_size):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not installed\")\n self.socket_read_size = socket_read_size\n self._buffer = bytearray(socket_read_size)\n\n def __del__(self):\n try:\n self.on_disconnect()\n except Exception:\n pass\n\n def on_connect(self, connection, **kwargs):\n import hiredis\n\n self._sock = connection._sock\n self._socket_timeout = connection.socket_timeout\n kwargs = {\n \"protocolError\": InvalidResponse,\n \"replyError\": self.parse_error,\n \"errors\": connection.encoder.encoding_errors,\n }\n\n if connection.encoder.decode_responses:\n kwargs[\"encoding\"] = connection.encoder.encoding\n self._reader = hiredis.Reader(**kwargs)\n self._next_response = False\n\n def on_disconnect(self):\n self._sock = None\n self._reader = None\n self._next_response = False\n\n def can_read(self, timeout):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n if self._next_response is False:\n self._next_response = 
self._reader.gets()\n if self._next_response is False:\n return self.read_from_socket(timeout=timeout, raise_on_timeout=False)\n return True\n\n def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):\n sock = self._sock\n custom_timeout = timeout is not SENTINEL\n try:\n if custom_timeout:\n sock.settimeout(timeout)\n bufflen = self._sock.recv_into(self._buffer)\n if bufflen == 0:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n self._reader.feed(self._buffer, 0, bufflen)\n # data was read from the socket and added to the buffer.\n # return True to indicate that data was read.\n return True\n except socket.timeout:\n if raise_on_timeout:\n raise TimeoutError(\"Timeout reading from socket\")\n return False\n except NONBLOCKING_EXCEPTIONS as ex:\n # if we're in nonblocking mode and the recv raises a\n # blocking error, simply return False indicating that\n # there's no data to be read. otherwise raise the\n # original exception.\n allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)\n if not raise_on_timeout and ex.errno == allowed:\n return False\n raise ConnectionError(f\"Error while reading from socket: {ex.args}\")\n finally:\n if custom_timeout:\n sock.settimeout(self._socket_timeout)\n\n def read_response(self, disable_decoding=False):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n # _next_response might be cached from a can_read() call\n if self._next_response is not False:\n response = self._next_response\n self._next_response = False\n return response\n\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = self._reader.gets()\n\n while response is False:\n self.read_from_socket()\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = self._reader.gets()\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif (\n isinstance(response, list)\n and response\n and isinstance(response[0], ConnectionError)\n ):\n raise response[0]\n return response\n\n\nclass _AsyncHiredisParser(AsyncBaseParser):\n \"\"\"Async implementation of parser class for connections using Hiredis\"\"\"\n\n __slots__ = (\"_reader\",)\n\n def __init__(self, socket_read_size: int):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not available.\")\n super().__init__(socket_read_size=socket_read_size)\n self._reader = None\n\n def on_connect(self, connection):\n import hiredis\n\n self._stream = connection._reader\n kwargs: _HiredisReaderArgs = {\n \"protocolError\": InvalidResponse,\n \"replyError\": self.parse_error,\n }\n if connection.encoder.decode_responses:\n kwargs[\"encoding\"] = connection.encoder.encoding\n kwargs[\"errors\"] = connection.encoder.encoding_errors\n\n self._reader = hiredis.Reader(**kwargs)\n self._connected = True\n\n def on_disconnect(self):\n self._connected = False\n\n async def can_read_destructive(self):\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n if self._reader.gets():\n return True\n try:\n async with async_timeout(0):\n return await self.read_from_socket()\n except asyncio.TimeoutError:\n return False\n\n async def read_from_socket(self):\n buffer = await self._stream.read(self._read_size)\n if not buffer or not isinstance(buffer, bytes):\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n self._reader.feed(buffer)\n # data was read 
from the socket and added to the buffer.\n # return True to indicate that data was read.\n return True\n\n async def read_response(\n self, disable_decoding: bool = False\n ) -> Union[EncodableT, List[EncodableT]]:\n # If `on_disconnect()` has been called, prohibit any more reads\n # even if they could happen because data might be present.\n # We still allow reads in progress to finish\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n\n response = self._reader.gets()\n while response is False:\n await self.read_from_socket()\n response = self._reader.gets()\n\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif (\n isinstance(response, list)\n and response\n and isinstance(response[0], ConnectionError)\n ):\n raise response[0]\n return response\n", "path": "redis/_parsers/hiredis.py"}]}
2,910
211
gh_patches_debug_2453
rasdani/github-patches
git_diff
cupy__cupy-3468
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove mock from test requirements? I'm learning how to write mock tests, and I noticed things like `import mock` are workarounds to support PY27 and older PY3. Since CuPy now support PY35+ only and `mock` becomes part of the standard Python library, I suppose this line is no longer needed: https://github.com/cupy/cupy/blob/74dcb4172578a0771e06f4e44b10b5f73f68fb59/setup.py#L39 and all `import mock` can be replaced by `from unittest import mock`? </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 import os 4 from setuptools import setup, find_packages 5 import sys 6 7 import cupy_setup_build 8 9 10 if sys.version_info[:3] == (3, 5, 0): 11 if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')): 12 msg = """ 13 CuPy does not work with Python 3.5.0. 14 15 We strongly recommend to use another version of Python. 16 If you want to use CuPy with Python 3.5.0 at your own risk, 17 set 1 to CUPY_PYTHON_350_FORCE environment variable.""" 18 print(msg) 19 sys.exit(1) 20 21 22 requirements = { 23 'setup': [ 24 'fastrlock>=0.3', 25 ], 26 'install': [ 27 'numpy>=1.15', 28 'fastrlock>=0.3', 29 ], 30 'stylecheck': [ 31 'autopep8==1.3.5', 32 'flake8==3.5.0', 33 'pbr==4.0.4', 34 'pycodestyle==2.3.1', 35 ], 36 'test': [ 37 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI. 38 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0 39 'mock', 40 ], 41 'doctest': [ 42 'matplotlib', 43 'optuna', 44 ], 45 'docs': [ 46 'sphinx==3.0.4', 47 'sphinx_rtd_theme', 48 ], 49 'travis': [ 50 '-r stylecheck', 51 '-r docs', 52 ], 53 'appveyor': [ 54 '-r test', 55 ], 56 'jenkins': [ 57 '-r test', 58 'pytest-timeout', 59 'pytest-cov', 60 'coveralls', 61 'codecov', 62 ], 63 } 64 65 66 def reduce_requirements(key): 67 # Resolve recursive requirements notation (-r) 68 reqs = requirements[key] 69 resolved_reqs = [] 70 for req in reqs: 71 if req.startswith('-r'): 72 depend_key = req[2:].lstrip() 73 reduce_requirements(depend_key) 74 resolved_reqs += requirements[depend_key] 75 else: 76 resolved_reqs.append(req) 77 requirements[key] = resolved_reqs 78 79 80 for k in requirements.keys(): 81 reduce_requirements(k) 82 83 84 extras_require = {k: v for k, v in requirements.items() if k != 'install'} 85 86 87 setup_requires = requirements['setup'] 88 install_requires = requirements['install'] 89 tests_require = requirements['test'] 90 91 92 package_data = { 93 'cupy': [ 94 'core/include/cupy/complex/arithmetic.h', 95 'core/include/cupy/complex/catrig.h', 96 'core/include/cupy/complex/catrigf.h', 97 'core/include/cupy/complex/ccosh.h', 98 'core/include/cupy/complex/ccoshf.h', 99 'core/include/cupy/complex/cexp.h', 100 'core/include/cupy/complex/cexpf.h', 101 'core/include/cupy/complex/clog.h', 102 'core/include/cupy/complex/clogf.h', 103 'core/include/cupy/complex/complex.h', 104 'core/include/cupy/complex/complex_inl.h', 105 'core/include/cupy/complex/cpow.h', 106 'core/include/cupy/complex/cproj.h', 107 'core/include/cupy/complex/csinh.h', 108 'core/include/cupy/complex/csinhf.h', 109 'core/include/cupy/complex/csqrt.h', 110 'core/include/cupy/complex/csqrtf.h', 111 'core/include/cupy/complex/ctanh.h', 112 'core/include/cupy/complex/ctanhf.h', 113 'core/include/cupy/complex/math_private.h', 114 'core/include/cupy/carray.cuh', 115 'core/include/cupy/complex.cuh', 116 'core/include/cupy/atomics.cuh', 117 'core/include/cupy/cuComplex_bridge.h', 118 'core/include/cupy/_cuda/cuda-*/*.h', 119 
'core/include/cupy/_cuda/cuda-*/*.hpp', 120 'cuda/cupy_thrust.cu', 121 ], 122 } 123 124 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs() 125 126 package_name = cupy_setup_build.get_package_name() 127 long_description = cupy_setup_build.get_long_description() 128 ext_modules = cupy_setup_build.get_ext_modules() 129 build_ext = cupy_setup_build.custom_build_ext 130 sdist = cupy_setup_build.sdist_with_cython 131 132 here = os.path.abspath(os.path.dirname(__file__)) 133 # Get __version__ variable 134 exec(open(os.path.join(here, 'cupy', '_version.py')).read()) 135 136 CLASSIFIERS = """\ 137 Development Status :: 5 - Production/Stable 138 Intended Audience :: Science/Research 139 Intended Audience :: Developers 140 License :: OSI Approved :: MIT License 141 Programming Language :: Python 142 Programming Language :: Python :: 3 143 Programming Language :: Python :: 3.5 144 Programming Language :: Python :: 3.6 145 Programming Language :: Python :: 3.7 146 Programming Language :: Python :: 3 :: Only 147 Programming Language :: Cython 148 Topic :: Software Development 149 Topic :: Scientific/Engineering 150 Operating System :: Microsoft :: Windows 151 Operating System :: POSIX 152 Operating System :: MacOS 153 """ 154 155 156 setup( 157 name=package_name, 158 version=__version__, # NOQA 159 description='CuPy: NumPy-like API accelerated with CUDA', 160 long_description=long_description, 161 author='Seiya Tokui', 162 author_email='[email protected]', 163 url='https://cupy.chainer.org/', 164 license='MIT License', 165 project_urls={ 166 "Bug Tracker": "https://github.com/cupy/cupy/issues", 167 "Documentation": "https://docs-cupy.chainer.org/", 168 "Source Code": "https://github.com/cupy/cupy", 169 }, 170 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], 171 packages=find_packages(exclude=['install', 'tests']), 172 package_data=package_data, 173 zip_safe=False, 174 python_requires='>=3.5.0', 175 setup_requires=setup_requires, 176 install_requires=install_requires, 177 tests_require=tests_require, 178 extras_require=extras_require, 179 ext_modules=ext_modules, 180 cmdclass={'build_ext': build_ext, 181 'sdist': sdist}, 182 ) 183 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -36,7 +36,6 @@ 'test': [ 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI. 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0 - 'mock', ], 'doctest': [ 'matplotlib',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,6 @@\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n- 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n", "issue": "Remove mock from test requirements?\nI'm learning how to write mock tests, and I noticed things like `import mock` are workarounds to support PY27 and older PY3. Since CuPy now support PY35+ only and `mock` becomes part of the standard Python library, I suppose this line is no longer needed:\r\nhttps://github.com/cupy/cupy/blob/74dcb4172578a0771e06f4e44b10b5f73f68fb59/setup.py#L39\r\nand all `import mock` can be replaced by `from unittest import mock`?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.15',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n 'optuna',\n ],\n 'docs': [\n 'sphinx==3.0.4',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 
'core/include/cupy/cuComplex_bridge.h',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: MacOS\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.chainer.org/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs-cupy.chainer.org/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.5.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}]}
2,516
115
gh_patches_debug_17298
rasdani/github-patches
git_diff
pytorch__TensorRT-2080
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 🐛 [Bug] Encountered TRT Error in Dynamo compile path ## Bug Description When compiling GPT-2 with Dynamo compile, the following error is encountered: ```python [07/01/2023-00:07:12] [TRT] [E] 3: [executionContext.cpp::enqueueInternal::795] Error Code 3: API Usage Error (Parameter check failed at: runtime/api/executionContext.cpp::enqueueInternal::795, condition: bindings[x] || nullBindingOK ) ``` Additionally, this does not seem to cause failures in the Dynamo runtime (not caught by `pass_through_build_failures`), and it appears both with and without the experimental runtime. ## To Reproduce ```python model = GPT2Model.from_pretrained("gpt2").eval().cuda() input_ids = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda() attention_mask = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda() traced = transformers_trace(model, input_names=["input_ids", "attention_mask"]).eval().cuda() fx_trt_model = torch_tensorrt.compile(traced, ir="dynamo_compile", inputs=[input_ids, attention_mask], debug=True, pass_through_build_failures=True, min_block_size=10) ``` ## Expected behavior The model should not encounter TRT errors while compiling. ## Environment - Torch-TensorRT Version (e.g. 1.0.0): 2844630 - PyTorch Version (e.g. 1.0): `2.1.0.dev20230620+cu118` - TensorRT Version: `8.6.1` ## Additional context This error no longer appears when we apply the `@fake_tensor_unsupported` flag to the backends, as was removed in #1955. It is unclear whether this is the direct _cause_ of the bug, however. Additionally, this bug seems to appear only sometimes, and not consistently. </issue> <code> [start of py/torch_tensorrt/dynamo/backend/backends.py] 1 import logging 2 from typing import Sequence 3 import torch 4 from functools import partial 5 import torch._dynamo as td 6 7 from torch_tensorrt.dynamo.backend._settings import CompilationSettings 8 from torch_tensorrt.dynamo.backend.lowering._decompositions import ( 9 get_decompositions, 10 ) 11 from torch_tensorrt.dynamo.backend.lowering._pre_aot_lowering import ( 12 pre_aot_substitutions, 13 ) 14 from torch_tensorrt.dynamo.backend.lowering._partition import ( 15 partition, 16 get_submod_inputs, 17 ) 18 from torch_tensorrt.dynamo.backend.utils import parse_dynamo_kwargs 19 from torch_tensorrt.dynamo.backend.conversion import convert_module 20 21 from torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler 22 23 24 logger = logging.getLogger(__name__) 25 26 27 @td.register_backend(name="torch_tensorrt") 28 def torch_tensorrt_backend( 29 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs 30 ): 31 DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend 32 33 return DEFAULT_BACKEND(gm, sample_inputs, **kwargs) 34 35 36 @td.register_backend(name="aot_torch_tensorrt_aten") 37 def aot_torch_tensorrt_aten_backend( 38 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs 39 ): 40 settings = parse_dynamo_kwargs(kwargs) 41 42 custom_backend = partial( 43 _pretraced_backend, 44 settings=settings, 45 ) 46 47 # Perform Pre-AOT Lowering for Module-Level Replacement 48 gm = pre_aot_substitutions(gm) 49 50 # Invoke AOTAutograd to translate operators to aten 51 return aot_module_simplified( 52 gm, 53 sample_inputs, 54 fw_compiler=make_boxed_compiler(custom_backend), 55 decompositions=get_decompositions(), 56 ) 57 58 59 def _pretraced_backend( 60 gm: torch.fx.GraphModule, 61 sample_inputs: 
Sequence[torch.Tensor], 62 settings: CompilationSettings = CompilationSettings(), 63 ): 64 """Helper function to manage translation of traced FX module to TRT engines 65 66 Args: 67 module: FX GraphModule to convert 68 inputs: Inputs to the module 69 settings: Compilation settings 70 Returns: 71 Compiled FX GraphModule 72 """ 73 try: 74 logger.debug("Post-AOT Autograd graph:\n" + str(gm.graph)) 75 76 trt_compiled = _compile_module( 77 gm, 78 sample_inputs, 79 settings=settings, 80 ) 81 return trt_compiled 82 except: 83 if not settings.pass_through_build_failures: 84 logger.warning( 85 "TRT conversion failed on the subgraph. See trace above. " 86 + "Returning GraphModule forward instead.", 87 exc_info=True, 88 ) 89 return gm.forward 90 else: 91 raise AssertionError( 92 "Halting compilation on build failure since " 93 + "pass_through_build_failures was specified as True. " 94 + "To return the default Torch implementation and avoid " 95 + "halting compilation on engine build failures, " 96 + "specify pass_through_build_failures=False." 97 ) 98 99 100 def _compile_module( 101 gm: torch.fx.GraphModule, 102 sample_inputs: Sequence[torch.Tensor], 103 settings: CompilationSettings = CompilationSettings(), 104 ) -> torch.fx.GraphModule: 105 """Compile a traced FX module 106 107 Includes: Partitioning + Conversion Phases 108 109 Args: 110 module: FX GraphModule to convert 111 inputs: Inputs to the module 112 settings: Compilation settings 113 Returns: 114 Compiled FX GraphModule 115 """ 116 # Partition module into components that can be TRT-accelerated 117 partitioned_module = partition( 118 gm, 119 verbose=settings.debug, 120 min_block_size=settings.min_block_size, 121 torch_executed_ops=settings.torch_executed_ops, 122 ) 123 124 # Iterate over all components that can be accelerated 125 # Generate the corresponding TRT Module for those 126 for name, _ in partitioned_module.named_children(): 127 submodule = getattr(partitioned_module, name) 128 129 # Get submodule inputs 130 submodule_inputs = get_submod_inputs( 131 partitioned_module, submodule, sample_inputs 132 ) 133 134 # Create TRT Module from submodule 135 trt_mod = convert_module( 136 submodule, 137 submodule_inputs, 138 settings=settings, 139 ) 140 141 # Replace FX Module with TRT Module 142 setattr(partitioned_module, name, trt_mod) 143 144 return partitioned_module 145 [end of py/torch_tensorrt/dynamo/backend/backends.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/py/torch_tensorrt/dynamo/backend/backends.py b/py/torch_tensorrt/dynamo/backend/backends.py --- a/py/torch_tensorrt/dynamo/backend/backends.py +++ b/py/torch_tensorrt/dynamo/backend/backends.py @@ -121,6 +121,9 @@ torch_executed_ops=settings.torch_executed_ops, ) + # Store TRT replicas of Torch subgraphs + trt_modules = {} + # Iterate over all components that can be accelerated # Generate the corresponding TRT Module for those for name, _ in partitioned_module.named_children(): @@ -138,7 +141,10 @@ settings=settings, ) - # Replace FX Module with TRT Module + trt_modules[name] = trt_mod + + # Replace all FX Modules with TRT Modules + for name, trt_mod in trt_modules.items(): setattr(partitioned_module, name, trt_mod) return partitioned_module
{"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/backend/backends.py b/py/torch_tensorrt/dynamo/backend/backends.py\n--- a/py/torch_tensorrt/dynamo/backend/backends.py\n+++ b/py/torch_tensorrt/dynamo/backend/backends.py\n@@ -121,6 +121,9 @@\n torch_executed_ops=settings.torch_executed_ops,\n )\n \n+ # Store TRT replicas of Torch subgraphs\n+ trt_modules = {}\n+\n # Iterate over all components that can be accelerated\n # Generate the corresponding TRT Module for those\n for name, _ in partitioned_module.named_children():\n@@ -138,7 +141,10 @@\n settings=settings,\n )\n \n- # Replace FX Module with TRT Module\n+ trt_modules[name] = trt_mod\n+\n+ # Replace all FX Modules with TRT Modules\n+ for name, trt_mod in trt_modules.items():\n setattr(partitioned_module, name, trt_mod)\n \n return partitioned_module\n", "issue": "\ud83d\udc1b [Bug] Encountered TRT Error in Dynamo compile path\n## Bug Description\r\nWhen compiling GPT-2 with Dynamo compile, the following error is encountered:\r\n```python\r\n[07/01/2023-00:07:12] [TRT] [E] 3: [executionContext.cpp::enqueueInternal::795] Error Code 3: API Usage Error (Parameter check failed at: runtime/api/executionContext.cpp::enqueueInternal::795, condition: bindings[x] || nullBindingOK\r\n)\r\n```\r\nAdditionally, this does not seem to cause failures in the Dynamo runtime (not caught by `pass_through_build_failures`), and it appears both with and without the experimental runtime.\r\n\r\n## To Reproduce\r\n```python\r\nmodel = GPT2Model.from_pretrained(\"gpt2\").eval().cuda()\r\ninput_ids = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda()\r\nattention_mask = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda()\r\ntraced = transformers_trace(model, input_names=[\"input_ids\", \"attention_mask\"]).eval().cuda()\r\nfx_trt_model = torch_tensorrt.compile(traced, ir=\"dynamo_compile\", inputs=[input_ids, attention_mask], debug=True, pass_through_build_failures=True, min_block_size=10)\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe model should not encounter TRT errors while compiling.\r\n\r\n## Environment\r\n\r\n - Torch-TensorRT Version (e.g. 1.0.0): 2844630\r\n - PyTorch Version (e.g. 1.0): `2.1.0.dev20230620+cu118`\r\n - TensorRT Version: `8.6.1`\r\n\r\n## Additional context\r\n\r\nThis error no longer appears when we apply the `@fake_tensor_unsupported` flag to the backends, as was removed in #1955. 
It is unclear whether this is the direct _cause_ of the bug, however.\r\n\r\nAdditionally, this bug seems to appear only sometimes, and not consistently.\n", "before_files": [{"content": "import logging\nfrom typing import Sequence\nimport torch\nfrom functools import partial\nimport torch._dynamo as td\n\nfrom torch_tensorrt.dynamo.backend._settings import CompilationSettings\nfrom torch_tensorrt.dynamo.backend.lowering._decompositions import (\n get_decompositions,\n)\nfrom torch_tensorrt.dynamo.backend.lowering._pre_aot_lowering import (\n pre_aot_substitutions,\n)\nfrom torch_tensorrt.dynamo.backend.lowering._partition import (\n partition,\n get_submod_inputs,\n)\nfrom torch_tensorrt.dynamo.backend.utils import parse_dynamo_kwargs\nfrom torch_tensorrt.dynamo.backend.conversion import convert_module\n\nfrom torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_backend(name=\"torch_tensorrt\")\ndef torch_tensorrt_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs\n):\n DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend\n\n return DEFAULT_BACKEND(gm, sample_inputs, **kwargs)\n\n\[email protected]_backend(name=\"aot_torch_tensorrt_aten\")\ndef aot_torch_tensorrt_aten_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs\n):\n settings = parse_dynamo_kwargs(kwargs)\n\n custom_backend = partial(\n _pretraced_backend,\n settings=settings,\n )\n\n # Perform Pre-AOT Lowering for Module-Level Replacement\n gm = pre_aot_substitutions(gm)\n\n # Invoke AOTAutograd to translate operators to aten\n return aot_module_simplified(\n gm,\n sample_inputs,\n fw_compiler=make_boxed_compiler(custom_backend),\n decompositions=get_decompositions(),\n )\n\n\ndef _pretraced_backend(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n):\n \"\"\"Helper function to manage translation of traced FX module to TRT engines\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n try:\n logger.debug(\"Post-AOT Autograd graph:\\n\" + str(gm.graph))\n\n trt_compiled = _compile_module(\n gm,\n sample_inputs,\n settings=settings,\n )\n return trt_compiled\n except:\n if not settings.pass_through_build_failures:\n logger.warning(\n \"TRT conversion failed on the subgraph. See trace above. \"\n + \"Returning GraphModule forward instead.\",\n exc_info=True,\n )\n return gm.forward\n else:\n raise AssertionError(\n \"Halting compilation on build failure since \"\n + \"pass_through_build_failures was specified as True. 
\"\n + \"To return the default Torch implementation and avoid \"\n + \"halting compilation on engine build failures, \"\n + \"specify pass_through_build_failures=False.\"\n )\n\n\ndef _compile_module(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n) -> torch.fx.GraphModule:\n \"\"\"Compile a traced FX module\n\n Includes: Partitioning + Conversion Phases\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n # Partition module into components that can be TRT-accelerated\n partitioned_module = partition(\n gm,\n verbose=settings.debug,\n min_block_size=settings.min_block_size,\n torch_executed_ops=settings.torch_executed_ops,\n )\n\n # Iterate over all components that can be accelerated\n # Generate the corresponding TRT Module for those\n for name, _ in partitioned_module.named_children():\n submodule = getattr(partitioned_module, name)\n\n # Get submodule inputs\n submodule_inputs = get_submod_inputs(\n partitioned_module, submodule, sample_inputs\n )\n\n # Create TRT Module from submodule\n trt_mod = convert_module(\n submodule,\n submodule_inputs,\n settings=settings,\n )\n\n # Replace FX Module with TRT Module\n setattr(partitioned_module, name, trt_mod)\n\n return partitioned_module\n", "path": "py/torch_tensorrt/dynamo/backend/backends.py"}]}
2,276
231
gh_patches_debug_30885
rasdani/github-patches
git_diff
plotly__dash-2207
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Render arbitrary keys of components inside dictionaries. It should be possible to render arbitrary keys as component inside an object prop. Example typescript props: ```ts type Props = { dynamic: {[k: string]: JSX.Element} } ``` prop-types: `PropTypes.objectOf(PropTypes.node)` </issue> <code> [start of dash/development/_collect_nodes.py] 1 def is_node(value): 2 return value in ("node", "element") 3 4 5 def is_shape(value): 6 return value in ("shape", "exact") 7 8 9 def collect_array(a_value, base, nodes): 10 a_type = a_value["name"] 11 if is_node(a_type): 12 nodes.append(base) 13 elif a_type in ("shape", "exact"): 14 nodes = collect_nodes(a_value["value"], base + "[]", nodes) 15 elif a_type == "union": 16 nodes = collect_union(a_value["value"], base + "[]", nodes) 17 return nodes 18 19 20 def collect_union(type_list, base, nodes): 21 for t in type_list: 22 if is_node(t["name"]): 23 nodes.append(base) 24 elif is_shape(t["name"]): 25 nodes = collect_nodes(t["value"], base, nodes) 26 elif t["name"] == "arrayOf": 27 nodes = collect_array(t["value"], base, nodes) 28 return nodes 29 30 31 def collect_nodes(metadata, base="", nodes=None): 32 nodes = nodes or [] 33 34 for prop_name, value in metadata.items(): 35 # Support for recursive shapes, the type is directly in the field. 36 t_value = value.get("type", value) 37 p_type = t_value.get("name") 38 39 if base: 40 key = f"{base}.{prop_name}" 41 else: 42 key = prop_name 43 if is_node(p_type): 44 nodes.append(key) 45 elif p_type == "arrayOf": 46 a_value = t_value.get("value", t_value) 47 nodes = collect_array(a_value, key, nodes) 48 elif is_shape(p_type): 49 nodes = collect_nodes(t_value["value"], key, nodes) 50 elif p_type == "union": 51 nodes = collect_union(t_value["value"], key, nodes) 52 53 return nodes 54 55 56 def filter_base_nodes(nodes): 57 return [n for n in nodes if not any(e in n for e in ("[]", "."))] 58 [end of dash/development/_collect_nodes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dash/development/_collect_nodes.py b/dash/development/_collect_nodes.py --- a/dash/development/_collect_nodes.py +++ b/dash/development/_collect_nodes.py @@ -14,6 +14,8 @@ nodes = collect_nodes(a_value["value"], base + "[]", nodes) elif a_type == "union": nodes = collect_union(a_value["value"], base + "[]", nodes) + elif a_type == "objectOf": + nodes = collect_object(a_value["value"], base + "[]", nodes) return nodes @@ -25,6 +27,22 @@ nodes = collect_nodes(t["value"], base, nodes) elif t["name"] == "arrayOf": nodes = collect_array(t["value"], base, nodes) + elif t["name"] == "objectOf": + nodes = collect_object(t["value"], base, nodes) + return nodes + + +def collect_object(o_value, base, nodes): + o_name = o_value.get("name") + o_key = base + "{}" + if is_node(o_name): + nodes.append(o_key) + elif is_shape(o_name): + nodes = collect_nodes(o_value.get("value", {}), o_key, nodes) + elif o_name == "union": + nodes = collect_union(o_value.get("value"), o_key, nodes) + elif o_name == "arrayOf": + nodes = collect_array(o_value, o_key, nodes) return nodes @@ -49,9 +67,12 @@ nodes = collect_nodes(t_value["value"], key, nodes) elif p_type == "union": nodes = collect_union(t_value["value"], key, nodes) + elif p_type == "objectOf": + o_value = t_value.get("value", {}) + nodes = collect_object(o_value, key, nodes) return nodes def filter_base_nodes(nodes): - return [n for n in nodes if not any(e in n for e in ("[]", "."))] + return [n for n in nodes if not any(e in n for e in ("[]", ".", "{}"))]
{"golden_diff": "diff --git a/dash/development/_collect_nodes.py b/dash/development/_collect_nodes.py\n--- a/dash/development/_collect_nodes.py\n+++ b/dash/development/_collect_nodes.py\n@@ -14,6 +14,8 @@\n nodes = collect_nodes(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"union\":\n nodes = collect_union(a_value[\"value\"], base + \"[]\", nodes)\n+ elif a_type == \"objectOf\":\n+ nodes = collect_object(a_value[\"value\"], base + \"[]\", nodes)\n return nodes\n \n \n@@ -25,6 +27,22 @@\n nodes = collect_nodes(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"arrayOf\":\n nodes = collect_array(t[\"value\"], base, nodes)\n+ elif t[\"name\"] == \"objectOf\":\n+ nodes = collect_object(t[\"value\"], base, nodes)\n+ return nodes\n+\n+\n+def collect_object(o_value, base, nodes):\n+ o_name = o_value.get(\"name\")\n+ o_key = base + \"{}\"\n+ if is_node(o_name):\n+ nodes.append(o_key)\n+ elif is_shape(o_name):\n+ nodes = collect_nodes(o_value.get(\"value\", {}), o_key, nodes)\n+ elif o_name == \"union\":\n+ nodes = collect_union(o_value.get(\"value\"), o_key, nodes)\n+ elif o_name == \"arrayOf\":\n+ nodes = collect_array(o_value, o_key, nodes)\n return nodes\n \n \n@@ -49,9 +67,12 @@\n nodes = collect_nodes(t_value[\"value\"], key, nodes)\n elif p_type == \"union\":\n nodes = collect_union(t_value[\"value\"], key, nodes)\n+ elif p_type == \"objectOf\":\n+ o_value = t_value.get(\"value\", {})\n+ nodes = collect_object(o_value, key, nodes)\n \n return nodes\n \n \n def filter_base_nodes(nodes):\n- return [n for n in nodes if not any(e in n for e in (\"[]\", \".\"))]\n+ return [n for n in nodes if not any(e in n for e in (\"[]\", \".\", \"{}\"))]\n", "issue": "Render arbitrary keys of components inside dictionaries.\nIt should be possible to render arbitrary keys as component inside an object prop.\r\n\r\nExample typescript props:\r\n```ts\r\ntype Props = {\r\n dynamic: {[k: string]: JSX.Element}\r\n}\r\n```\r\nprop-types: `PropTypes.objectOf(PropTypes.node)` \n", "before_files": [{"content": "def is_node(value):\n return value in (\"node\", \"element\")\n\n\ndef is_shape(value):\n return value in (\"shape\", \"exact\")\n\n\ndef collect_array(a_value, base, nodes):\n a_type = a_value[\"name\"]\n if is_node(a_type):\n nodes.append(base)\n elif a_type in (\"shape\", \"exact\"):\n nodes = collect_nodes(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"union\":\n nodes = collect_union(a_value[\"value\"], base + \"[]\", nodes)\n return nodes\n\n\ndef collect_union(type_list, base, nodes):\n for t in type_list:\n if is_node(t[\"name\"]):\n nodes.append(base)\n elif is_shape(t[\"name\"]):\n nodes = collect_nodes(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"arrayOf\":\n nodes = collect_array(t[\"value\"], base, nodes)\n return nodes\n\n\ndef collect_nodes(metadata, base=\"\", nodes=None):\n nodes = nodes or []\n\n for prop_name, value in metadata.items():\n # Support for recursive shapes, the type is directly in the field.\n t_value = value.get(\"type\", value)\n p_type = t_value.get(\"name\")\n\n if base:\n key = f\"{base}.{prop_name}\"\n else:\n key = prop_name\n if is_node(p_type):\n nodes.append(key)\n elif p_type == \"arrayOf\":\n a_value = t_value.get(\"value\", t_value)\n nodes = collect_array(a_value, key, nodes)\n elif is_shape(p_type):\n nodes = collect_nodes(t_value[\"value\"], key, nodes)\n elif p_type == \"union\":\n nodes = collect_union(t_value[\"value\"], key, nodes)\n\n return nodes\n\n\ndef filter_base_nodes(nodes):\n return [n for n in nodes if not any(e in n 
for e in (\"[]\", \".\"))]\n", "path": "dash/development/_collect_nodes.py"}]}
1,125
486
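The golden diff in the record above teaches dash's `_collect_nodes` walker about `objectOf` props, marking positions with arbitrary keys by appending `{}` to the collected path. As a quick illustration of that path convention, here is a standalone sketch; it is not taken from the record, and the `metadata` literal is a hand-written stand-in for react-docgen output, assumed purely for demonstration.

```python
def is_node(name):
    # Mirrors the helper in the patched module: "node"/"element" props hold components.
    return name in ("node", "element")

def collect_object(o_value, base, nodes):
    # objectOf props have arbitrary keys, so the collected path is marked with "{}".
    o_name = o_value.get("name")
    o_key = base + "{}"
    if is_node(o_name):
        nodes.append(o_key)
    return nodes

# Hand-written stand-in for react-docgen metadata (assumption for illustration only).
metadata = {"dynamic": {"type": {"name": "objectOf", "value": {"name": "node"}}}}

nodes = []
for prop_name, value in metadata.items():
    prop_type = value["type"]
    if prop_type["name"] == "objectOf":
        collect_object(prop_type["value"], prop_name, nodes)

print(nodes)  # expected: ['dynamic{}']
```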
gh_patches_debug_1509
rasdani/github-patches
git_diff
searx__searx-1483
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> wolframalpha engine is broken The wolframalpha engine appears to be broken in 0.15.0 ``` searx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. searx-run[9330]: Traceback (most recent call last): searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 104, in search_one_request_safe searx-run[9330]: search_results = search_one_request(engine, query, request_params) searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 87, in search_one_request searx-run[9330]: return engine.response(response) searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py", line 68, in response searx-run[9330]: search_results = etree.XML(resp.text) searx-run[9330]: File "src/lxml/etree.pyx", line 3192, in lxml.etree.XML searx-run[9330]: File "src/lxml/parser.pxi", line 1872, in lxml.etree._parseMemoryDocument searx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. ``` wolframalpha engine is broken The wolframalpha engine appears to be broken in 0.15.0 ``` searx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. searx-run[9330]: Traceback (most recent call last): searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 104, in search_one_request_safe searx-run[9330]: search_results = search_one_request(engine, query, request_params) searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 87, in search_one_request searx-run[9330]: return engine.response(response) searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py", line 68, in response searx-run[9330]: search_results = etree.XML(resp.text) searx-run[9330]: File "src/lxml/etree.pyx", line 3192, in lxml.etree.XML searx-run[9330]: File "src/lxml/parser.pxi", line 1872, in lxml.etree._parseMemoryDocument searx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. 
``` </issue> <code> [start of searx/engines/wolframalpha_api.py] 1 # Wolfram Alpha (Science) 2 # 3 # @website https://www.wolframalpha.com 4 # @provide-api yes (https://api.wolframalpha.com/v2/) 5 # 6 # @using-api yes 7 # @results XML 8 # @stable yes 9 # @parse url, infobox 10 11 from lxml import etree 12 from searx.url_utils import urlencode 13 14 # search-url 15 search_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}' 16 site_url = 'https://www.wolframalpha.com/input/?{query}' 17 api_key = '' # defined in settings.yml 18 19 # xpath variables 20 failure_xpath = '/queryresult[attribute::success="false"]' 21 input_xpath = '//pod[starts-with(attribute::id, "Input")]/subpod/plaintext' 22 pods_xpath = '//pod' 23 subpods_xpath = './subpod' 24 pod_primary_xpath = './@primary' 25 pod_id_xpath = './@id' 26 pod_title_xpath = './@title' 27 plaintext_xpath = './plaintext' 28 image_xpath = './img' 29 img_src_xpath = './@src' 30 img_alt_xpath = './@alt' 31 32 # pods to display as image in infobox 33 # this pods do return a plaintext, but they look better and are more useful as images 34 image_pods = {'VisualRepresentation', 35 'Illustration'} 36 37 38 # do search-request 39 def request(query, params): 40 params['url'] = search_url.format(query=urlencode({'input': query}), api_key=api_key) 41 params['headers']['Referer'] = site_url.format(query=urlencode({'i': query})) 42 43 return params 44 45 46 # replace private user area characters to make text legible 47 def replace_pua_chars(text): 48 pua_chars = {u'\uf522': u'\u2192', # rigth arrow 49 u'\uf7b1': u'\u2115', # set of natural numbers 50 u'\uf7b4': u'\u211a', # set of rational numbers 51 u'\uf7b5': u'\u211d', # set of real numbers 52 u'\uf7bd': u'\u2124', # set of integer numbers 53 u'\uf74c': 'd', # differential 54 u'\uf74d': u'\u212f', # euler's number 55 u'\uf74e': 'i', # imaginary number 56 u'\uf7d9': '='} # equals sign 57 58 for k, v in pua_chars.items(): 59 text = text.replace(k, v) 60 61 return text 62 63 64 # get response from search-request 65 def response(resp): 66 results = [] 67 68 search_results = etree.XML(resp.text) 69 70 # return empty array if there are no results 71 if search_results.xpath(failure_xpath): 72 return [] 73 74 try: 75 infobox_title = search_results.xpath(input_xpath)[0].text 76 except: 77 infobox_title = "" 78 79 pods = search_results.xpath(pods_xpath) 80 result_chunks = [] 81 result_content = "" 82 for pod in pods: 83 pod_id = pod.xpath(pod_id_xpath)[0] 84 pod_title = pod.xpath(pod_title_xpath)[0] 85 pod_is_result = pod.xpath(pod_primary_xpath) 86 87 subpods = pod.xpath(subpods_xpath) 88 if not subpods: 89 continue 90 91 # Appends either a text or an image, depending on which one is more suitable 92 for subpod in subpods: 93 content = subpod.xpath(plaintext_xpath)[0].text 94 image = subpod.xpath(image_xpath) 95 96 if content and pod_id not in image_pods: 97 98 if pod_is_result or not result_content: 99 if pod_id != "Input": 100 result_content = "%s: %s" % (pod_title, content) 101 102 # if no input pod was found, title is first plaintext pod 103 if not infobox_title: 104 infobox_title = content 105 106 content = replace_pua_chars(content) 107 result_chunks.append({'label': pod_title, 'value': content}) 108 109 elif image: 110 result_chunks.append({'label': pod_title, 111 'image': {'src': image[0].xpath(img_src_xpath)[0], 112 'alt': image[0].xpath(img_alt_xpath)[0]}}) 113 114 if not result_chunks: 115 return [] 116 117 title = "Wolfram|Alpha (%s)" % infobox_title 118 119 # append infobox 120 
results.append({'infobox': infobox_title, 121 'attributes': result_chunks, 122 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]}) 123 124 # append link to site 125 results.append({'url': resp.request.headers['Referer'], 126 'title': title, 127 'content': result_content}) 128 129 return results 130 [end of searx/engines/wolframalpha_api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py --- a/searx/engines/wolframalpha_api.py +++ b/searx/engines/wolframalpha_api.py @@ -65,7 +65,7 @@ def response(resp): results = [] - search_results = etree.XML(resp.text) + search_results = etree.XML(resp.content) # return empty array if there are no results if search_results.xpath(failure_xpath):
{"golden_diff": "diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py\n--- a/searx/engines/wolframalpha_api.py\n+++ b/searx/engines/wolframalpha_api.py\n@@ -65,7 +65,7 @@\n def response(resp):\n results = []\n \n- search_results = etree.XML(resp.text)\n+ search_results = etree.XML(resp.content)\n \n # return empty array if there are no results\n if search_results.xpath(failure_xpath):\n", "issue": "wolframalpha engine is broken\nThe wolframalpha engine appears to be broken in 0.15.0\r\n```\r\nsearx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.\r\nsearx-run[9330]: Traceback (most recent call last):\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 104, in search_one_request_safe\r\nsearx-run[9330]: search_results = search_one_request(engine, query, request_params)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 87, in search_one_request\r\nsearx-run[9330]: return engine.response(response)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py\", line 68, in response\r\nsearx-run[9330]: search_results = etree.XML(resp.text)\r\nsearx-run[9330]: File \"src/lxml/etree.pyx\", line 3192, in lxml.etree.XML\r\nsearx-run[9330]: File \"src/lxml/parser.pxi\", line 1872, in lxml.etree._parseMemoryDocument\r\nsearx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.\r\n```\nwolframalpha engine is broken\nThe wolframalpha engine appears to be broken in 0.15.0\r\n```\r\nsearx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.\r\nsearx-run[9330]: Traceback (most recent call last):\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 104, in search_one_request_safe\r\nsearx-run[9330]: search_results = search_one_request(engine, query, request_params)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 87, in search_one_request\r\nsearx-run[9330]: return engine.response(response)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py\", line 68, in response\r\nsearx-run[9330]: search_results = etree.XML(resp.text)\r\nsearx-run[9330]: File \"src/lxml/etree.pyx\", line 3192, in lxml.etree.XML\r\nsearx-run[9330]: File \"src/lxml/parser.pxi\", line 1872, in lxml.etree._parseMemoryDocument\r\nsearx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. 
Please use bytes input or XML fragments without declaration.\r\n```\n", "before_files": [{"content": "# Wolfram Alpha (Science)\n#\n# @website https://www.wolframalpha.com\n# @provide-api yes (https://api.wolframalpha.com/v2/)\n#\n# @using-api yes\n# @results XML\n# @stable yes\n# @parse url, infobox\n\nfrom lxml import etree\nfrom searx.url_utils import urlencode\n\n# search-url\nsearch_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}'\nsite_url = 'https://www.wolframalpha.com/input/?{query}'\napi_key = '' # defined in settings.yml\n\n# xpath variables\nfailure_xpath = '/queryresult[attribute::success=\"false\"]'\ninput_xpath = '//pod[starts-with(attribute::id, \"Input\")]/subpod/plaintext'\npods_xpath = '//pod'\nsubpods_xpath = './subpod'\npod_primary_xpath = './@primary'\npod_id_xpath = './@id'\npod_title_xpath = './@title'\nplaintext_xpath = './plaintext'\nimage_xpath = './img'\nimg_src_xpath = './@src'\nimg_alt_xpath = './@alt'\n\n# pods to display as image in infobox\n# this pods do return a plaintext, but they look better and are more useful as images\nimage_pods = {'VisualRepresentation',\n 'Illustration'}\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(query=urlencode({'input': query}), api_key=api_key)\n params['headers']['Referer'] = site_url.format(query=urlencode({'i': query}))\n\n return params\n\n\n# replace private user area characters to make text legible\ndef replace_pua_chars(text):\n pua_chars = {u'\\uf522': u'\\u2192', # rigth arrow\n u'\\uf7b1': u'\\u2115', # set of natural numbers\n u'\\uf7b4': u'\\u211a', # set of rational numbers\n u'\\uf7b5': u'\\u211d', # set of real numbers\n u'\\uf7bd': u'\\u2124', # set of integer numbers\n u'\\uf74c': 'd', # differential\n u'\\uf74d': u'\\u212f', # euler's number\n u'\\uf74e': 'i', # imaginary number\n u'\\uf7d9': '='} # equals sign\n\n for k, v in pua_chars.items():\n text = text.replace(k, v)\n\n return text\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = etree.XML(resp.text)\n\n # return empty array if there are no results\n if search_results.xpath(failure_xpath):\n return []\n\n try:\n infobox_title = search_results.xpath(input_xpath)[0].text\n except:\n infobox_title = \"\"\n\n pods = search_results.xpath(pods_xpath)\n result_chunks = []\n result_content = \"\"\n for pod in pods:\n pod_id = pod.xpath(pod_id_xpath)[0]\n pod_title = pod.xpath(pod_title_xpath)[0]\n pod_is_result = pod.xpath(pod_primary_xpath)\n\n subpods = pod.xpath(subpods_xpath)\n if not subpods:\n continue\n\n # Appends either a text or an image, depending on which one is more suitable\n for subpod in subpods:\n content = subpod.xpath(plaintext_xpath)[0].text\n image = subpod.xpath(image_xpath)\n\n if content and pod_id not in image_pods:\n\n if pod_is_result or not result_content:\n if pod_id != \"Input\":\n result_content = \"%s: %s\" % (pod_title, content)\n\n # if no input pod was found, title is first plaintext pod\n if not infobox_title:\n infobox_title = content\n\n content = replace_pua_chars(content)\n result_chunks.append({'label': pod_title, 'value': content})\n\n elif image:\n result_chunks.append({'label': pod_title,\n 'image': {'src': image[0].xpath(img_src_xpath)[0],\n 'alt': image[0].xpath(img_alt_xpath)[0]}})\n\n if not result_chunks:\n return []\n\n title = \"Wolfram|Alpha (%s)\" % infobox_title\n\n # append infobox\n results.append({'infobox': infobox_title,\n 'attributes': result_chunks,\n 'urls': [{'title': 'Wolfram|Alpha', 
'url': resp.request.headers['Referer']}]})\n\n # append link to site\n results.append({'url': resp.request.headers['Referer'],\n 'title': title,\n 'content': result_content})\n\n return results\n", "path": "searx/engines/wolframalpha_api.py"}]}
2,867
122
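The one-line fix in the record above (parsing `resp.content` instead of `resp.text`) hinges on a documented lxml behaviour: `etree.XML` refuses a `str` that carries an XML encoding declaration but accepts the same document as `bytes`. The standalone sketch below reproduces that behaviour; it assumes only that lxml is installed, and the XML snippet is a made-up, minimal stand-in for a Wolfram|Alpha API response.

```python
from lxml import etree

# Made-up, minimal stand-in for a Wolfram|Alpha API response.
doc = '<?xml version="1.0" encoding="UTF-8"?><queryresult success="true"/>'

try:
    etree.XML(doc)  # str input carrying an encoding declaration -> ValueError
except ValueError as exc:
    print("str input rejected:", exc)

root = etree.XML(doc.encode("utf-8"))  # bytes input (i.e. resp.content) parses fine
print("bytes input parsed:", root.tag, root.get("success"))
```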
gh_patches_debug_16417
rasdani/github-patches
git_diff
celery__kombu-101
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Kombu compression "encodes" the body ? At https://github.com/ask/kombu/blob/master/kombu/compression.py#L61, we see: ``` return encoder(body.encode("utf-8")), content_type ``` Same for decoding. What gives? It crashes with msgpack (which can't be encoded). Is there any reason for that ? </issue> <code> [start of kombu/compression.py] 1 """ 2 kombu.compression 3 ================= 4 5 Compression utilities. 6 7 :copyright: (c) 2009 - 2012 by Ask Solem. 8 :license: BSD, see LICENSE for more details. 9 10 """ 11 from __future__ import absolute_import 12 13 import zlib 14 15 _aliases = {} 16 _encoders = {} 17 _decoders = {} 18 19 __all__ = ["register", "encoders", "get_encoder", 20 "get_decoder", "compress", "decompress"] 21 22 23 def register(encoder, decoder, content_type, aliases=[]): 24 """Register new compression method. 25 26 :param encoder: Function used to compress text. 27 :param decoder: Function used to decompress previously compressed text. 28 :param content_type: The mime type this compression method identifies as. 29 :param aliases: A list of names to associate with this compression method. 30 31 """ 32 _encoders[content_type] = encoder 33 _decoders[content_type] = decoder 34 _aliases.update((alias, content_type) for alias in aliases) 35 36 37 def encoders(): 38 """Returns a list of available compression methods.""" 39 return _encoders.keys() 40 41 42 def get_encoder(t): 43 """Get encoder by alias name.""" 44 t = _aliases.get(t, t) 45 return _encoders[t], t 46 47 48 def get_decoder(t): 49 """Get decoder by alias name.""" 50 return _decoders[_aliases.get(t, t)] 51 52 53 def compress(body, content_type): 54 """Compress text. 55 56 :param body: The text to compress. 57 :param content_type: mime-type of compression method to use. 58 59 """ 60 encoder, content_type = get_encoder(content_type) 61 return encoder(body.encode("utf-8")), content_type 62 63 64 def decompress(body, content_type): 65 """Decompress compressed text. 66 67 :param body: Previously compressed text to uncompress. 68 :param content_type: mime-type of compression method used. 69 70 """ 71 return get_decoder(content_type)(body).decode("utf-8") 72 73 74 register(zlib.compress, 75 zlib.decompress, 76 "application/x-gzip", aliases=["gzip", "zlib"]) 77 try: 78 import bz2 79 except ImportError: 80 pass # Jython? 81 else: 82 register(bz2.compress, 83 bz2.decompress, 84 "application/x-bz2", aliases=["bzip2", "bzip"]) 85 [end of kombu/compression.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kombu/compression.py b/kombu/compression.py --- a/kombu/compression.py +++ b/kombu/compression.py @@ -10,6 +10,8 @@ """ from __future__ import absolute_import +from kombu.utils.encoding import ensure_bytes, bytes_to_str + import zlib _aliases = {} @@ -58,7 +60,7 @@ """ encoder, content_type = get_encoder(content_type) - return encoder(body.encode("utf-8")), content_type + return encoder(ensure_bytes(body)), content_type def decompress(body, content_type): @@ -68,7 +70,7 @@ :param content_type: mime-type of compression method used. """ - return get_decoder(content_type)(body).decode("utf-8") + return bytes_to_str(get_decoder(content_type)(body)) register(zlib.compress,
{"golden_diff": "diff --git a/kombu/compression.py b/kombu/compression.py\n--- a/kombu/compression.py\n+++ b/kombu/compression.py\n@@ -10,6 +10,8 @@\n \"\"\"\n from __future__ import absolute_import\n \n+from kombu.utils.encoding import ensure_bytes, bytes_to_str\n+\n import zlib\n \n _aliases = {}\n@@ -58,7 +60,7 @@\n \n \"\"\"\n encoder, content_type = get_encoder(content_type)\n- return encoder(body.encode(\"utf-8\")), content_type\n+ return encoder(ensure_bytes(body)), content_type\n \n \n def decompress(body, content_type):\n@@ -68,7 +70,7 @@\n :param content_type: mime-type of compression method used.\n \n \"\"\"\n- return get_decoder(content_type)(body).decode(\"utf-8\")\n+ return bytes_to_str(get_decoder(content_type)(body))\n \n \n register(zlib.compress,\n", "issue": "Kombu compression \"encodes\" the body ?\nAt https://github.com/ask/kombu/blob/master/kombu/compression.py#L61, we see:\n\n```\nreturn encoder(body.encode(\"utf-8\")), content_type\n```\n\nSame for decoding. What gives? It crashes with msgpack (which can't be encoded). Is there any reason for that ?\n\n", "before_files": [{"content": "\"\"\"\nkombu.compression\n=================\n\nCompression utilities.\n\n:copyright: (c) 2009 - 2012 by Ask Solem.\n:license: BSD, see LICENSE for more details.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport zlib\n\n_aliases = {}\n_encoders = {}\n_decoders = {}\n\n__all__ = [\"register\", \"encoders\", \"get_encoder\",\n \"get_decoder\", \"compress\", \"decompress\"]\n\n\ndef register(encoder, decoder, content_type, aliases=[]):\n \"\"\"Register new compression method.\n\n :param encoder: Function used to compress text.\n :param decoder: Function used to decompress previously compressed text.\n :param content_type: The mime type this compression method identifies as.\n :param aliases: A list of names to associate with this compression method.\n\n \"\"\"\n _encoders[content_type] = encoder\n _decoders[content_type] = decoder\n _aliases.update((alias, content_type) for alias in aliases)\n\n\ndef encoders():\n \"\"\"Returns a list of available compression methods.\"\"\"\n return _encoders.keys()\n\n\ndef get_encoder(t):\n \"\"\"Get encoder by alias name.\"\"\"\n t = _aliases.get(t, t)\n return _encoders[t], t\n\n\ndef get_decoder(t):\n \"\"\"Get decoder by alias name.\"\"\"\n return _decoders[_aliases.get(t, t)]\n\n\ndef compress(body, content_type):\n \"\"\"Compress text.\n\n :param body: The text to compress.\n :param content_type: mime-type of compression method to use.\n\n \"\"\"\n encoder, content_type = get_encoder(content_type)\n return encoder(body.encode(\"utf-8\")), content_type\n\n\ndef decompress(body, content_type):\n \"\"\"Decompress compressed text.\n\n :param body: Previously compressed text to uncompress.\n :param content_type: mime-type of compression method used.\n\n \"\"\"\n return get_decoder(content_type)(body).decode(\"utf-8\")\n\n\nregister(zlib.compress,\n zlib.decompress,\n \"application/x-gzip\", aliases=[\"gzip\", \"zlib\"])\ntry:\n import bz2\nexcept ImportError:\n pass # Jython?\nelse:\n register(bz2.compress,\n bz2.decompress,\n \"application/x-bz2\", aliases=[\"bzip2\", \"bzip\"])\n", "path": "kombu/compression.py"}]}
1,287
203
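The kombu patch above swaps the hard-coded `encode("utf-8")` / `decode("utf-8")` calls for `ensure_bytes` / `bytes_to_str`. A standalone sketch of why that matters for binary serializers such as msgpack follows; the `ensure_bytes` helper here is a simplified assumption of what `kombu.utils.encoding` provides, and the byte literal merely stands in for msgpack output.

```python
import zlib

def ensure_bytes(value):
    # Simplified assumption of what kombu.utils.encoding.ensure_bytes provides.
    return value.encode("utf-8") if isinstance(value, str) else value

# Arbitrary bytes standing in for msgpack output (already a bytes payload).
binary_body = b"\x82\xa4name\xa3foo\xa3age\x2a"

try:
    zlib.compress(binary_body.encode("utf-8"))  # pre-fix behaviour
except AttributeError as exc:
    # On Python 3 this fails outright; the original Python 2 report hit the same root cause.
    print("old code path fails on bytes:", exc)

compressed = zlib.compress(ensure_bytes(binary_body))  # post-fix behaviour
assert zlib.decompress(compressed) == binary_body
print("round-trip ok:", len(compressed), "compressed bytes")
```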
gh_patches_debug_31050
rasdani/github-patches
git_diff
litestar-org__litestar-2204
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: pydantic validations aren't being handled by DTOs ### Description In the documentation of Litestar we have: > When a value fails pydantic validation, the result will be a ValidationException with the extra key set to the pydantic validation errors. Thus, this data will be made available for the API consumers by default. But when combined with DTO's in a post request, if a pydantic validation fails the exception is not handled and the client receives an InternalServerError (500) instead of a BadRequest (400): ``` __pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__) pydantic_core._pydantic_core.ValidationError: 1 validation error for User name String should have at most 2 characters [type=string_too_long, input_value='abc', input_type=str] For further information visit https://errors.pydantic.dev/2.2/v/string_too_long Status code: 500 ``` If the validation isn't handled by pydantic (or at least it isn't handled **first** by pydantic) it works, for example an Enum validation returns a proper 400 status code because `msgspec` catches it first: ```python from enum import Enum class NameEnum(str, Enum): a = "A" b = "B" # (replace the User class of the example with this) class User(BaseModel): name: NameEnum ``` Output for incorrect Enum: ``` File ".../.venv/lib64/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json raise SerializationException(str(msgspec_error)) from msgspec_error litestar.exceptions.base_exceptions.SerializationException: Invalid enum value 'abc' - at `$.name` ... File ".../.venv/lib64/python3.11/site-packages/litestar/routes/http.py", line 186, in _get_response_data raise ClientException(str(e)) from e litestar.exceptions.http_exceptions.ClientException: 400: Invalid enum value 'abc' - at `$.name` ``` ### URL to code causing the issue _No response_ ### MCVE ```python from litestar import Litestar, post from litestar.contrib.pydantic import PydanticDTO from litestar.testing import TestClient from pydantic import BaseModel, Field class User(BaseModel): name: str = Field(max_length=2) UserDTO = PydanticDTO[User] @post("/user", dto=UserDTO, sync_to_thread=False) def create_user(data: User) -> User: return data with TestClient(Litestar([create_user], debug=True)) as client: response = client.post("/user", json={"name": "abc"}) print(response.text) print(f"Status code: {response.status_code}") assert response.status_code == 201 ``` ### Steps to reproduce ```bash 1. Execute the MCVE ``` ### Screenshots _No response_ ### Logs _No response_ ### Litestar Version 2.0.0rc1 ### Platform - [X] Linux - [ ] Mac - [ ] Windows - [ ] Other (Please specify in the description above) <!-- POLAR PLEDGE BADGE START --> --- ## Funding * If you would like to see an issue prioritized, make a pledge towards it! 
* We receive the pledge once the issue is completed & verified <a href="https://polar.sh/litestar-org/litestar/issues/2190"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2190/pledge.svg?darkmode=1"> <img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2190/pledge.svg"> </picture> </a> <!-- POLAR PLEDGE BADGE END --> StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 </issue> <code> [start of litestar/contrib/pydantic/pydantic_dto_factory.py] 1 from __future__ import annotations 2 3 from dataclasses import replace 4 from typing import TYPE_CHECKING, Collection, Generic, TypeVar 5 6 from litestar.dto.base_dto import AbstractDTO 7 from litestar.dto.data_structures import DTOFieldDefinition 8 from litestar.dto.field import DTO_FIELD_META_KEY, DTOField 9 from litestar.exceptions import MissingDependencyException 10 from litestar.types.empty import Empty 11 12 if TYPE_CHECKING: 13 from typing import Generator 14 15 from litestar.typing import FieldDefinition 16 17 18 try: 19 import pydantic 20 21 if pydantic.VERSION.startswith("2"): 22 from pydantic_core import PydanticUndefined 23 else: # pragma: no cover 24 from pydantic.fields import Undefined as PydanticUndefined # type: ignore 25 except ImportError as e: 26 raise MissingDependencyException("pydantic") from e 27 28 __all__ = ("PydanticDTO",) 29 30 T = TypeVar("T", bound="pydantic.BaseModel | Collection[pydantic.BaseModel]") 31 32 33 class PydanticDTO(AbstractDTO[T], Generic[T]): 34 """Support for domain modelling with Pydantic.""" 35 36 @classmethod 37 def generate_field_definitions( 38 cls, model_type: type[pydantic.BaseModel] 39 ) -> Generator[DTOFieldDefinition, None, None]: 40 model_field_definitions = cls.get_model_type_hints(model_type) 41 42 if pydantic.VERSION.startswith("1"): # pragma: no cover 43 model_fields: dict[str, pydantic.fields.FieldInfo] = {k: model_field.field_info for k, model_field in model_type.__fields__.items()} # type: ignore 44 else: 45 model_fields = dict(model_type.model_fields) 46 47 for field_name, field_info in model_fields.items(): 48 field_definition = model_field_definitions[field_name] 49 dto_field = (field_definition.extra or {}).pop(DTO_FIELD_META_KEY, DTOField()) 50 51 if field_info.default is not PydanticUndefined: 52 default = field_info.default 53 elif field_definition.is_optional: 54 default = None 55 else: 56 default = Empty 57 58 yield replace( 59 DTOFieldDefinition.from_field_definition( 60 field_definition=field_definition, 61 dto_field=dto_field, 62 model_name=model_type.__name__, 63 default_factory=field_info.default_factory 64 if field_info.default_factory and field_info.default_factory is not PydanticUndefined # type: ignore[comparison-overlap] 65 else Empty, 66 ), 67 default=default, 68 name=field_name, 69 ) 70 71 @classmethod 72 def detect_nested_field(cls, field_definition: FieldDefinition) -> 
bool: 73 return field_definition.is_subclass_of(pydantic.BaseModel) 74 [end of litestar/contrib/pydantic/pydantic_dto_factory.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/litestar/contrib/pydantic/pydantic_dto_factory.py b/litestar/contrib/pydantic/pydantic_dto_factory.py --- a/litestar/contrib/pydantic/pydantic_dto_factory.py +++ b/litestar/contrib/pydantic/pydantic_dto_factory.py @@ -3,20 +3,23 @@ from dataclasses import replace from typing import TYPE_CHECKING, Collection, Generic, TypeVar +from typing_extensions import override + from litestar.dto.base_dto import AbstractDTO from litestar.dto.data_structures import DTOFieldDefinition from litestar.dto.field import DTO_FIELD_META_KEY, DTOField -from litestar.exceptions import MissingDependencyException +from litestar.exceptions import MissingDependencyException, ValidationException from litestar.types.empty import Empty if TYPE_CHECKING: - from typing import Generator + from typing import Any, Generator from litestar.typing import FieldDefinition try: import pydantic + from pydantic import ValidationError if pydantic.VERSION.startswith("2"): from pydantic_core import PydanticUndefined @@ -33,6 +36,20 @@ class PydanticDTO(AbstractDTO[T], Generic[T]): """Support for domain modelling with Pydantic.""" + @override + def decode_builtins(self, value: dict[str, Any]) -> Any: + try: + return super().decode_builtins(value) + except ValidationError as ex: + raise ValidationException(extra=ex.errors()) from ex + + @override + def decode_bytes(self, value: bytes) -> Any: + try: + return super().decode_bytes(value) + except ValidationError as ex: + raise ValidationException(extra=ex.errors()) from ex + @classmethod def generate_field_definitions( cls, model_type: type[pydantic.BaseModel]
{"golden_diff": "diff --git a/litestar/contrib/pydantic/pydantic_dto_factory.py b/litestar/contrib/pydantic/pydantic_dto_factory.py\n--- a/litestar/contrib/pydantic/pydantic_dto_factory.py\n+++ b/litestar/contrib/pydantic/pydantic_dto_factory.py\n@@ -3,20 +3,23 @@\n from dataclasses import replace\n from typing import TYPE_CHECKING, Collection, Generic, TypeVar\n \n+from typing_extensions import override\n+\n from litestar.dto.base_dto import AbstractDTO\n from litestar.dto.data_structures import DTOFieldDefinition\n from litestar.dto.field import DTO_FIELD_META_KEY, DTOField\n-from litestar.exceptions import MissingDependencyException\n+from litestar.exceptions import MissingDependencyException, ValidationException\n from litestar.types.empty import Empty\n \n if TYPE_CHECKING:\n- from typing import Generator\n+ from typing import Any, Generator\n \n from litestar.typing import FieldDefinition\n \n \n try:\n import pydantic\n+ from pydantic import ValidationError\n \n if pydantic.VERSION.startswith(\"2\"):\n from pydantic_core import PydanticUndefined\n@@ -33,6 +36,20 @@\n class PydanticDTO(AbstractDTO[T], Generic[T]):\n \"\"\"Support for domain modelling with Pydantic.\"\"\"\n \n+ @override\n+ def decode_builtins(self, value: dict[str, Any]) -> Any:\n+ try:\n+ return super().decode_builtins(value)\n+ except ValidationError as ex:\n+ raise ValidationException(extra=ex.errors()) from ex\n+\n+ @override\n+ def decode_bytes(self, value: bytes) -> Any:\n+ try:\n+ return super().decode_bytes(value)\n+ except ValidationError as ex:\n+ raise ValidationException(extra=ex.errors()) from ex\n+\n @classmethod\n def generate_field_definitions(\n cls, model_type: type[pydantic.BaseModel]\n", "issue": "Bug: pydantic validations aren't being handled by DTOs\n### Description\r\n\r\nIn the documentation of Litestar we have:\r\n\r\n> When a value fails pydantic validation, the result will be a ValidationException with the extra key set to the pydantic validation errors. 
Thus, this data will be made available for the API consumers by default.\r\n\r\nBut when combined with DTO's in a post request, if a pydantic validation fails the exception is not handled and the client receives an InternalServerError (500) instead of a BadRequest (400):\r\n\r\n\r\n```\r\n __pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)\r\npydantic_core._pydantic_core.ValidationError: 1 validation error for User\r\nname\r\n String should have at most 2 characters [type=string_too_long, input_value='abc', input_type=str]\r\n For further information visit https://errors.pydantic.dev/2.2/v/string_too_long\r\n\r\nStatus code: 500\r\n```\r\n\r\nIf the validation isn't handled by pydantic (or at least it isn't handled **first** by pydantic) it works, for example an Enum validation returns a proper 400 status code because `msgspec` catches it first:\r\n\r\n```python\r\nfrom enum import Enum\r\n\r\nclass NameEnum(str, Enum):\r\n a = \"A\"\r\n b = \"B\"\r\n\r\n# (replace the User class of the example with this)\r\nclass User(BaseModel):\r\n name: NameEnum\r\n```\r\n\r\nOutput for incorrect Enum:\r\n\r\n```\r\n File \".../.venv/lib64/python3.11/site-packages/litestar/serialization/msgspec_hooks.py\", line 191, in decode_json\r\n raise SerializationException(str(msgspec_error)) from msgspec_error\r\nlitestar.exceptions.base_exceptions.SerializationException: Invalid enum value 'abc' - at `$.name`\r\n\r\n\r\n...\r\n\r\n File \".../.venv/lib64/python3.11/site-packages/litestar/routes/http.py\", line 186, in _get_response_data\r\n raise ClientException(str(e)) from e\r\nlitestar.exceptions.http_exceptions.ClientException: 400: Invalid enum value 'abc' - at `$.name`\r\n```\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\nfrom litestar import Litestar, post\r\nfrom litestar.contrib.pydantic import PydanticDTO\r\nfrom litestar.testing import TestClient\r\nfrom pydantic import BaseModel, Field\r\n\r\n\r\nclass User(BaseModel):\r\n name: str = Field(max_length=2)\r\n\r\n\r\nUserDTO = PydanticDTO[User]\r\n\r\n\r\n@post(\"/user\", dto=UserDTO, sync_to_thread=False)\r\ndef create_user(data: User) -> User:\r\n return data\r\n\r\n\r\nwith TestClient(Litestar([create_user], debug=True)) as client:\r\n response = client.post(\"/user\", json={\"name\": \"abc\"})\r\n print(response.text)\r\n print(f\"Status code: {response.status_code}\")\r\n assert response.status_code == 201\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n```bash\r\n1. 
Execute the MCVE\r\n```\r\n\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Litestar Version\r\n\r\n2.0.0rc1\r\n\r\n### Platform\r\n\r\n- [X] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n\r\n## Funding\r\n* If you would like to see an issue prioritized, make a pledge towards it!\r\n* We receive the pledge once the issue is completed & verified\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2190\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2190/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2190/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\nStaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import replace\nfrom typing import TYPE_CHECKING, Collection, Generic, TypeVar\n\nfrom litestar.dto.base_dto import AbstractDTO\nfrom litestar.dto.data_structures import DTOFieldDefinition\nfrom litestar.dto.field import DTO_FIELD_META_KEY, DTOField\nfrom litestar.exceptions import MissingDependencyException\nfrom litestar.types.empty import Empty\n\nif TYPE_CHECKING:\n from typing import Generator\n\n from litestar.typing import FieldDefinition\n\n\ntry:\n import pydantic\n\n if pydantic.VERSION.startswith(\"2\"):\n from pydantic_core import PydanticUndefined\n else: # pragma: no cover\n from pydantic.fields import Undefined as PydanticUndefined # type: ignore\nexcept ImportError as e:\n raise MissingDependencyException(\"pydantic\") from e\n\n__all__ = (\"PydanticDTO\",)\n\nT = TypeVar(\"T\", bound=\"pydantic.BaseModel | Collection[pydantic.BaseModel]\")\n\n\nclass PydanticDTO(AbstractDTO[T], Generic[T]):\n \"\"\"Support for domain modelling with Pydantic.\"\"\"\n\n @classmethod\n def generate_field_definitions(\n cls, model_type: type[pydantic.BaseModel]\n ) -> Generator[DTOFieldDefinition, None, None]:\n model_field_definitions = cls.get_model_type_hints(model_type)\n\n if pydantic.VERSION.startswith(\"1\"): # pragma: no cover\n model_fields: dict[str, pydantic.fields.FieldInfo] = {k: model_field.field_info for k, model_field in model_type.__fields__.items()} # type: ignore\n else:\n model_fields = dict(model_type.model_fields)\n\n for field_name, field_info in model_fields.items():\n field_definition = model_field_definitions[field_name]\n dto_field = (field_definition.extra or {}).pop(DTO_FIELD_META_KEY, DTOField())\n\n if field_info.default is not PydanticUndefined:\n default = field_info.default\n elif field_definition.is_optional:\n default = None\n else:\n default = Empty\n\n yield replace(\n DTOFieldDefinition.from_field_definition(\n 
field_definition=field_definition,\n dto_field=dto_field,\n model_name=model_type.__name__,\n default_factory=field_info.default_factory\n if field_info.default_factory and field_info.default_factory is not PydanticUndefined # type: ignore[comparison-overlap]\n else Empty,\n ),\n default=default,\n name=field_name,\n )\n\n @classmethod\n def detect_nested_field(cls, field_definition: FieldDefinition) -> bool:\n return field_definition.is_subclass_of(pydantic.BaseModel)\n", "path": "litestar/contrib/pydantic/pydantic_dto_factory.py"}]}
2,332
424
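The litestar fix above is an instance of a general pattern: catch the validation error raised by the model library at the decoding boundary and re-raise it as the framework's client-error type, so API consumers get a 400 with the validation details instead of a 500. Below is a framework-agnostic sketch of that pattern; it assumes pydantic v2 is installed, and `HTTPError` is a made-up stand-in for litestar's `ValidationException`, not the real class.

```python
from pydantic import BaseModel, Field, ValidationError

class User(BaseModel):
    name: str = Field(max_length=2)

class HTTPError(Exception):
    # Made-up stand-in for litestar.exceptions.ValidationException.
    def __init__(self, status_code, extra):
        super().__init__(f"{status_code}: validation failed")
        self.status_code, self.extra = status_code, extra

def decode(payload: dict) -> User:
    try:
        return User(**payload)
    except ValidationError as exc:
        # Translate the model library's error into the framework's client error.
        raise HTTPError(400, exc.errors()) from exc

try:
    decode({"name": "abc"})
except HTTPError as err:
    print(err.status_code, err.extra[0]["type"])  # 400 string_too_long
```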
gh_patches_debug_263
rasdani/github-patches
git_diff
numpy__numpy-3235
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 2to3 run `itertools_imports` fixer </issue> <code> [start of tools/py3tool.py] 1 #!/usr/bin/env python3 2 # -*- python -*- 3 """ 4 %prog SUBMODULE... 5 6 Hack to pipe submodules of Numpy through 2to3 and build them in-place 7 one-by-one. 8 9 Example usage: 10 11 python3 tools/py3tool.py testing distutils core 12 13 This will copy files to _py3k/numpy, add a dummy __init__.py and 14 version.py on the top level, and copy and 2to3 the files of the three 15 submodules. 16 17 When running py3tool again, only changed files are re-processed, which 18 makes the test-bugfix cycle faster. 19 20 """ 21 from __future__ import division, absolute_import, print_function 22 23 from optparse import OptionParser 24 import shutil 25 import os 26 import sys 27 import re 28 import subprocess 29 import fnmatch 30 31 if os.environ.get('USE_2TO3CACHE'): 32 import lib2to3cache 33 34 BASE = os.path.normpath(os.path.join(os.path.dirname(__file__), '..')) 35 TEMP = os.path.normpath(os.path.join(BASE, '_py3k')) 36 37 SCRIPT_2TO3 = os.path.join(BASE, 'tools', '2to3.py') 38 39 EXTRA_2TO3_FLAGS = { 40 'numpy/core/defchararray.py': '-x unicode', 41 'numpy/compat/py3k.py': '-x unicode', 42 'numpy/ma/timer_comparison.py': 'skip', 43 } 44 45 # Names of fixers to skip when running 2to3. This is a complete list of 46 # available fixers, with fixers not currently skipped commented out. 47 FIXES_TO_SKIP = [ 48 'apply', 49 # 'basestring', 50 'buffer', 51 'callable', 52 'dict', 53 'exec', 54 'execfile', 55 'exitfunc', 56 'filter', 57 'funcattrs', 58 'future', 59 'getcwdu', 60 'has_key', 61 # 'idioms', 62 'import', 63 'imports', 64 'imports2', 65 'input', 66 'intern', 67 # 'isinstance', 68 # 'itertools', 69 # 'itertools_imports', 70 # 'long', 71 'map', 72 'metaclass', 73 'methodattrs', 74 'ne', 75 # 'next', 76 # 'nonzero', 77 # 'numliterals', 78 'operator', 79 'paren', 80 'print', 81 'raise', 82 'raw_input', 83 'reduce', 84 # 'renames', 85 'repr', 86 'setliteral', 87 'standarderror', 88 'sys_exc', 89 'throw', 90 'tuple_params', 91 # 'types', 92 # 'unicode', 93 # 'urllib', 94 # 'ws_comma', 95 'xrange', 96 'xreadlines', 97 # 'zip', 98 ] 99 100 skip_fixes= [] 101 for _t in FIXES_TO_SKIP: 102 skip_fixes.append('-x') 103 skip_fixes.append(_t) 104 105 106 def main(): 107 p = OptionParser(usage=__doc__.strip()) 108 p.add_option("--clean", "-c", action="store_true", 109 help="clean source directory") 110 options, args = p.parse_args() 111 112 if not args: 113 p.error('no submodules given') 114 else: 115 dirs = ['numpy/%s' % x for x in map(os.path.basename, args)] 116 117 # Prepare 118 if not os.path.isdir(TEMP): 119 os.makedirs(TEMP) 120 121 # Set up dummy files (for building only submodules) 122 dummy_files = { 123 '__init__.py': 'from numpy.version import version as __version__', 124 'version.py': 'version = "1.4.0.dev"' 125 } 126 127 for fn, content in dummy_files.items(): 128 fn = os.path.join(TEMP, 'numpy', fn) 129 if not os.path.isfile(fn): 130 try: 131 os.makedirs(os.path.dirname(fn)) 132 except OSError: 133 pass 134 f = open(fn, 'wb+') 135 f.write(content.encode('ascii')) 136 f.close() 137 138 # Environment 139 pp = [os.path.abspath(TEMP)] 140 def getenv(): 141 env = dict(os.environ) 142 env.update({'PYTHONPATH': ':'.join(pp)}) 143 return env 144 145 # Copy 146 for d in dirs: 147 src = os.path.join(BASE, d) 148 dst = os.path.join(TEMP, d) 149 150 # Run 2to3 151 sync_2to3(dst=dst, 152 src=src, 153 
patchfile=os.path.join(TEMP, os.path.basename(d) + '.patch'), 154 clean=options.clean) 155 156 # Run setup.py, falling back to Pdb post-mortem on exceptions 157 setup_py = os.path.join(dst, 'setup.py') 158 if os.path.isfile(setup_py): 159 code = """\ 160 import pdb, sys, traceback 161 p = pdb.Pdb() 162 try: 163 import __main__ 164 __main__.__dict__.update({ 165 "__name__": "__main__", "__file__": "setup.py", 166 "__builtins__": __builtins__}) 167 fp = open("setup.py", "rb") 168 try: 169 exec(compile(fp.read(), "setup.py", 'exec')) 170 finally: 171 fp.close() 172 except SystemExit: 173 raise 174 except: 175 traceback.print_exc() 176 t = sys.exc_info()[2] 177 p.interaction(None, t) 178 """ 179 ret = subprocess.call([sys.executable, '-c', code, 180 'build_ext', '-i'], 181 cwd=dst, 182 env=getenv()) 183 if ret != 0: 184 raise RuntimeError("Build failed.") 185 186 # Run nosetests 187 subprocess.call(['nosetests3', '-v', d], cwd=TEMP) 188 189 190 def walk_sync(dir1, dir2, _seen=None): 191 if _seen is None: 192 seen = {} 193 else: 194 seen = _seen 195 196 if not dir1.endswith(os.path.sep): 197 dir1 = dir1 + os.path.sep 198 199 # Walk through stuff (which we haven't yet gone through) in dir1 200 for root, dirs, files in os.walk(dir1): 201 sub = root[len(dir1):] 202 if sub in seen: 203 dirs = [x for x in dirs if x not in seen[sub][0]] 204 files = [x for x in files if x not in seen[sub][1]] 205 seen[sub][0].extend(dirs) 206 seen[sub][1].extend(files) 207 else: 208 seen[sub] = (dirs, files) 209 if not dirs and not files: 210 continue 211 yield os.path.join(dir1, sub), os.path.join(dir2, sub), dirs, files 212 213 if _seen is None: 214 # Walk through stuff (which we haven't yet gone through) in dir2 215 for root2, root1, dirs, files in walk_sync(dir2, dir1, _seen=seen): 216 yield root1, root2, dirs, files 217 218 def sync_2to3(src, dst, patchfile=None, clean=False): 219 import lib2to3.main 220 from io import StringIO 221 222 to_convert = [] 223 224 for src_dir, dst_dir, dirs, files in walk_sync(src, dst): 225 for fn in dirs + files: 226 src_fn = os.path.join(src_dir, fn) 227 dst_fn = os.path.join(dst_dir, fn) 228 229 # skip temporary etc. 
files 230 if fn.startswith('.#') or fn.endswith('~'): 231 continue 232 233 # remove non-existing 234 if os.path.exists(dst_fn) and not os.path.exists(src_fn): 235 if clean: 236 if os.path.isdir(dst_fn): 237 shutil.rmtree(dst_fn) 238 else: 239 os.unlink(dst_fn) 240 continue 241 242 # make directories 243 if os.path.isdir(src_fn): 244 if not os.path.isdir(dst_fn): 245 os.makedirs(dst_fn) 246 continue 247 248 dst_dir = os.path.dirname(dst_fn) 249 if os.path.isfile(dst_fn) and not os.path.isdir(dst_dir): 250 os.makedirs(dst_dir) 251 252 # don't replace up-to-date files 253 try: 254 if os.path.isfile(dst_fn) and \ 255 os.stat(dst_fn).st_mtime >= os.stat(src_fn).st_mtime: 256 continue 257 except OSError: 258 pass 259 260 # copy file 261 shutil.copyfile(src_fn, dst_fn) 262 263 # add .py files to 2to3 list 264 if dst_fn.endswith('.py'): 265 to_convert.append((src_fn, dst_fn)) 266 267 # run 2to3 268 flag_sets = {} 269 for fn, dst_fn in to_convert: 270 flag = '' 271 for pat, opt in EXTRA_2TO3_FLAGS.items(): 272 if fnmatch.fnmatch(fn, pat): 273 flag = opt 274 break 275 flag_sets.setdefault(flag, []).append(dst_fn) 276 277 if patchfile: 278 p = open(patchfile, 'wb+') 279 else: 280 p = open(os.devnull, 'wb') 281 282 for flags, filenames in flag_sets.items(): 283 if flags == 'skip': 284 continue 285 286 _old_stdout = sys.stdout 287 try: 288 sys.stdout = StringIO() 289 opt = [] 290 opt.extend(['-w', '-n']) 291 opt.extend(skip_fixes) 292 opt.extend(flags.split()) 293 opt.extend(filenames) 294 lib2to3.main.main("lib2to3.fixes", opt) 295 finally: 296 sys.stdout = _old_stdout 297 298 p.close() 299 300 if __name__ == "__main__": 301 main() 302 [end of tools/py3tool.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/py3tool.py b/tools/py3tool.py --- a/tools/py3tool.py +++ b/tools/py3tool.py @@ -66,7 +66,7 @@ 'intern', # 'isinstance', # 'itertools', -# 'itertools_imports', + 'itertools_imports', # 'long', 'map', 'metaclass',
{"golden_diff": "diff --git a/tools/py3tool.py b/tools/py3tool.py\n--- a/tools/py3tool.py\n+++ b/tools/py3tool.py\n@@ -66,7 +66,7 @@\n 'intern',\n # 'isinstance',\n # 'itertools',\n-# 'itertools_imports',\n+ 'itertools_imports',\n # 'long',\n 'map',\n 'metaclass',\n", "issue": "2to3 run `itertools_imports` fixer\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- python -*-\n\"\"\"\n%prog SUBMODULE...\n\nHack to pipe submodules of Numpy through 2to3 and build them in-place\none-by-one.\n\nExample usage:\n\n python3 tools/py3tool.py testing distutils core\n\nThis will copy files to _py3k/numpy, add a dummy __init__.py and\nversion.py on the top level, and copy and 2to3 the files of the three\nsubmodules.\n\nWhen running py3tool again, only changed files are re-processed, which\nmakes the test-bugfix cycle faster.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom optparse import OptionParser\nimport shutil\nimport os\nimport sys\nimport re\nimport subprocess\nimport fnmatch\n\nif os.environ.get('USE_2TO3CACHE'):\n import lib2to3cache\n\nBASE = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))\nTEMP = os.path.normpath(os.path.join(BASE, '_py3k'))\n\nSCRIPT_2TO3 = os.path.join(BASE, 'tools', '2to3.py')\n\nEXTRA_2TO3_FLAGS = {\n 'numpy/core/defchararray.py': '-x unicode',\n 'numpy/compat/py3k.py': '-x unicode',\n 'numpy/ma/timer_comparison.py': 'skip',\n}\n\n# Names of fixers to skip when running 2to3. This is a complete list of\n# available fixers, with fixers not currently skipped commented out.\nFIXES_TO_SKIP = [\n 'apply',\n# 'basestring',\n 'buffer',\n 'callable',\n 'dict',\n 'exec',\n 'execfile',\n 'exitfunc',\n 'filter',\n 'funcattrs',\n 'future',\n 'getcwdu',\n 'has_key',\n# 'idioms',\n 'import',\n 'imports',\n 'imports2',\n 'input',\n 'intern',\n# 'isinstance',\n# 'itertools',\n# 'itertools_imports',\n# 'long',\n 'map',\n 'metaclass',\n 'methodattrs',\n 'ne',\n# 'next',\n# 'nonzero',\n# 'numliterals',\n 'operator',\n 'paren',\n 'print',\n 'raise',\n 'raw_input',\n 'reduce',\n# 'renames',\n 'repr',\n 'setliteral',\n 'standarderror',\n 'sys_exc',\n 'throw',\n 'tuple_params',\n# 'types',\n# 'unicode',\n# 'urllib',\n# 'ws_comma',\n 'xrange',\n 'xreadlines',\n# 'zip',\n]\n\nskip_fixes= []\nfor _t in FIXES_TO_SKIP:\n skip_fixes.append('-x')\n skip_fixes.append(_t)\n\n\ndef main():\n p = OptionParser(usage=__doc__.strip())\n p.add_option(\"--clean\", \"-c\", action=\"store_true\",\n help=\"clean source directory\")\n options, args = p.parse_args()\n\n if not args:\n p.error('no submodules given')\n else:\n dirs = ['numpy/%s' % x for x in map(os.path.basename, args)]\n\n # Prepare\n if not os.path.isdir(TEMP):\n os.makedirs(TEMP)\n\n # Set up dummy files (for building only submodules)\n dummy_files = {\n '__init__.py': 'from numpy.version import version as __version__',\n 'version.py': 'version = \"1.4.0.dev\"'\n }\n\n for fn, content in dummy_files.items():\n fn = os.path.join(TEMP, 'numpy', fn)\n if not os.path.isfile(fn):\n try:\n os.makedirs(os.path.dirname(fn))\n except OSError:\n pass\n f = open(fn, 'wb+')\n f.write(content.encode('ascii'))\n f.close()\n\n # Environment\n pp = [os.path.abspath(TEMP)]\n def getenv():\n env = dict(os.environ)\n env.update({'PYTHONPATH': ':'.join(pp)})\n return env\n\n # Copy\n for d in dirs:\n src = os.path.join(BASE, d)\n dst = os.path.join(TEMP, d)\n\n # Run 2to3\n sync_2to3(dst=dst,\n src=src,\n patchfile=os.path.join(TEMP, os.path.basename(d) + '.patch'),\n clean=options.clean)\n\n # Run 
setup.py, falling back to Pdb post-mortem on exceptions\n setup_py = os.path.join(dst, 'setup.py')\n if os.path.isfile(setup_py):\n code = \"\"\"\\\nimport pdb, sys, traceback\np = pdb.Pdb()\ntry:\n import __main__\n __main__.__dict__.update({\n \"__name__\": \"__main__\", \"__file__\": \"setup.py\",\n \"__builtins__\": __builtins__})\n fp = open(\"setup.py\", \"rb\")\n try:\n exec(compile(fp.read(), \"setup.py\", 'exec'))\n finally:\n fp.close()\nexcept SystemExit:\n raise\nexcept:\n traceback.print_exc()\n t = sys.exc_info()[2]\n p.interaction(None, t)\n\"\"\"\n ret = subprocess.call([sys.executable, '-c', code,\n 'build_ext', '-i'],\n cwd=dst,\n env=getenv())\n if ret != 0:\n raise RuntimeError(\"Build failed.\")\n\n # Run nosetests\n subprocess.call(['nosetests3', '-v', d], cwd=TEMP)\n\n\ndef walk_sync(dir1, dir2, _seen=None):\n if _seen is None:\n seen = {}\n else:\n seen = _seen\n\n if not dir1.endswith(os.path.sep):\n dir1 = dir1 + os.path.sep\n\n # Walk through stuff (which we haven't yet gone through) in dir1\n for root, dirs, files in os.walk(dir1):\n sub = root[len(dir1):]\n if sub in seen:\n dirs = [x for x in dirs if x not in seen[sub][0]]\n files = [x for x in files if x not in seen[sub][1]]\n seen[sub][0].extend(dirs)\n seen[sub][1].extend(files)\n else:\n seen[sub] = (dirs, files)\n if not dirs and not files:\n continue\n yield os.path.join(dir1, sub), os.path.join(dir2, sub), dirs, files\n\n if _seen is None:\n # Walk through stuff (which we haven't yet gone through) in dir2\n for root2, root1, dirs, files in walk_sync(dir2, dir1, _seen=seen):\n yield root1, root2, dirs, files\n\ndef sync_2to3(src, dst, patchfile=None, clean=False):\n import lib2to3.main\n from io import StringIO\n\n to_convert = []\n\n for src_dir, dst_dir, dirs, files in walk_sync(src, dst):\n for fn in dirs + files:\n src_fn = os.path.join(src_dir, fn)\n dst_fn = os.path.join(dst_dir, fn)\n\n # skip temporary etc. files\n if fn.startswith('.#') or fn.endswith('~'):\n continue\n\n # remove non-existing\n if os.path.exists(dst_fn) and not os.path.exists(src_fn):\n if clean:\n if os.path.isdir(dst_fn):\n shutil.rmtree(dst_fn)\n else:\n os.unlink(dst_fn)\n continue\n\n # make directories\n if os.path.isdir(src_fn):\n if not os.path.isdir(dst_fn):\n os.makedirs(dst_fn)\n continue\n\n dst_dir = os.path.dirname(dst_fn)\n if os.path.isfile(dst_fn) and not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n\n # don't replace up-to-date files\n try:\n if os.path.isfile(dst_fn) and \\\n os.stat(dst_fn).st_mtime >= os.stat(src_fn).st_mtime:\n continue\n except OSError:\n pass\n\n # copy file\n shutil.copyfile(src_fn, dst_fn)\n\n # add .py files to 2to3 list\n if dst_fn.endswith('.py'):\n to_convert.append((src_fn, dst_fn))\n\n # run 2to3\n flag_sets = {}\n for fn, dst_fn in to_convert:\n flag = ''\n for pat, opt in EXTRA_2TO3_FLAGS.items():\n if fnmatch.fnmatch(fn, pat):\n flag = opt\n break\n flag_sets.setdefault(flag, []).append(dst_fn)\n\n if patchfile:\n p = open(patchfile, 'wb+')\n else:\n p = open(os.devnull, 'wb')\n\n for flags, filenames in flag_sets.items():\n if flags == 'skip':\n continue\n\n _old_stdout = sys.stdout\n try:\n sys.stdout = StringIO()\n opt = []\n opt.extend(['-w', '-n'])\n opt.extend(skip_fixes)\n opt.extend(flags.split())\n opt.extend(filenames)\n lib2to3.main.main(\"lib2to3.fixes\", opt)\n finally:\n sys.stdout = _old_stdout\n\n p.close()\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/py3tool.py"}]}
3,381
91
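The numpy change above simply stops skipping 2to3's `itertools_imports` fixer. To see what that fixer does in isolation, here is a standalone sketch; it assumes a CPython that still bundles `lib2to3` (the module was deprecated and later removed in 3.13).

```python
from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_itertools_imports"])
src = "from itertools import izip, imap, chain\n"
# izip/imap have no Python 3 import equivalent, so only chain should survive the rewrite.
print(tool.refactor_string(src, "<example>"), end="")
```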
gh_patches_debug_12718
rasdani/github-patches
git_diff
ray-project__ray-6849
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tune] Feature request: tune.sample_from does not support callable objects. ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 16.04 - **Ray installed from (source or binary)**: binary - **Ray version**: 0.7.2 - **Python version**: 3.2 - **Exact command to reproduce**: See below ### Describe the problem The `tune` sample_from interface is strictly limited to function objects, such as lambdas. This serves most use cases, but there are a number of instances where it's very useful to define a callable object to yield samples. (See trivial example below.) At the moment, providing a callable object returns errors from within tune variant generation, as the non-function-based `sample_from` entries are processed in grid entries. This can be resolved by changeing the sample/grid check from a direct check for `FunctionType` (Source location: https://github.com/ray-project/ray/blob/fadfa5f30bb654a74c781eaf8396a35af3ab7760/python/ray/tune/suggest/variant_generator.py#L116) to the builtin function `callable`. I'm not entirely clear if this is an intentional limitation, and changing this logic will likely require expansion of tune's tests and documentation to cover the new behavior. I would be happy to open a PR for this if a maintainer gives the feature a 👍. ### Source code / logs ```python import random import ray.tune as tune from ray.tune.suggest.variant_generator import generate_variants class Normal: def __call__(self, _config): return random.normalvariate(mu=0, sigma=1) grid_config = {"grid": tune.grid_search(list(range(2)))} sample_config = {"normal": tune.sample_from(Normal())} print(grid_config) print(list(generate_variants(grid_config))) print(sample_config) print(list(generate_variants(sample_config))) ``` Results: ``` {'grid': {'grid_search': [0, 1]}} [('grid=0', {'grid': 0}), ('grid=1', {'grid': 1})] {'normal': tune.sample_from(<__main__.Normal object at 0x7f08ed1d0f50>)} Traceback (most recent call last): File "sample_error.py", line 19, in <module> print(list(generate_variants(sample_config))) File "/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py", line 43, in generate_variants for resolved_vars, spec in _generate_variants(unresolved_spec): File "/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py", line 123, in _generate_variants for resolved_spec in grid_search: File "/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py", line 193, in _grid_search_generator while value_indices[-1] < len(grid_vars[-1][1]): TypeError: object of type 'Normal' has no len() ``` </issue> <code> [start of python/ray/tune/suggest/variant_generator.py] 1 import copy 2 import logging 3 import numpy 4 import random 5 import types 6 7 from ray.tune import TuneError 8 from ray.tune.sample import sample_from 9 10 logger = logging.getLogger(__name__) 11 12 13 def generate_variants(unresolved_spec): 14 """Generates variants from a spec (dict) with unresolved values. 15 16 There are two types of unresolved values: 17 18 Grid search: These define a grid search over values. 
For example, the 19 following grid search values in a spec will produce six distinct 20 variants in combination: 21 22 "activation": grid_search(["relu", "tanh"]) 23 "learning_rate": grid_search([1e-3, 1e-4, 1e-5]) 24 25 Lambda functions: These are evaluated to produce a concrete value, and 26 can express dependencies or conditional distributions between values. 27 They can also be used to express random search (e.g., by calling 28 into the `random` or `np` module). 29 30 "cpu": lambda spec: spec.config.num_workers 31 "batch_size": lambda spec: random.uniform(1, 1000) 32 33 Finally, to support defining specs in plain JSON / YAML, grid search 34 and lambda functions can also be defined alternatively as follows: 35 36 "activation": {"grid_search": ["relu", "tanh"]} 37 "cpu": {"eval": "spec.config.num_workers"} 38 39 Use `format_vars` to format the returned dict of hyperparameters. 40 41 Yields: 42 (Dict of resolved variables, Spec object) 43 """ 44 for resolved_vars, spec in _generate_variants(unresolved_spec): 45 assert not _unresolved_values(spec) 46 yield resolved_vars, spec 47 48 49 def grid_search(values): 50 """Convenience method for specifying grid search over a value. 51 52 Arguments: 53 values: An iterable whose parameters will be gridded. 54 """ 55 56 return {"grid_search": values} 57 58 59 _STANDARD_IMPORTS = { 60 "random": random, 61 "np": numpy, 62 } 63 64 _MAX_RESOLUTION_PASSES = 20 65 66 67 def resolve_nested_dict(nested_dict): 68 """Flattens a nested dict by joining keys into tuple of paths. 69 70 Can then be passed into `format_vars`. 71 """ 72 res = {} 73 for k, v in nested_dict.items(): 74 if isinstance(v, dict): 75 for k_, v_ in resolve_nested_dict(v).items(): 76 res[(k, ) + k_] = v_ 77 else: 78 res[(k, )] = v 79 return res 80 81 82 def format_vars(resolved_vars): 83 """Formats the resolved variable dict into a single string.""" 84 out = [] 85 for path, value in sorted(resolved_vars.items()): 86 if path[0] in ["run", "env", "resources_per_trial"]: 87 continue # TrialRunner already has these in the experiment_tag 88 pieces = [] 89 last_string = True 90 for k in path[::-1]: 91 if isinstance(k, int): 92 pieces.append(str(k)) 93 elif last_string: 94 last_string = False 95 pieces.append(k) 96 pieces.reverse() 97 out.append(_clean_value("_".join(pieces)) + "=" + _clean_value(value)) 98 return ",".join(out) 99 100 101 def flatten_resolved_vars(resolved_vars): 102 """Formats the resolved variable dict into a mapping of (str -> value).""" 103 flattened_resolved_vars_dict = {} 104 for pieces, value in resolved_vars.items(): 105 if pieces[0] == "config": 106 pieces = pieces[1:] 107 pieces = [str(piece) for piece in pieces] 108 flattened_resolved_vars_dict["/".join(pieces)] = value 109 return flattened_resolved_vars_dict 110 111 112 def _clean_value(value): 113 if isinstance(value, float): 114 return "{:.5}".format(value) 115 else: 116 return str(value).replace("/", "_") 117 118 119 def _generate_variants(spec): 120 spec = copy.deepcopy(spec) 121 unresolved = _unresolved_values(spec) 122 if not unresolved: 123 yield {}, spec 124 return 125 126 grid_vars = [] 127 lambda_vars = [] 128 for path, value in unresolved.items(): 129 if isinstance(value, types.FunctionType): 130 lambda_vars.append((path, value)) 131 else: 132 grid_vars.append((path, value)) 133 grid_vars.sort() 134 135 grid_search = _grid_search_generator(spec, grid_vars) 136 for resolved_spec in grid_search: 137 resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars) 138 for resolved, spec in 
_generate_variants(resolved_spec): 139 for path, value in grid_vars: 140 resolved_vars[path] = _get_value(spec, path) 141 for k, v in resolved.items(): 142 if (k in resolved_vars and v != resolved_vars[k] 143 and _is_resolved(resolved_vars[k])): 144 raise ValueError( 145 "The variable `{}` could not be unambiguously " 146 "resolved to a single value. Consider simplifying " 147 "your configuration.".format(k)) 148 resolved_vars[k] = v 149 yield resolved_vars, spec 150 151 152 def _assign_value(spec, path, value): 153 for k in path[:-1]: 154 spec = spec[k] 155 spec[path[-1]] = value 156 157 158 def _get_value(spec, path): 159 for k in path: 160 spec = spec[k] 161 return spec 162 163 164 def _resolve_lambda_vars(spec, lambda_vars): 165 resolved = {} 166 error = True 167 num_passes = 0 168 while error and num_passes < _MAX_RESOLUTION_PASSES: 169 num_passes += 1 170 error = False 171 for path, fn in lambda_vars: 172 try: 173 value = fn(_UnresolvedAccessGuard(spec)) 174 except RecursiveDependencyError as e: 175 error = e 176 except Exception: 177 raise ValueError( 178 "Failed to evaluate expression: {}: {}".format(path, fn)) 179 else: 180 _assign_value(spec, path, value) 181 resolved[path] = value 182 if error: 183 raise error 184 return resolved 185 186 187 def _grid_search_generator(unresolved_spec, grid_vars): 188 value_indices = [0] * len(grid_vars) 189 190 def increment(i): 191 value_indices[i] += 1 192 if value_indices[i] >= len(grid_vars[i][1]): 193 value_indices[i] = 0 194 if i + 1 < len(value_indices): 195 return increment(i + 1) 196 else: 197 return True 198 return False 199 200 if not grid_vars: 201 yield unresolved_spec 202 return 203 204 while value_indices[-1] < len(grid_vars[-1][1]): 205 spec = copy.deepcopy(unresolved_spec) 206 for i, (path, values) in enumerate(grid_vars): 207 _assign_value(spec, path, values[value_indices[i]]) 208 yield spec 209 if grid_vars: 210 done = increment(0) 211 if done: 212 break 213 214 215 def _is_resolved(v): 216 resolved, _ = _try_resolve(v) 217 return resolved 218 219 220 def _try_resolve(v): 221 if isinstance(v, sample_from): 222 # Function to sample from 223 return False, v.func 224 elif isinstance(v, dict) and len(v) == 1 and "eval" in v: 225 # Lambda function in eval syntax 226 return False, lambda spec: eval( 227 v["eval"], _STANDARD_IMPORTS, {"spec": spec}) 228 elif isinstance(v, dict) and len(v) == 1 and "grid_search" in v: 229 # Grid search values 230 grid_values = v["grid_search"] 231 if not isinstance(grid_values, list): 232 raise TuneError( 233 "Grid search expected list of values, got: {}".format( 234 grid_values)) 235 return False, grid_values 236 return True, v 237 238 239 def _unresolved_values(spec): 240 found = {} 241 for k, v in spec.items(): 242 resolved, v = _try_resolve(v) 243 if not resolved: 244 found[(k, )] = v 245 elif isinstance(v, dict): 246 # Recurse into a dict 247 for (path, value) in _unresolved_values(v).items(): 248 found[(k, ) + path] = value 249 elif isinstance(v, list): 250 # Recurse into a list 251 for i, elem in enumerate(v): 252 for (path, value) in _unresolved_values({i: elem}).items(): 253 found[(k, ) + path] = value 254 return found 255 256 257 class _UnresolvedAccessGuard(dict): 258 def __init__(self, *args, **kwds): 259 super(_UnresolvedAccessGuard, self).__init__(*args, **kwds) 260 self.__dict__ = self 261 262 def __getattribute__(self, item): 263 value = dict.__getattribute__(self, item) 264 if not _is_resolved(value): 265 raise RecursiveDependencyError( 266 "`{}` recursively depends on 
{}".format(item, value)) 267 elif isinstance(value, dict): 268 return _UnresolvedAccessGuard(value) 269 else: 270 return value 271 272 273 class RecursiveDependencyError(Exception): 274 def __init__(self, msg): 275 Exception.__init__(self, msg) 276 [end of python/ray/tune/suggest/variant_generator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/ray/tune/suggest/variant_generator.py b/python/ray/tune/suggest/variant_generator.py --- a/python/ray/tune/suggest/variant_generator.py +++ b/python/ray/tune/suggest/variant_generator.py @@ -2,7 +2,6 @@ import logging import numpy import random -import types from ray.tune import TuneError from ray.tune.sample import sample_from @@ -126,7 +125,7 @@ grid_vars = [] lambda_vars = [] for path, value in unresolved.items(): - if isinstance(value, types.FunctionType): + if callable(value): lambda_vars.append((path, value)) else: grid_vars.append((path, value))
{"golden_diff": "diff --git a/python/ray/tune/suggest/variant_generator.py b/python/ray/tune/suggest/variant_generator.py\n--- a/python/ray/tune/suggest/variant_generator.py\n+++ b/python/ray/tune/suggest/variant_generator.py\n@@ -2,7 +2,6 @@\n import logging\n import numpy\n import random\n-import types\n \n from ray.tune import TuneError\n from ray.tune.sample import sample_from\n@@ -126,7 +125,7 @@\n grid_vars = []\n lambda_vars = []\n for path, value in unresolved.items():\n- if isinstance(value, types.FunctionType):\n+ if callable(value):\n lambda_vars.append((path, value))\n else:\n grid_vars.append((path, value))\n", "issue": "[tune] Feature request: tune.sample_from does not support callable objects. \n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 16.04\r\n- **Ray installed from (source or binary)**: binary\r\n- **Ray version**: 0.7.2\r\n- **Python version**: 3.2\r\n- **Exact command to reproduce**: See below\r\n\r\n### Describe the problem\r\n\r\nThe `tune` sample_from interface is strictly limited to function objects, such as lambdas. This serves most use cases, but there are a number of instances where it's very useful to define a callable object to yield samples. (See trivial example below.) At the moment, providing a callable object returns errors from within tune variant generation, as the non-function-based `sample_from` entries are processed in grid entries. This can be resolved by changeing the sample/grid check from a direct check for `FunctionType` (Source location: https://github.com/ray-project/ray/blob/fadfa5f30bb654a74c781eaf8396a35af3ab7760/python/ray/tune/suggest/variant_generator.py#L116) to the builtin function `callable`.\r\n\r\nI'm not entirely clear if this is an intentional limitation, and changing this logic will likely require expansion of tune's tests and documentation to cover the new behavior. 
I would be happy to open a PR for this if a maintainer gives the feature a \ud83d\udc4d.\r\n\r\n### Source code / logs\r\n\r\n```python\r\n\r\nimport random\r\n\r\nimport ray.tune as tune\r\nfrom ray.tune.suggest.variant_generator import generate_variants\r\n\r\nclass Normal:\r\n def __call__(self, _config):\r\n return random.normalvariate(mu=0, sigma=1)\r\n\r\n\r\ngrid_config = {\"grid\": tune.grid_search(list(range(2)))}\r\nsample_config = {\"normal\": tune.sample_from(Normal())}\r\n\r\n\r\nprint(grid_config)\r\nprint(list(generate_variants(grid_config)))\r\n\r\nprint(sample_config)\r\nprint(list(generate_variants(sample_config)))\r\n\r\n```\r\n\r\nResults:\r\n\r\n```\r\n{'grid': {'grid_search': [0, 1]}}\r\n[('grid=0', {'grid': 0}), ('grid=1', {'grid': 1})]\r\n{'normal': tune.sample_from(<__main__.Normal object at 0x7f08ed1d0f50>)}\r\nTraceback (most recent call last):\r\n File \"sample_error.py\", line 19, in <module>\r\n print(list(generate_variants(sample_config)))\r\n File \"/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py\", line 43, in generate_variants\r\n for resolved_vars, spec in _generate_variants(unresolved_spec):\r\n File \"/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py\", line 123, in _generate_variants\r\n for resolved_spec in grid_search:\r\n File \"/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py\", line 193, in _grid_search_generator\r\n while value_indices[-1] < len(grid_vars[-1][1]):\r\nTypeError: object of type 'Normal' has no len()\r\n```\r\n\n", "before_files": [{"content": "import copy\nimport logging\nimport numpy\nimport random\nimport types\n\nfrom ray.tune import TuneError\nfrom ray.tune.sample import sample_from\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_variants(unresolved_spec):\n \"\"\"Generates variants from a spec (dict) with unresolved values.\n\n There are two types of unresolved values:\n\n Grid search: These define a grid search over values. 
For example, the\n following grid search values in a spec will produce six distinct\n variants in combination:\n\n \"activation\": grid_search([\"relu\", \"tanh\"])\n \"learning_rate\": grid_search([1e-3, 1e-4, 1e-5])\n\n Lambda functions: These are evaluated to produce a concrete value, and\n can express dependencies or conditional distributions between values.\n They can also be used to express random search (e.g., by calling\n into the `random` or `np` module).\n\n \"cpu\": lambda spec: spec.config.num_workers\n \"batch_size\": lambda spec: random.uniform(1, 1000)\n\n Finally, to support defining specs in plain JSON / YAML, grid search\n and lambda functions can also be defined alternatively as follows:\n\n \"activation\": {\"grid_search\": [\"relu\", \"tanh\"]}\n \"cpu\": {\"eval\": \"spec.config.num_workers\"}\n\n Use `format_vars` to format the returned dict of hyperparameters.\n\n Yields:\n (Dict of resolved variables, Spec object)\n \"\"\"\n for resolved_vars, spec in _generate_variants(unresolved_spec):\n assert not _unresolved_values(spec)\n yield resolved_vars, spec\n\n\ndef grid_search(values):\n \"\"\"Convenience method for specifying grid search over a value.\n\n Arguments:\n values: An iterable whose parameters will be gridded.\n \"\"\"\n\n return {\"grid_search\": values}\n\n\n_STANDARD_IMPORTS = {\n \"random\": random,\n \"np\": numpy,\n}\n\n_MAX_RESOLUTION_PASSES = 20\n\n\ndef resolve_nested_dict(nested_dict):\n \"\"\"Flattens a nested dict by joining keys into tuple of paths.\n\n Can then be passed into `format_vars`.\n \"\"\"\n res = {}\n for k, v in nested_dict.items():\n if isinstance(v, dict):\n for k_, v_ in resolve_nested_dict(v).items():\n res[(k, ) + k_] = v_\n else:\n res[(k, )] = v\n return res\n\n\ndef format_vars(resolved_vars):\n \"\"\"Formats the resolved variable dict into a single string.\"\"\"\n out = []\n for path, value in sorted(resolved_vars.items()):\n if path[0] in [\"run\", \"env\", \"resources_per_trial\"]:\n continue # TrialRunner already has these in the experiment_tag\n pieces = []\n last_string = True\n for k in path[::-1]:\n if isinstance(k, int):\n pieces.append(str(k))\n elif last_string:\n last_string = False\n pieces.append(k)\n pieces.reverse()\n out.append(_clean_value(\"_\".join(pieces)) + \"=\" + _clean_value(value))\n return \",\".join(out)\n\n\ndef flatten_resolved_vars(resolved_vars):\n \"\"\"Formats the resolved variable dict into a mapping of (str -> value).\"\"\"\n flattened_resolved_vars_dict = {}\n for pieces, value in resolved_vars.items():\n if pieces[0] == \"config\":\n pieces = pieces[1:]\n pieces = [str(piece) for piece in pieces]\n flattened_resolved_vars_dict[\"/\".join(pieces)] = value\n return flattened_resolved_vars_dict\n\n\ndef _clean_value(value):\n if isinstance(value, float):\n return \"{:.5}\".format(value)\n else:\n return str(value).replace(\"/\", \"_\")\n\n\ndef _generate_variants(spec):\n spec = copy.deepcopy(spec)\n unresolved = _unresolved_values(spec)\n if not unresolved:\n yield {}, spec\n return\n\n grid_vars = []\n lambda_vars = []\n for path, value in unresolved.items():\n if isinstance(value, types.FunctionType):\n lambda_vars.append((path, value))\n else:\n grid_vars.append((path, value))\n grid_vars.sort()\n\n grid_search = _grid_search_generator(spec, grid_vars)\n for resolved_spec in grid_search:\n resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars)\n for resolved, spec in _generate_variants(resolved_spec):\n for path, value in grid_vars:\n resolved_vars[path] = _get_value(spec, 
path)\n for k, v in resolved.items():\n if (k in resolved_vars and v != resolved_vars[k]\n and _is_resolved(resolved_vars[k])):\n raise ValueError(\n \"The variable `{}` could not be unambiguously \"\n \"resolved to a single value. Consider simplifying \"\n \"your configuration.\".format(k))\n resolved_vars[k] = v\n yield resolved_vars, spec\n\n\ndef _assign_value(spec, path, value):\n for k in path[:-1]:\n spec = spec[k]\n spec[path[-1]] = value\n\n\ndef _get_value(spec, path):\n for k in path:\n spec = spec[k]\n return spec\n\n\ndef _resolve_lambda_vars(spec, lambda_vars):\n resolved = {}\n error = True\n num_passes = 0\n while error and num_passes < _MAX_RESOLUTION_PASSES:\n num_passes += 1\n error = False\n for path, fn in lambda_vars:\n try:\n value = fn(_UnresolvedAccessGuard(spec))\n except RecursiveDependencyError as e:\n error = e\n except Exception:\n raise ValueError(\n \"Failed to evaluate expression: {}: {}\".format(path, fn))\n else:\n _assign_value(spec, path, value)\n resolved[path] = value\n if error:\n raise error\n return resolved\n\n\ndef _grid_search_generator(unresolved_spec, grid_vars):\n value_indices = [0] * len(grid_vars)\n\n def increment(i):\n value_indices[i] += 1\n if value_indices[i] >= len(grid_vars[i][1]):\n value_indices[i] = 0\n if i + 1 < len(value_indices):\n return increment(i + 1)\n else:\n return True\n return False\n\n if not grid_vars:\n yield unresolved_spec\n return\n\n while value_indices[-1] < len(grid_vars[-1][1]):\n spec = copy.deepcopy(unresolved_spec)\n for i, (path, values) in enumerate(grid_vars):\n _assign_value(spec, path, values[value_indices[i]])\n yield spec\n if grid_vars:\n done = increment(0)\n if done:\n break\n\n\ndef _is_resolved(v):\n resolved, _ = _try_resolve(v)\n return resolved\n\n\ndef _try_resolve(v):\n if isinstance(v, sample_from):\n # Function to sample from\n return False, v.func\n elif isinstance(v, dict) and len(v) == 1 and \"eval\" in v:\n # Lambda function in eval syntax\n return False, lambda spec: eval(\n v[\"eval\"], _STANDARD_IMPORTS, {\"spec\": spec})\n elif isinstance(v, dict) and len(v) == 1 and \"grid_search\" in v:\n # Grid search values\n grid_values = v[\"grid_search\"]\n if not isinstance(grid_values, list):\n raise TuneError(\n \"Grid search expected list of values, got: {}\".format(\n grid_values))\n return False, grid_values\n return True, v\n\n\ndef _unresolved_values(spec):\n found = {}\n for k, v in spec.items():\n resolved, v = _try_resolve(v)\n if not resolved:\n found[(k, )] = v\n elif isinstance(v, dict):\n # Recurse into a dict\n for (path, value) in _unresolved_values(v).items():\n found[(k, ) + path] = value\n elif isinstance(v, list):\n # Recurse into a list\n for i, elem in enumerate(v):\n for (path, value) in _unresolved_values({i: elem}).items():\n found[(k, ) + path] = value\n return found\n\n\nclass _UnresolvedAccessGuard(dict):\n def __init__(self, *args, **kwds):\n super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)\n self.__dict__ = self\n\n def __getattribute__(self, item):\n value = dict.__getattribute__(self, item)\n if not _is_resolved(value):\n raise RecursiveDependencyError(\n \"`{}` recursively depends on {}\".format(item, value))\n elif isinstance(value, dict):\n return _UnresolvedAccessGuard(value)\n else:\n return value\n\n\nclass RecursiveDependencyError(Exception):\n def __init__(self, msg):\n Exception.__init__(self, msg)\n", "path": "python/ray/tune/suggest/variant_generator.py"}]}
3,965
165
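A minimal, self-contained sketch of the behaviour change described in the ray-project__ray-6849 record above: the original `isinstance(value, types.FunctionType)` test rejects callable objects, while the patched `callable(value)` test accepts both lambdas and objects defining `__call__`. The `classify` helper below is hypothetical — it only mirrors the lambda-vs-grid split in `_generate_variants` — and is not Ray code.

```python
import random
import types


class Normal:
    # Callable object from the issue's reproduction: samples N(0, 1) per config.
    def __call__(self, _config):
        return random.normalvariate(mu=0, sigma=1)


def classify(value):
    # Hypothetical stand-in for the lambda-vs-grid split in _generate_variants.
    return "lambda_var" if callable(value) else "grid_var"


lam = lambda spec: random.uniform(1, 1000)
obj = Normal()

# Old check: only plain functions/lambdas qualify, so Normal() falls through
# to the grid-search branch and later fails with "object of type 'Normal' has no len()".
print(isinstance(lam, types.FunctionType), isinstance(obj, types.FunctionType))  # True False

# Patched check: both are treated as sample sources.
print(classify(lam), classify(obj))  # lambda_var lambda_var
```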
gh_patches_debug_37155
rasdani/github-patches
git_diff
pallets__werkzeug-1712
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> set charset in shareddatamiddleware # Description UTF-8 Javascript files served by the SharedDataMiddleware do not contain a charset in the `Content-Type` response header. This leads to issues of javascript interpreted as US-ASCII by browsers (https://github.com/posativ/isso/issues/607). **Current** ``` Content-Type: application/javascript ``` **Expected** ``` Content-Type: application/javascript; charset=UTF-8 ``` The mime type is deducted by using python's mimetypes module and the filename: https://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L256-L257 It is sent without appending an encoding: https://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L281 Other usages seem to suggest that a call to `get_content_type` is necessary to append the charset, if applicable: https://github.com/pallets/werkzeug/blob/e76aac8294626e24e1075e665cbf9657b88c4301/src/werkzeug/wrappers/common_descriptors.py#L146 # Possible solutions I am not familiar with the codebase, but would it be possible to call `get_content_type`? I tried to modify it myself, but I don't know where to get the encoding from inside `SharedDataMiddleware`. My problem is solved when I hardcode 'utf-8' as charset: ```python ("Content-Type", get_content_type(mime_type, 'utf-8')) ``` </issue> <code> [start of src/werkzeug/middleware/shared_data.py] 1 """ 2 Serve Shared Static Files 3 ========================= 4 5 .. autoclass:: SharedDataMiddleware 6 :members: is_allowed 7 8 :copyright: 2007 Pallets 9 :license: BSD-3-Clause 10 """ 11 import mimetypes 12 import os 13 import pkgutil 14 import posixpath 15 from datetime import datetime 16 from io import BytesIO 17 from time import mktime 18 from time import time 19 from zlib import adler32 20 21 from .._compat import PY2 22 from .._compat import string_types 23 from ..filesystem import get_filesystem_encoding 24 from ..http import http_date 25 from ..http import is_resource_modified 26 from ..security import safe_join 27 from ..wsgi import get_path_info 28 from ..wsgi import wrap_file 29 30 31 class SharedDataMiddleware(object): 32 33 """A WSGI middleware that provides static content for development 34 environments or simple server setups. Usage is quite simple:: 35 36 import os 37 from werkzeug.wsgi import SharedDataMiddleware 38 39 app = SharedDataMiddleware(app, { 40 '/static': os.path.join(os.path.dirname(__file__), 'static') 41 }) 42 43 The contents of the folder ``./shared`` will now be available on 44 ``http://example.com/shared/``. This is pretty useful during development 45 because a standalone media server is not required. One can also mount 46 files on the root folder and still continue to use the application because 47 the shared data middleware forwards all unhandled requests to the 48 application, even if the requests are below one of the shared folders. 49 50 If `pkg_resources` is available you can also tell the middleware to serve 51 files from package data:: 52 53 app = SharedDataMiddleware(app, { 54 '/static': ('myapplication', 'static') 55 }) 56 57 This will then serve the ``static`` folder in the `myapplication` 58 Python package. 59 60 The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch` 61 rules for files that are not accessible from the web. If `cache` is set to 62 `False` no caching headers are sent. 
63 64 Currently the middleware does not support non ASCII filenames. If the 65 encoding on the file system happens to be the encoding of the URI it may 66 work but this could also be by accident. We strongly suggest using ASCII 67 only file names for static files. 68 69 The middleware will guess the mimetype using the Python `mimetype` 70 module. If it's unable to figure out the charset it will fall back 71 to `fallback_mimetype`. 72 73 .. versionchanged:: 0.5 74 The cache timeout is configurable now. 75 76 .. versionadded:: 0.6 77 The `fallback_mimetype` parameter was added. 78 79 :param app: the application to wrap. If you don't want to wrap an 80 application you can pass it :exc:`NotFound`. 81 :param exports: a list or dict of exported files and folders. 82 :param disallow: a list of :func:`~fnmatch.fnmatch` rules. 83 :param fallback_mimetype: the fallback mimetype for unknown files. 84 :param cache: enable or disable caching headers. 85 :param cache_timeout: the cache timeout in seconds for the headers. 86 """ 87 88 def __init__( 89 self, 90 app, 91 exports, 92 disallow=None, 93 cache=True, 94 cache_timeout=60 * 60 * 12, 95 fallback_mimetype="text/plain", 96 ): 97 self.app = app 98 self.exports = [] 99 self.cache = cache 100 self.cache_timeout = cache_timeout 101 102 if hasattr(exports, "items"): 103 exports = exports.items() 104 105 for key, value in exports: 106 if isinstance(value, tuple): 107 loader = self.get_package_loader(*value) 108 elif isinstance(value, string_types): 109 if os.path.isfile(value): 110 loader = self.get_file_loader(value) 111 else: 112 loader = self.get_directory_loader(value) 113 else: 114 raise TypeError("unknown def %r" % value) 115 116 self.exports.append((key, loader)) 117 118 if disallow is not None: 119 from fnmatch import fnmatch 120 121 self.is_allowed = lambda x: not fnmatch(x, disallow) 122 123 self.fallback_mimetype = fallback_mimetype 124 125 def is_allowed(self, filename): 126 """Subclasses can override this method to disallow the access to 127 certain files. However by providing `disallow` in the constructor 128 this method is overwritten. 
129 """ 130 return True 131 132 def _opener(self, filename): 133 return lambda: ( 134 open(filename, "rb"), 135 datetime.utcfromtimestamp(os.path.getmtime(filename)), 136 int(os.path.getsize(filename)), 137 ) 138 139 def get_file_loader(self, filename): 140 return lambda x: (os.path.basename(filename), self._opener(filename)) 141 142 def get_package_loader(self, package, package_path): 143 loadtime = datetime.utcnow() 144 provider = pkgutil.get_loader(package) 145 146 if hasattr(provider, "get_resource_reader"): 147 # Python 3 148 reader = provider.get_resource_reader(package) 149 150 def loader(path): 151 if path is None: 152 return None, None 153 154 path = safe_join(package_path, path) 155 basename = posixpath.basename(path) 156 157 try: 158 resource = reader.open_resource(path) 159 except IOError: 160 return None, None 161 162 if isinstance(resource, BytesIO): 163 return ( 164 basename, 165 lambda: (resource, loadtime, len(resource.getvalue())), 166 ) 167 168 return ( 169 basename, 170 lambda: ( 171 resource, 172 datetime.utcfromtimestamp(os.path.getmtime(resource.name)), 173 os.path.getsize(resource.name), 174 ), 175 ) 176 177 else: 178 # Python 2 179 package_filename = provider.get_filename(package) 180 is_filesystem = os.path.exists(package_filename) 181 root = os.path.join(os.path.dirname(package_filename), package_path) 182 183 def loader(path): 184 if path is None: 185 return None, None 186 187 path = safe_join(root, path) 188 basename = posixpath.basename(path) 189 190 if is_filesystem: 191 if not os.path.isfile(path): 192 return None, None 193 194 return basename, self._opener(path) 195 196 try: 197 data = provider.get_data(path) 198 except IOError: 199 return None, None 200 201 return basename, lambda: (BytesIO(data), loadtime, len(data)) 202 203 return loader 204 205 def get_directory_loader(self, directory): 206 def loader(path): 207 if path is not None: 208 path = safe_join(directory, path) 209 else: 210 path = directory 211 212 if os.path.isfile(path): 213 return os.path.basename(path), self._opener(path) 214 215 return None, None 216 217 return loader 218 219 def generate_etag(self, mtime, file_size, real_filename): 220 if not isinstance(real_filename, bytes): 221 real_filename = real_filename.encode(get_filesystem_encoding()) 222 223 return "wzsdm-%d-%s-%s" % ( 224 mktime(mtime.timetuple()), 225 file_size, 226 adler32(real_filename) & 0xFFFFFFFF, 227 ) 228 229 def __call__(self, environ, start_response): 230 path = get_path_info(environ) 231 232 if PY2: 233 path = path.encode(get_filesystem_encoding()) 234 235 file_loader = None 236 237 for search_path, loader in self.exports: 238 if search_path == path: 239 real_filename, file_loader = loader(None) 240 241 if file_loader is not None: 242 break 243 244 if not search_path.endswith("/"): 245 search_path += "/" 246 247 if path.startswith(search_path): 248 real_filename, file_loader = loader(path[len(search_path) :]) 249 250 if file_loader is not None: 251 break 252 253 if file_loader is None or not self.is_allowed(real_filename): 254 return self.app(environ, start_response) 255 256 guessed_type = mimetypes.guess_type(real_filename) 257 mime_type = guessed_type[0] or self.fallback_mimetype 258 f, mtime, file_size = file_loader() 259 260 headers = [("Date", http_date())] 261 262 if self.cache: 263 timeout = self.cache_timeout 264 etag = self.generate_etag(mtime, file_size, real_filename) 265 headers += [ 266 ("Etag", '"%s"' % etag), 267 ("Cache-Control", "max-age=%d, public" % timeout), 268 ] 269 270 if not 
is_resource_modified(environ, etag, last_modified=mtime): 271 f.close() 272 start_response("304 Not Modified", headers) 273 return [] 274 275 headers.append(("Expires", http_date(time() + timeout))) 276 else: 277 headers.append(("Cache-Control", "public")) 278 279 headers.extend( 280 ( 281 ("Content-Type", mime_type), 282 ("Content-Length", str(file_size)), 283 ("Last-Modified", http_date(mtime)), 284 ) 285 ) 286 start_response("200 OK", headers) 287 return wrap_file(environ, f) 288 [end of src/werkzeug/middleware/shared_data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/werkzeug/middleware/shared_data.py b/src/werkzeug/middleware/shared_data.py --- a/src/werkzeug/middleware/shared_data.py +++ b/src/werkzeug/middleware/shared_data.py @@ -24,6 +24,7 @@ from ..http import http_date from ..http import is_resource_modified from ..security import safe_join +from ..utils import get_content_type from ..wsgi import get_path_info from ..wsgi import wrap_file @@ -70,19 +71,24 @@ module. If it's unable to figure out the charset it will fall back to `fallback_mimetype`. - .. versionchanged:: 0.5 - The cache timeout is configurable now. - - .. versionadded:: 0.6 - The `fallback_mimetype` parameter was added. - :param app: the application to wrap. If you don't want to wrap an application you can pass it :exc:`NotFound`. :param exports: a list or dict of exported files and folders. :param disallow: a list of :func:`~fnmatch.fnmatch` rules. - :param fallback_mimetype: the fallback mimetype for unknown files. :param cache: enable or disable caching headers. :param cache_timeout: the cache timeout in seconds for the headers. + :param fallback_mimetype: The fallback mimetype for unknown files. + + .. versionchanged:: 1.0 + The default ``fallback_mimetype`` is + ``application/octet-stream``. If a filename looks like a text + mimetype, the ``utf-8`` charset is added to it. + + .. versionadded:: 0.6 + Added ``fallback_mimetype``. + + .. versionchanged:: 0.5 + Added ``cache_timeout``. """ def __init__( @@ -92,7 +98,7 @@ disallow=None, cache=True, cache_timeout=60 * 60 * 12, - fallback_mimetype="text/plain", + fallback_mimetype="application/octet-stream", ): self.app = app self.exports = [] @@ -254,7 +260,7 @@ return self.app(environ, start_response) guessed_type = mimetypes.guess_type(real_filename) - mime_type = guessed_type[0] or self.fallback_mimetype + mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8") f, mtime, file_size = file_loader() headers = [("Date", http_date())]
{"golden_diff": "diff --git a/src/werkzeug/middleware/shared_data.py b/src/werkzeug/middleware/shared_data.py\n--- a/src/werkzeug/middleware/shared_data.py\n+++ b/src/werkzeug/middleware/shared_data.py\n@@ -24,6 +24,7 @@\n from ..http import http_date\n from ..http import is_resource_modified\n from ..security import safe_join\n+from ..utils import get_content_type\n from ..wsgi import get_path_info\n from ..wsgi import wrap_file\n \n@@ -70,19 +71,24 @@\n module. If it's unable to figure out the charset it will fall back\n to `fallback_mimetype`.\n \n- .. versionchanged:: 0.5\n- The cache timeout is configurable now.\n-\n- .. versionadded:: 0.6\n- The `fallback_mimetype` parameter was added.\n-\n :param app: the application to wrap. If you don't want to wrap an\n application you can pass it :exc:`NotFound`.\n :param exports: a list or dict of exported files and folders.\n :param disallow: a list of :func:`~fnmatch.fnmatch` rules.\n- :param fallback_mimetype: the fallback mimetype for unknown files.\n :param cache: enable or disable caching headers.\n :param cache_timeout: the cache timeout in seconds for the headers.\n+ :param fallback_mimetype: The fallback mimetype for unknown files.\n+\n+ .. versionchanged:: 1.0\n+ The default ``fallback_mimetype`` is\n+ ``application/octet-stream``. If a filename looks like a text\n+ mimetype, the ``utf-8`` charset is added to it.\n+\n+ .. versionadded:: 0.6\n+ Added ``fallback_mimetype``.\n+\n+ .. versionchanged:: 0.5\n+ Added ``cache_timeout``.\n \"\"\"\n \n def __init__(\n@@ -92,7 +98,7 @@\n disallow=None,\n cache=True,\n cache_timeout=60 * 60 * 12,\n- fallback_mimetype=\"text/plain\",\n+ fallback_mimetype=\"application/octet-stream\",\n ):\n self.app = app\n self.exports = []\n@@ -254,7 +260,7 @@\n return self.app(environ, start_response)\n \n guessed_type = mimetypes.guess_type(real_filename)\n- mime_type = guessed_type[0] or self.fallback_mimetype\n+ mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, \"utf-8\")\n f, mtime, file_size = file_loader()\n \n headers = [(\"Date\", http_date())]\n", "issue": "set charset in shareddatamiddleware\n# Description\r\nUTF-8 Javascript files served by the SharedDataMiddleware do not contain a charset in the `Content-Type` response header. This leads to issues of javascript interpreted as US-ASCII by browsers (https://github.com/posativ/isso/issues/607).\r\n\r\n**Current**\r\n```\r\nContent-Type: application/javascript\r\n```\r\n**Expected**\r\n```\r\nContent-Type: application/javascript; charset=UTF-8\r\n```\r\n\r\nThe mime type is deducted by using python's mimetypes module and the filename:\r\n\r\nhttps://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L256-L257\r\n\r\nIt is sent without appending an encoding:\r\n\r\nhttps://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L281\r\n\r\nOther usages seem to suggest that a call to `get_content_type` is necessary to append the charset, if applicable:\r\n\r\nhttps://github.com/pallets/werkzeug/blob/e76aac8294626e24e1075e665cbf9657b88c4301/src/werkzeug/wrappers/common_descriptors.py#L146\r\n\r\n# Possible solutions\r\nI am not familiar with the codebase, but would it be possible to call `get_content_type`? 
I tried to modify it myself, but I don't know where to get the encoding from inside `SharedDataMiddleware`.\r\n\r\nMy problem is solved when I hardcode 'utf-8' as charset:\r\n```python\r\n(\"Content-Type\", get_content_type(mime_type, 'utf-8'))\r\n```\n", "before_files": [{"content": "\"\"\"\nServe Shared Static Files\n=========================\n\n.. autoclass:: SharedDataMiddleware\n :members: is_allowed\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nimport mimetypes\nimport os\nimport pkgutil\nimport posixpath\nfrom datetime import datetime\nfrom io import BytesIO\nfrom time import mktime\nfrom time import time\nfrom zlib import adler32\n\nfrom .._compat import PY2\nfrom .._compat import string_types\nfrom ..filesystem import get_filesystem_encoding\nfrom ..http import http_date\nfrom ..http import is_resource_modified\nfrom ..security import safe_join\nfrom ..wsgi import get_path_info\nfrom ..wsgi import wrap_file\n\n\nclass SharedDataMiddleware(object):\n\n \"\"\"A WSGI middleware that provides static content for development\n environments or simple server setups. Usage is quite simple::\n\n import os\n from werkzeug.wsgi import SharedDataMiddleware\n\n app = SharedDataMiddleware(app, {\n '/static': os.path.join(os.path.dirname(__file__), 'static')\n })\n\n The contents of the folder ``./shared`` will now be available on\n ``http://example.com/shared/``. This is pretty useful during development\n because a standalone media server is not required. One can also mount\n files on the root folder and still continue to use the application because\n the shared data middleware forwards all unhandled requests to the\n application, even if the requests are below one of the shared folders.\n\n If `pkg_resources` is available you can also tell the middleware to serve\n files from package data::\n\n app = SharedDataMiddleware(app, {\n '/static': ('myapplication', 'static')\n })\n\n This will then serve the ``static`` folder in the `myapplication`\n Python package.\n\n The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`\n rules for files that are not accessible from the web. If `cache` is set to\n `False` no caching headers are sent.\n\n Currently the middleware does not support non ASCII filenames. If the\n encoding on the file system happens to be the encoding of the URI it may\n work but this could also be by accident. We strongly suggest using ASCII\n only file names for static files.\n\n The middleware will guess the mimetype using the Python `mimetype`\n module. If it's unable to figure out the charset it will fall back\n to `fallback_mimetype`.\n\n .. versionchanged:: 0.5\n The cache timeout is configurable now.\n\n .. versionadded:: 0.6\n The `fallback_mimetype` parameter was added.\n\n :param app: the application to wrap. 
If you don't want to wrap an\n application you can pass it :exc:`NotFound`.\n :param exports: a list or dict of exported files and folders.\n :param disallow: a list of :func:`~fnmatch.fnmatch` rules.\n :param fallback_mimetype: the fallback mimetype for unknown files.\n :param cache: enable or disable caching headers.\n :param cache_timeout: the cache timeout in seconds for the headers.\n \"\"\"\n\n def __init__(\n self,\n app,\n exports,\n disallow=None,\n cache=True,\n cache_timeout=60 * 60 * 12,\n fallback_mimetype=\"text/plain\",\n ):\n self.app = app\n self.exports = []\n self.cache = cache\n self.cache_timeout = cache_timeout\n\n if hasattr(exports, \"items\"):\n exports = exports.items()\n\n for key, value in exports:\n if isinstance(value, tuple):\n loader = self.get_package_loader(*value)\n elif isinstance(value, string_types):\n if os.path.isfile(value):\n loader = self.get_file_loader(value)\n else:\n loader = self.get_directory_loader(value)\n else:\n raise TypeError(\"unknown def %r\" % value)\n\n self.exports.append((key, loader))\n\n if disallow is not None:\n from fnmatch import fnmatch\n\n self.is_allowed = lambda x: not fnmatch(x, disallow)\n\n self.fallback_mimetype = fallback_mimetype\n\n def is_allowed(self, filename):\n \"\"\"Subclasses can override this method to disallow the access to\n certain files. However by providing `disallow` in the constructor\n this method is overwritten.\n \"\"\"\n return True\n\n def _opener(self, filename):\n return lambda: (\n open(filename, \"rb\"),\n datetime.utcfromtimestamp(os.path.getmtime(filename)),\n int(os.path.getsize(filename)),\n )\n\n def get_file_loader(self, filename):\n return lambda x: (os.path.basename(filename), self._opener(filename))\n\n def get_package_loader(self, package, package_path):\n loadtime = datetime.utcnow()\n provider = pkgutil.get_loader(package)\n\n if hasattr(provider, \"get_resource_reader\"):\n # Python 3\n reader = provider.get_resource_reader(package)\n\n def loader(path):\n if path is None:\n return None, None\n\n path = safe_join(package_path, path)\n basename = posixpath.basename(path)\n\n try:\n resource = reader.open_resource(path)\n except IOError:\n return None, None\n\n if isinstance(resource, BytesIO):\n return (\n basename,\n lambda: (resource, loadtime, len(resource.getvalue())),\n )\n\n return (\n basename,\n lambda: (\n resource,\n datetime.utcfromtimestamp(os.path.getmtime(resource.name)),\n os.path.getsize(resource.name),\n ),\n )\n\n else:\n # Python 2\n package_filename = provider.get_filename(package)\n is_filesystem = os.path.exists(package_filename)\n root = os.path.join(os.path.dirname(package_filename), package_path)\n\n def loader(path):\n if path is None:\n return None, None\n\n path = safe_join(root, path)\n basename = posixpath.basename(path)\n\n if is_filesystem:\n if not os.path.isfile(path):\n return None, None\n\n return basename, self._opener(path)\n\n try:\n data = provider.get_data(path)\n except IOError:\n return None, None\n\n return basename, lambda: (BytesIO(data), loadtime, len(data))\n\n return loader\n\n def get_directory_loader(self, directory):\n def loader(path):\n if path is not None:\n path = safe_join(directory, path)\n else:\n path = directory\n\n if os.path.isfile(path):\n return os.path.basename(path), self._opener(path)\n\n return None, None\n\n return loader\n\n def generate_etag(self, mtime, file_size, real_filename):\n if not isinstance(real_filename, bytes):\n real_filename = real_filename.encode(get_filesystem_encoding())\n\n return 
\"wzsdm-%d-%s-%s\" % (\n mktime(mtime.timetuple()),\n file_size,\n adler32(real_filename) & 0xFFFFFFFF,\n )\n\n def __call__(self, environ, start_response):\n path = get_path_info(environ)\n\n if PY2:\n path = path.encode(get_filesystem_encoding())\n\n file_loader = None\n\n for search_path, loader in self.exports:\n if search_path == path:\n real_filename, file_loader = loader(None)\n\n if file_loader is not None:\n break\n\n if not search_path.endswith(\"/\"):\n search_path += \"/\"\n\n if path.startswith(search_path):\n real_filename, file_loader = loader(path[len(search_path) :])\n\n if file_loader is not None:\n break\n\n if file_loader is None or not self.is_allowed(real_filename):\n return self.app(environ, start_response)\n\n guessed_type = mimetypes.guess_type(real_filename)\n mime_type = guessed_type[0] or self.fallback_mimetype\n f, mtime, file_size = file_loader()\n\n headers = [(\"Date\", http_date())]\n\n if self.cache:\n timeout = self.cache_timeout\n etag = self.generate_etag(mtime, file_size, real_filename)\n headers += [\n (\"Etag\", '\"%s\"' % etag),\n (\"Cache-Control\", \"max-age=%d, public\" % timeout),\n ]\n\n if not is_resource_modified(environ, etag, last_modified=mtime):\n f.close()\n start_response(\"304 Not Modified\", headers)\n return []\n\n headers.append((\"Expires\", http_date(time() + timeout)))\n else:\n headers.append((\"Cache-Control\", \"public\"))\n\n headers.extend(\n (\n (\"Content-Type\", mime_type),\n (\"Content-Length\", str(file_size)),\n (\"Last-Modified\", http_date(mtime)),\n )\n )\n start_response(\"200 OK\", headers)\n return wrap_file(environ, f)\n", "path": "src/werkzeug/middleware/shared_data.py"}]}
3,659
590
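As an aside on the pallets__werkzeug-1712 record above: the patch routes the guessed mimetype through `werkzeug.utils.get_content_type(..., "utf-8")` so text-like types gain a charset parameter. The sketch below only approximates that behaviour for illustration; `charset_for` is a hypothetical helper, not Werkzeug API, and the exact set of mimetypes Werkzeug treats as text may differ.

```python
import mimetypes


def charset_for(mime_type, charset="utf-8"):
    # Rough approximation: append a charset to mimetypes that carry text.
    text_like = (
        mime_type.startswith("text/")
        or mime_type in ("application/javascript", "application/xml")
        or mime_type.endswith(("+xml", "+json"))
    )
    return f"{mime_type}; charset={charset}" if text_like else mime_type


guessed, _ = mimetypes.guess_type("embed.min.js")
content_type = charset_for(guessed or "application/octet-stream")
print(content_type)
# Typically "application/javascript; charset=utf-8"; newer Pythons may guess
# "text/javascript" for .js files, which also gains the charset here.
```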
gh_patches_debug_38425
rasdani/github-patches
git_diff
python-pillow__Pillow-5258
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PyQt6 Support Hi everyone!. I'm updating my app to PyQt6 and found a problem. ImageQt.ImageQt class raises an error when try to use with QPainter: ```pycon >>> page_image = ImageQt.ImageQt(page) >>> painter = QtGui.QPainter() >>> painter.begin(page_image) TypeError: begin(self, QPaintDevice): argument 1 has unexpected type 'ImageQt' ``` Regards Python 3.8.6 Ubuntu 20.04 PyQt6 6.0.1 Pillow 8.1.0 </issue> <code> [start of src/PIL/ImageQt.py] 1 # 2 # The Python Imaging Library. 3 # $Id$ 4 # 5 # a simple Qt image interface. 6 # 7 # history: 8 # 2006-06-03 fl: created 9 # 2006-06-04 fl: inherit from QImage instead of wrapping it 10 # 2006-06-05 fl: removed toimage helper; move string support to ImageQt 11 # 2013-11-13 fl: add support for Qt5 ([email protected]) 12 # 13 # Copyright (c) 2006 by Secret Labs AB 14 # Copyright (c) 2006 by Fredrik Lundh 15 # 16 # See the README file for information on usage and redistribution. 17 # 18 19 import sys 20 from io import BytesIO 21 22 from . import Image 23 from ._util import isPath 24 25 qt_versions = [ 26 ["side6", "PySide6"], 27 ["5", "PyQt5"], 28 ["side2", "PySide2"], 29 ] 30 31 # If a version has already been imported, attempt it first 32 qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) 33 for qt_version, qt_module in qt_versions: 34 try: 35 if qt_module == "PySide6": 36 from PySide6.QtCore import QBuffer, QIODevice 37 from PySide6.QtGui import QImage, QPixmap, qRgba 38 elif qt_module == "PyQt5": 39 from PyQt5.QtCore import QBuffer, QIODevice 40 from PyQt5.QtGui import QImage, QPixmap, qRgba 41 elif qt_module == "PySide2": 42 from PySide2.QtCore import QBuffer, QIODevice 43 from PySide2.QtGui import QImage, QPixmap, qRgba 44 except (ImportError, RuntimeError): 45 continue 46 qt_is_installed = True 47 break 48 else: 49 qt_is_installed = False 50 qt_version = None 51 52 53 def rgb(r, g, b, a=255): 54 """(Internal) Turns an RGB color into a Qt compatible color integer.""" 55 # use qRgb to pack the colors, and then turn the resulting long 56 # into a negative integer with the same bitpattern. 57 return qRgba(r, g, b, a) & 0xFFFFFFFF 58 59 60 def fromqimage(im): 61 """ 62 :param im: A PIL Image object, or a file name 63 (given either as Python string or a PyQt string object) 64 """ 65 buffer = QBuffer() 66 buffer.open(QIODevice.ReadWrite) 67 # preserve alpha channel with png 68 # otherwise ppm is more friendly with Image.open 69 if im.hasAlphaChannel(): 70 im.save(buffer, "png") 71 else: 72 im.save(buffer, "ppm") 73 74 b = BytesIO() 75 b.write(buffer.data()) 76 buffer.close() 77 b.seek(0) 78 79 return Image.open(b) 80 81 82 def fromqpixmap(im): 83 return fromqimage(im) 84 # buffer = QBuffer() 85 # buffer.open(QIODevice.ReadWrite) 86 # # im.save(buffer) 87 # # What if png doesn't support some image features like animation? 
88 # im.save(buffer, 'ppm') 89 # bytes_io = BytesIO() 90 # bytes_io.write(buffer.data()) 91 # buffer.close() 92 # bytes_io.seek(0) 93 # return Image.open(bytes_io) 94 95 96 def align8to32(bytes, width, mode): 97 """ 98 converts each scanline of data from 8 bit to 32 bit aligned 99 """ 100 101 bits_per_pixel = {"1": 1, "L": 8, "P": 8}[mode] 102 103 # calculate bytes per line and the extra padding if needed 104 bits_per_line = bits_per_pixel * width 105 full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) 106 bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) 107 108 extra_padding = -bytes_per_line % 4 109 110 # already 32 bit aligned by luck 111 if not extra_padding: 112 return bytes 113 114 new_data = [] 115 for i in range(len(bytes) // bytes_per_line): 116 new_data.append( 117 bytes[i * bytes_per_line : (i + 1) * bytes_per_line] 118 + b"\x00" * extra_padding 119 ) 120 121 return b"".join(new_data) 122 123 124 def _toqclass_helper(im): 125 data = None 126 colortable = None 127 128 # handle filename, if given instead of image name 129 if hasattr(im, "toUtf8"): 130 # FIXME - is this really the best way to do this? 131 im = str(im.toUtf8(), "utf-8") 132 if isPath(im): 133 im = Image.open(im) 134 135 if im.mode == "1": 136 format = QImage.Format_Mono 137 elif im.mode == "L": 138 format = QImage.Format_Indexed8 139 colortable = [] 140 for i in range(256): 141 colortable.append(rgb(i, i, i)) 142 elif im.mode == "P": 143 format = QImage.Format_Indexed8 144 colortable = [] 145 palette = im.getpalette() 146 for i in range(0, len(palette), 3): 147 colortable.append(rgb(*palette[i : i + 3])) 148 elif im.mode == "RGB": 149 data = im.tobytes("raw", "BGRX") 150 format = QImage.Format_RGB32 151 elif im.mode == "RGBA": 152 data = im.tobytes("raw", "BGRA") 153 format = QImage.Format_ARGB32 154 else: 155 raise ValueError(f"unsupported image mode {repr(im.mode)}") 156 157 __data = data or align8to32(im.tobytes(), im.size[0], im.mode) 158 return {"data": __data, "im": im, "format": format, "colortable": colortable} 159 160 161 if qt_is_installed: 162 163 class ImageQt(QImage): 164 def __init__(self, im): 165 """ 166 An PIL image wrapper for Qt. This is a subclass of PyQt's QImage 167 class. 168 169 :param im: A PIL Image object, or a file name (given either as 170 Python string or a PyQt string object). 171 """ 172 im_data = _toqclass_helper(im) 173 # must keep a reference, or Qt will crash! 174 # All QImage constructors that take data operate on an existing 175 # buffer, so this buffer has to hang on for the life of the image. 176 # Fixes https://github.com/python-pillow/Pillow/issues/1370 177 self.__data = im_data["data"] 178 super().__init__( 179 self.__data, 180 im_data["im"].size[0], 181 im_data["im"].size[1], 182 im_data["format"], 183 ) 184 if im_data["colortable"]: 185 self.setColorTable(im_data["colortable"]) 186 187 188 def toqimage(im): 189 return ImageQt(im) 190 191 192 def toqpixmap(im): 193 # # This doesn't work. For now using a dumb approach. 194 # im_data = _toqclass_helper(im) 195 # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1]) 196 # result.loadFromData(im_data['data']) 197 # Fix some strange bug that causes 198 if im.mode == "RGB": 199 im = im.convert("RGBA") 200 201 qimage = toqimage(im) 202 return QPixmap.fromImage(qimage) 203 [end of src/PIL/ImageQt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py --- a/src/PIL/ImageQt.py +++ b/src/PIL/ImageQt.py @@ -23,6 +23,7 @@ from ._util import isPath qt_versions = [ + ["6", "PyQt6"], ["side6", "PySide6"], ["5", "PyQt5"], ["side2", "PySide2"], @@ -32,7 +33,10 @@ qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) for qt_version, qt_module in qt_versions: try: - if qt_module == "PySide6": + if qt_module == "PyQt6": + from PyQt6.QtCore import QBuffer, QIODevice + from PyQt6.QtGui import QImage, QPixmap, qRgba + elif qt_module == "PySide6": from PySide6.QtCore import QBuffer, QIODevice from PySide6.QtGui import QImage, QPixmap, qRgba elif qt_module == "PyQt5": @@ -63,7 +67,8 @@ (given either as Python string or a PyQt string object) """ buffer = QBuffer() - buffer.open(QIODevice.ReadWrite) + qt_openmode = QIODevice.OpenMode if qt_version == "6" else QIODevice + buffer.open(qt_openmode.ReadWrite) # preserve alpha channel with png # otherwise ppm is more friendly with Image.open if im.hasAlphaChannel(): @@ -132,25 +137,26 @@ if isPath(im): im = Image.open(im) + qt_format = QImage.Format if qt_version == "6" else QImage if im.mode == "1": - format = QImage.Format_Mono + format = qt_format.Format_Mono elif im.mode == "L": - format = QImage.Format_Indexed8 + format = qt_format.Format_Indexed8 colortable = [] for i in range(256): colortable.append(rgb(i, i, i)) elif im.mode == "P": - format = QImage.Format_Indexed8 + format = qt_format.Format_Indexed8 colortable = [] palette = im.getpalette() for i in range(0, len(palette), 3): colortable.append(rgb(*palette[i : i + 3])) elif im.mode == "RGB": data = im.tobytes("raw", "BGRX") - format = QImage.Format_RGB32 + format = qt_format.Format_RGB32 elif im.mode == "RGBA": data = im.tobytes("raw", "BGRA") - format = QImage.Format_ARGB32 + format = qt_format.Format_ARGB32 else: raise ValueError(f"unsupported image mode {repr(im.mode)}")
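A small sketch of the scoped-enum shim this diff applies throughout `ImageQt`: PyQt6 requires enum members to be accessed through their enum class (`QImage.Format.Format_ARGB32`), whereas the other supported bindings still accept the flat spelling (`QImage.Format_ARGB32`), which is why the patch branches on `qt_version == "6"`. The helper below is illustrative only and not Pillow code; `QImage` is whichever binding's class was imported, and `qt_version` mirrors the module-level variable.

```python
def qimage_format_for(QImage, qt_version, mode):
    # PyQt6 exposes formats under the nested enum class; the other bindings
    # expose them directly on QImage, matching the qt_format selection above.
    qt_format = QImage.Format if qt_version == "6" else QImage
    formats = {
        "1": qt_format.Format_Mono,
        "L": qt_format.Format_Indexed8,
        "P": qt_format.Format_Indexed8,
        "RGB": qt_format.Format_RGB32,
        "RGBA": qt_format.Format_ARGB32,
    }
    try:
        return formats[mode]
    except KeyError:
        raise ValueError(f"unsupported image mode {mode!r}") from None
```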
{"golden_diff": "diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py\n--- a/src/PIL/ImageQt.py\n+++ b/src/PIL/ImageQt.py\n@@ -23,6 +23,7 @@\n from ._util import isPath\n \n qt_versions = [\n+ [\"6\", \"PyQt6\"],\n [\"side6\", \"PySide6\"],\n [\"5\", \"PyQt5\"],\n [\"side2\", \"PySide2\"],\n@@ -32,7 +33,10 @@\n qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\n for qt_version, qt_module in qt_versions:\n try:\n- if qt_module == \"PySide6\":\n+ if qt_module == \"PyQt6\":\n+ from PyQt6.QtCore import QBuffer, QIODevice\n+ from PyQt6.QtGui import QImage, QPixmap, qRgba\n+ elif qt_module == \"PySide6\":\n from PySide6.QtCore import QBuffer, QIODevice\n from PySide6.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PyQt5\":\n@@ -63,7 +67,8 @@\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n- buffer.open(QIODevice.ReadWrite)\n+ qt_openmode = QIODevice.OpenMode if qt_version == \"6\" else QIODevice\n+ buffer.open(qt_openmode.ReadWrite)\n # preserve alpha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n@@ -132,25 +137,26 @@\n if isPath(im):\n im = Image.open(im)\n \n+ qt_format = QImage.Format if qt_version == \"6\" else QImage\n if im.mode == \"1\":\n- format = QImage.Format_Mono\n+ format = qt_format.Format_Mono\n elif im.mode == \"L\":\n- format = QImage.Format_Indexed8\n+ format = qt_format.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n- format = QImage.Format_Indexed8\n+ format = qt_format.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i : i + 3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n- format = QImage.Format_RGB32\n+ format = qt_format.Format_RGB32\n elif im.mode == \"RGBA\":\n data = im.tobytes(\"raw\", \"BGRA\")\n- format = QImage.Format_ARGB32\n+ format = qt_format.Format_ARGB32\n else:\n raise ValueError(f\"unsupported image mode {repr(im.mode)}\")\n", "issue": "PyQt6 Support\nHi everyone!. I'm updating my app to PyQt6 and found a problem.\r\n\r\nImageQt.ImageQt class raises an error when try to use with QPainter:\r\n\r\n```pycon\r\n>>> page_image = ImageQt.ImageQt(page)\r\n>>> painter = QtGui.QPainter()\r\n>>> painter.begin(page_image)\r\nTypeError: begin(self, QPaintDevice): argument 1 has unexpected type 'ImageQt'\r\n```\r\n\r\nRegards\r\n\r\nPython 3.8.6\r\nUbuntu 20.04\r\nPyQt6 6.0.1\r\nPillow 8.1.0\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a simple Qt image interface.\n#\n# history:\n# 2006-06-03 fl: created\n# 2006-06-04 fl: inherit from QImage instead of wrapping it\n# 2006-06-05 fl: removed toimage helper; move string support to ImageQt\n# 2013-11-13 fl: add support for Qt5 ([email protected])\n#\n# Copyright (c) 2006 by Secret Labs AB\n# Copyright (c) 2006 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport sys\nfrom io import BytesIO\n\nfrom . 
import Image\nfrom ._util import isPath\n\nqt_versions = [\n [\"side6\", \"PySide6\"],\n [\"5\", \"PyQt5\"],\n [\"side2\", \"PySide2\"],\n]\n\n# If a version has already been imported, attempt it first\nqt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\nfor qt_version, qt_module in qt_versions:\n try:\n if qt_module == \"PySide6\":\n from PySide6.QtCore import QBuffer, QIODevice\n from PySide6.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PyQt5\":\n from PyQt5.QtCore import QBuffer, QIODevice\n from PyQt5.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PySide2\":\n from PySide2.QtCore import QBuffer, QIODevice\n from PySide2.QtGui import QImage, QPixmap, qRgba\n except (ImportError, RuntimeError):\n continue\n qt_is_installed = True\n break\nelse:\n qt_is_installed = False\n qt_version = None\n\n\ndef rgb(r, g, b, a=255):\n \"\"\"(Internal) Turns an RGB color into a Qt compatible color integer.\"\"\"\n # use qRgb to pack the colors, and then turn the resulting long\n # into a negative integer with the same bitpattern.\n return qRgba(r, g, b, a) & 0xFFFFFFFF\n\n\ndef fromqimage(im):\n \"\"\"\n :param im: A PIL Image object, or a file name\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n buffer.open(QIODevice.ReadWrite)\n # preserve alpha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n im.save(buffer, \"png\")\n else:\n im.save(buffer, \"ppm\")\n\n b = BytesIO()\n b.write(buffer.data())\n buffer.close()\n b.seek(0)\n\n return Image.open(b)\n\n\ndef fromqpixmap(im):\n return fromqimage(im)\n # buffer = QBuffer()\n # buffer.open(QIODevice.ReadWrite)\n # # im.save(buffer)\n # # What if png doesn't support some image features like animation?\n # im.save(buffer, 'ppm')\n # bytes_io = BytesIO()\n # bytes_io.write(buffer.data())\n # buffer.close()\n # bytes_io.seek(0)\n # return Image.open(bytes_io)\n\n\ndef align8to32(bytes, width, mode):\n \"\"\"\n converts each scanline of data from 8 bit to 32 bit aligned\n \"\"\"\n\n bits_per_pixel = {\"1\": 1, \"L\": 8, \"P\": 8}[mode]\n\n # calculate bytes per line and the extra padding if needed\n bits_per_line = bits_per_pixel * width\n full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)\n bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)\n\n extra_padding = -bytes_per_line % 4\n\n # already 32 bit aligned by luck\n if not extra_padding:\n return bytes\n\n new_data = []\n for i in range(len(bytes) // bytes_per_line):\n new_data.append(\n bytes[i * bytes_per_line : (i + 1) * bytes_per_line]\n + b\"\\x00\" * extra_padding\n )\n\n return b\"\".join(new_data)\n\n\ndef _toqclass_helper(im):\n data = None\n colortable = None\n\n # handle filename, if given instead of image name\n if hasattr(im, \"toUtf8\"):\n # FIXME - is this really the best way to do this?\n im = str(im.toUtf8(), \"utf-8\")\n if isPath(im):\n im = Image.open(im)\n\n if im.mode == \"1\":\n format = QImage.Format_Mono\n elif im.mode == \"L\":\n format = QImage.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n format = QImage.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i : i + 3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n format = QImage.Format_RGB32\n elif im.mode == \"RGBA\":\n data = im.tobytes(\"raw\", \"BGRA\")\n format = 
QImage.Format_ARGB32\n else:\n raise ValueError(f\"unsupported image mode {repr(im.mode)}\")\n\n __data = data or align8to32(im.tobytes(), im.size[0], im.mode)\n return {\"data\": __data, \"im\": im, \"format\": format, \"colortable\": colortable}\n\n\nif qt_is_installed:\n\n class ImageQt(QImage):\n def __init__(self, im):\n \"\"\"\n An PIL image wrapper for Qt. This is a subclass of PyQt's QImage\n class.\n\n :param im: A PIL Image object, or a file name (given either as\n Python string or a PyQt string object).\n \"\"\"\n im_data = _toqclass_helper(im)\n # must keep a reference, or Qt will crash!\n # All QImage constructors that take data operate on an existing\n # buffer, so this buffer has to hang on for the life of the image.\n # Fixes https://github.com/python-pillow/Pillow/issues/1370\n self.__data = im_data[\"data\"]\n super().__init__(\n self.__data,\n im_data[\"im\"].size[0],\n im_data[\"im\"].size[1],\n im_data[\"format\"],\n )\n if im_data[\"colortable\"]:\n self.setColorTable(im_data[\"colortable\"])\n\n\ndef toqimage(im):\n return ImageQt(im)\n\n\ndef toqpixmap(im):\n # # This doesn't work. For now using a dumb approach.\n # im_data = _toqclass_helper(im)\n # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])\n # result.loadFromData(im_data['data'])\n # Fix some strange bug that causes\n if im.mode == \"RGB\":\n im = im.convert(\"RGBA\")\n\n qimage = toqimage(im)\n return QPixmap.fromImage(qimage)\n", "path": "src/PIL/ImageQt.py"}]}
2,806
656
gh_patches_debug_34003
rasdani/github-patches
git_diff
UTNkar__moore-191
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> List of Superusers <!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] --> ### Description Making it possible to sort after lvel under /admin/users/ so admins shows up and you get a proper overview. Or: Make a list of users with superuser access to the website ### Steps to Reproduce 1. [First Step] 2. [Second Step] 3. [and so on...] <!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label --> </issue> <code> [start of src/members/wagtail_hooks.py] 1 from django.forms import CheckboxSelectMultiple 2 from wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \ 3 modeladmin_register 4 from wagtail.contrib.modeladmin.views import EditView, CreateView 5 from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \ 6 FieldPanel 7 8 from members.models import StudyProgram, Section 9 from django.utils.translation import ugettext_lazy as _ 10 11 12 class StudyProgramEditHandler: 13 def get_edit_handler_class(self): 14 edit_handler = TabbedInterface([ 15 ObjectList([ 16 FieldPanel('name_en'), 17 FieldPanel('name_sv'), 18 FieldPanel('degree'), 19 ], heading=_('General'), 20 ), 21 # TODO: http://stackoverflow.com/questions/43188124/ 22 # ObjectList([ 23 # FieldPanel('sections', widget=CheckboxSelectMultiple), 24 # ], heading=_('Sections'), 25 # ), 26 ]) 27 return edit_handler.bind_to_model(self.model) 28 29 30 class StudyProgramEditView(StudyProgramEditHandler, EditView): 31 pass 32 33 34 class StudyProgramCreateView(StudyProgramEditHandler, CreateView): 35 pass 36 37 38 class StudyProgramAdmin(ModelAdmin): 39 model = StudyProgram 40 menu_label = _('Study Program') 41 menu_icon = 'fa-graduation-cap' 42 menu_order = 510 43 add_to_settings_menu = False 44 create_view_class = StudyProgramCreateView 45 edit_view_class = StudyProgramEditView 46 list_display = ('degree', 'name_en', 'name_sv') 47 search_fields = ('name_en', 'name_sv') 48 49 50 class SectionEditHandler: 51 def get_edit_handler_class(self): 52 edit_handler = TabbedInterface([ 53 ObjectList([ 54 FieldPanel('name_en'), 55 FieldPanel('name_sv'), 56 FieldPanel('abbreviation'), 57 ], heading=_('General'), ), 58 ObjectList([ 59 FieldPanel('studies', widget=CheckboxSelectMultiple), 60 ], heading=_('Studies'), ), 61 ]) 62 return edit_handler.bind_to_model(self.model) 63 64 65 class SectionEditView(SectionEditHandler, EditView): 66 pass 67 68 69 class SectionCreateView(SectionEditHandler, CreateView): 70 pass 71 72 73 class SectionAdmin(ModelAdmin): 74 model = Section 75 menu_label = _('Sections') 76 menu_icon = 'fa-eye' 77 menu_order = 520 78 add_to_settings_menu = False 79 create_view_class = SectionCreateView 80 edit_view_class = SectionEditView 81 list_display = ('abbreviation', 'name_en', 'name_sv') 82 search_fields = ('name_en', 'name_sv', 'abbreviation') 83 84 85 class EducationAdminGroup(ModelAdminGroup): 86 menu_label = _('Education') 87 menu_icon = 'fa-university' 88 menu_order = 450 89 items = (StudyProgramAdmin, SectionAdmin) 90 91 92 modeladmin_register(EducationAdminGroup) 93 [end of src/members/wagtail_hooks.py] [start of src/members/models.py] 1 import requests 2 from django.conf import settings 3 from django.contrib.auth.models import AbstractUser 4 from django.core import validators 5 from django.db import models 6 from django.db.models import ManyToManyField 7 from django.utils import timezone 8 
from django.utils.translation import ugettext_lazy as _ 9 from requests.auth import HTTPDigestAuth 10 from simple_email_confirmation.models import SimpleEmailConfirmationUserMixin 11 12 from utils.translation import TranslatedField 13 14 15 class StudyProgram(models.Model): 16 """This class describes a university study program""" 17 18 class Meta: 19 verbose_name = _('study program') 20 verbose_name_plural = _('study programs') 21 22 DEGREE_CHOICES = ( 23 ('bsc', _('Bachelor of Science')), 24 ('msc', _('Master of Science')), 25 ('be', _('Bachelor of Engineering')), 26 ('msceng', _('Master of Science in Engineering')), 27 ) 28 29 name_en = models.CharField( 30 max_length=255, 31 verbose_name=_('English program name'), 32 help_text=_('Enter the name of the study program'), 33 blank=False, 34 ) 35 36 name_sv = models.CharField( 37 max_length=255, 38 verbose_name=_('Swedish program name'), 39 help_text=_('Enter the name of the study program'), 40 blank=False, 41 ) 42 43 name = TranslatedField('name_en', 'name_sv') 44 45 degree = models.CharField( 46 max_length=20, 47 choices=DEGREE_CHOICES, 48 verbose_name=_('Degree type'), 49 blank=True, 50 ) 51 52 def __str__(self) -> str: 53 if self.degree: 54 return _('%(degree_type)s in %(study_program)s') % { 55 'degree_type': self.get_degree_display(), 56 'study_program': self.name, 57 } 58 else: 59 return self.name.__str__() 60 61 62 class Section(models.Model): 63 """This class represent a study section""" 64 65 class Meta: 66 verbose_name = _('section') 67 verbose_name_plural = _('sections') 68 69 name_en = models.CharField( 70 max_length=255, 71 verbose_name=_('English section name'), 72 help_text=_('Enter the name of the section'), 73 blank=False, 74 ) 75 76 name_sv = models.CharField( 77 max_length=255, 78 verbose_name=_('Swedish section name'), 79 help_text=_('Enter the name of the section'), 80 blank=False, 81 ) 82 83 name = TranslatedField('name_en', 'name_sv') 84 85 abbreviation = models.CharField( 86 max_length=130, 87 verbose_name=_('Section abbreviation'), 88 help_text=_('Enter the abbreviation for the section'), 89 blank=True, 90 ) 91 92 studies = ManyToManyField( 93 StudyProgram, 94 related_name='sections', 95 blank=True, 96 ) 97 98 def __str__(self) -> str: 99 if self.abbreviation: 100 return '%s - %s' % (self.abbreviation, self.name) 101 else: 102 return self.name.__str__() 103 104 105 class Member(SimpleEmailConfirmationUserMixin, AbstractUser): 106 """This class describes a member""" 107 108 # ---- Personal information ------ 109 110 birthday = models.DateField( 111 verbose_name=_('Birthday'), 112 null=True 113 ) 114 115 person_number_ext = models.CharField( 116 max_length=4, 117 verbose_name=_('Person number extension'), 118 help_text=_('Enter the last four digits of your Swedish person ' 119 'number, given by the Swedish tax authority'), 120 validators=[validators.RegexValidator( 121 regex=r'^\d{4}$', 122 message=_('The person number extension consists of four numbers'), 123 )], 124 unique_for_date="birthday", 125 blank=True, 126 ) 127 128 # ---- Membership information ------ 129 130 MEMBERSHIP_CHOICES = ( 131 ('unknown', _('Unknown')), 132 ('nonmember', _('Nonmember')), 133 ('member', _('Member')), 134 ('alumnus', _('Alumnus')), 135 ) 136 137 status = models.CharField( 138 max_length=20, 139 choices=MEMBERSHIP_CHOICES, 140 verbose_name=_('Membership status'), 141 blank=False, 142 default='unknown' 143 ) 144 status_changed = models.DateTimeField( 145 default=timezone.now, 146 null=False, 147 ) 148 149 # ---- Contact information 
------ 150 151 phone_number = models.CharField( 152 max_length=20, 153 verbose_name=_('Phone number'), 154 help_text=_('Enter a phone number so UTN may reach you'), 155 validators=[validators.RegexValidator( 156 regex=r'^\+?\d+$', 157 message=_('Please enter a valid phone number'), 158 )], 159 blank=True, 160 ) 161 162 # ---- University information ------ 163 164 registration_year = models.CharField( 165 max_length=4, 166 verbose_name=_('Registration year'), 167 help_text=_('Enter the year you started studying at the TakNat ' 168 'faculty'), 169 validators=[validators.RegexValidator( 170 regex=r'^\d{4}$', 171 message=_('Please enter a valid year') 172 )], 173 blank=True, 174 ) 175 176 study = models.ForeignKey( 177 StudyProgram, 178 verbose_name=_('Study program'), 179 on_delete=models.SET_NULL, 180 null=True, 181 blank=True, 182 ) 183 184 section = models.ForeignKey( 185 Section, 186 verbose_name=_('Member of section'), 187 on_delete=models.SET_NULL, 188 null=True, 189 blank=True, 190 ) 191 192 def __str__(self) -> str: 193 if self.first_name and self.last_name: 194 return '%s %s' % (self.first_name, self.last_name) 195 else: 196 return self.username 197 198 def person_number(self) -> str: 199 if self.birthday is None or self.person_number_ext is None: 200 return '' 201 else: 202 return '%s-%s' % (self.birthday.strftime('%Y%m%d'), 203 self.person_number_ext) 204 205 def update_status(self, data=None): 206 if data is None: 207 if self.person_number() == '': 208 return 209 try: 210 r = requests.get( 211 'https://register.utn.se/api.php', 212 auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER, 213 settings.MEMBERSHIP_API_PASSWORD), 214 params={ 215 'action': 'check', 216 'person_number': self.person_number().replace('-', '') 217 }, 218 ) 219 data = r.json().get('status') 220 except requests.exceptions.ConnectionError: 221 data = 'unknown' 222 except ValueError: 223 return 224 225 if data == 'member': 226 self.status = 'member' 227 elif data == 'nonmember': 228 if self.status in ['unknown', 'nonmember']: 229 self.status = 'nonmember' 230 else: 231 self.status = 'alumnus' 232 233 self.status_changed = timezone.now() 234 235 def remove_old_email(self): 236 for email in self.get_unconfirmed_emails() or []: 237 self.remove_email(email) 238 for email in self.get_confirmed_emails(): 239 if email != self.email: 240 self.remove_email(email) 241 [end of src/members/models.py] [start of src/members/signals.py] 1 import datetime 2 from django.conf import settings 3 from django.core.mail import EmailMultiAlternatives 4 from django.db.models.signals import pre_save 5 from django.dispatch import receiver 6 from django.template import loader 7 from django.utils import timezone 8 from simple_email_confirmation import unconfirmed_email_created 9 10 from members.models import Member 11 12 13 @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation') 14 def send_confirmation_email(sender, email, user=None, **kwargs): 15 user = user or sender 16 if user is not None: 17 context = { 18 'email': email, 19 'domain': settings.BASE_URL, 20 'site_name': settings.WAGTAIL_SITE_NAME, 21 'token': user.get_confirmation_key(email), 22 'new_user': user.get_confirmed_emails() == [] 23 } 24 25 subject = loader.render_to_string( 26 'members/email_change_subject.txt', context) 27 # Email subject *must not* contain newlines 28 subject = ''.join(subject.splitlines()) 29 body = loader.render_to_string('members/email_change_email.html', 30 context) 31 32 email_message = EmailMultiAlternatives(subject, body, None, 
[email]) 33 email_message.send() 34 35 36 @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership') 37 def check_membership(sender, instance, **kwargs): 38 if timezone.now() - instance.status_changed > datetime.timedelta(1)\ 39 or instance.status == 'unknown': 40 instance.update_status() 41 [end of src/members/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/members/models.py b/src/members/models.py --- a/src/members/models.py +++ b/src/members/models.py @@ -191,7 +191,7 @@ def __str__(self) -> str: if self.first_name and self.last_name: - return '%s %s' % (self.first_name, self.last_name) + return self.get_full_name() else: return self.username diff --git a/src/members/signals.py b/src/members/signals.py --- a/src/members/signals.py +++ b/src/members/signals.py @@ -12,7 +12,7 @@ @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation') def send_confirmation_email(sender, email, user=None, **kwargs): - user = user or sender + user = user or sender # TODO: use user.send_email if user is not None: context = { 'email': email, diff --git a/src/members/wagtail_hooks.py b/src/members/wagtail_hooks.py --- a/src/members/wagtail_hooks.py +++ b/src/members/wagtail_hooks.py @@ -1,12 +1,14 @@ from django.forms import CheckboxSelectMultiple +from django.template import loader +from django.utils.translation import ugettext_lazy as _ from wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \ modeladmin_register from wagtail.contrib.modeladmin.views import EditView, CreateView from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \ FieldPanel +from wagtail.wagtailcore import hooks -from members.models import StudyProgram, Section -from django.utils.translation import ugettext_lazy as _ +from members.models import StudyProgram, Section, Member class StudyProgramEditHandler: @@ -90,3 +92,22 @@ modeladmin_register(EducationAdminGroup) + + +class SuperUserPanel(object): + order = 1000 + + def __init__(self, request): + self.request = request + + def render(self): + c = { + 'supers': Member.objects.filter(is_superuser=True), + 'user': self.request.user + } + return loader.get_template('members/admin_panel.html').render(c) + + [email protected]('construct_homepage_panels') +def add_super_user_panel(request, panels): + return panels.append(SuperUserPanel(request))
{"golden_diff": "diff --git a/src/members/models.py b/src/members/models.py\n--- a/src/members/models.py\n+++ b/src/members/models.py\n@@ -191,7 +191,7 @@\n \n def __str__(self) -> str:\n if self.first_name and self.last_name:\n- return '%s %s' % (self.first_name, self.last_name)\n+ return self.get_full_name()\n else:\n return self.username\n \ndiff --git a/src/members/signals.py b/src/members/signals.py\n--- a/src/members/signals.py\n+++ b/src/members/signals.py\n@@ -12,7 +12,7 @@\n \n @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\n def send_confirmation_email(sender, email, user=None, **kwargs):\n- user = user or sender\n+ user = user or sender # TODO: use user.send_email\n if user is not None:\n context = {\n 'email': email,\ndiff --git a/src/members/wagtail_hooks.py b/src/members/wagtail_hooks.py\n--- a/src/members/wagtail_hooks.py\n+++ b/src/members/wagtail_hooks.py\n@@ -1,12 +1,14 @@\n from django.forms import CheckboxSelectMultiple\n+from django.template import loader\n+from django.utils.translation import ugettext_lazy as _\n from wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \\\n modeladmin_register\n from wagtail.contrib.modeladmin.views import EditView, CreateView\n from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \\\n FieldPanel\n+from wagtail.wagtailcore import hooks\n \n-from members.models import StudyProgram, Section\n-from django.utils.translation import ugettext_lazy as _\n+from members.models import StudyProgram, Section, Member\n \n \n class StudyProgramEditHandler:\n@@ -90,3 +92,22 @@\n \n \n modeladmin_register(EducationAdminGroup)\n+\n+\n+class SuperUserPanel(object):\n+ order = 1000\n+\n+ def __init__(self, request):\n+ self.request = request\n+\n+ def render(self):\n+ c = {\n+ 'supers': Member.objects.filter(is_superuser=True),\n+ 'user': self.request.user\n+ }\n+ return loader.get_template('members/admin_panel.html').render(c)\n+\n+\[email protected]('construct_homepage_panels')\n+def add_super_user_panel(request, panels):\n+ return panels.append(SuperUserPanel(request))\n", "issue": "List of Superusers\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Description\r\n\r\nMaking it possible to sort after lvel under /admin/users/ so admins shows up and you get a proper overview. \r\n\r\nOr: Make a list of users with superuser access to the website\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. 
[and so on...]\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from django.forms import CheckboxSelectMultiple\nfrom wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \\\n modeladmin_register\nfrom wagtail.contrib.modeladmin.views import EditView, CreateView\nfrom wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \\\n FieldPanel\n\nfrom members.models import StudyProgram, Section\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass StudyProgramEditHandler:\n def get_edit_handler_class(self):\n edit_handler = TabbedInterface([\n ObjectList([\n FieldPanel('name_en'),\n FieldPanel('name_sv'),\n FieldPanel('degree'),\n ], heading=_('General'),\n ),\n # TODO: http://stackoverflow.com/questions/43188124/\n # ObjectList([\n # FieldPanel('sections', widget=CheckboxSelectMultiple),\n # ], heading=_('Sections'),\n # ),\n ])\n return edit_handler.bind_to_model(self.model)\n\n\nclass StudyProgramEditView(StudyProgramEditHandler, EditView):\n pass\n\n\nclass StudyProgramCreateView(StudyProgramEditHandler, CreateView):\n pass\n\n\nclass StudyProgramAdmin(ModelAdmin):\n model = StudyProgram\n menu_label = _('Study Program')\n menu_icon = 'fa-graduation-cap'\n menu_order = 510\n add_to_settings_menu = False\n create_view_class = StudyProgramCreateView\n edit_view_class = StudyProgramEditView\n list_display = ('degree', 'name_en', 'name_sv')\n search_fields = ('name_en', 'name_sv')\n\n\nclass SectionEditHandler:\n def get_edit_handler_class(self):\n edit_handler = TabbedInterface([\n ObjectList([\n FieldPanel('name_en'),\n FieldPanel('name_sv'),\n FieldPanel('abbreviation'),\n ], heading=_('General'), ),\n ObjectList([\n FieldPanel('studies', widget=CheckboxSelectMultiple),\n ], heading=_('Studies'), ),\n ])\n return edit_handler.bind_to_model(self.model)\n\n\nclass SectionEditView(SectionEditHandler, EditView):\n pass\n\n\nclass SectionCreateView(SectionEditHandler, CreateView):\n pass\n\n\nclass SectionAdmin(ModelAdmin):\n model = Section\n menu_label = _('Sections')\n menu_icon = 'fa-eye'\n menu_order = 520\n add_to_settings_menu = False\n create_view_class = SectionCreateView\n edit_view_class = SectionEditView\n list_display = ('abbreviation', 'name_en', 'name_sv')\n search_fields = ('name_en', 'name_sv', 'abbreviation')\n\n\nclass EducationAdminGroup(ModelAdminGroup):\n menu_label = _('Education')\n menu_icon = 'fa-university'\n menu_order = 450\n items = (StudyProgramAdmin, SectionAdmin)\n\n\nmodeladmin_register(EducationAdminGroup)\n", "path": "src/members/wagtail_hooks.py"}, {"content": "import requests\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models import ManyToManyField\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom requests.auth import HTTPDigestAuth\nfrom simple_email_confirmation.models import SimpleEmailConfirmationUserMixin\n\nfrom utils.translation import TranslatedField\n\n\nclass StudyProgram(models.Model):\n \"\"\"This class describes a university study program\"\"\"\n\n class Meta:\n verbose_name = _('study program')\n verbose_name_plural = _('study programs')\n\n DEGREE_CHOICES = (\n ('bsc', _('Bachelor of Science')),\n ('msc', _('Master of Science')),\n ('be', _('Bachelor of Engineering')),\n ('msceng', _('Master of Science in Engineering')),\n )\n\n name_en = 
models.CharField(\n max_length=255,\n verbose_name=_('English program name'),\n help_text=_('Enter the name of the study program'),\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish program name'),\n help_text=_('Enter the name of the study program'),\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n degree = models.CharField(\n max_length=20,\n choices=DEGREE_CHOICES,\n verbose_name=_('Degree type'),\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.degree:\n return _('%(degree_type)s in %(study_program)s') % {\n 'degree_type': self.get_degree_display(),\n 'study_program': self.name,\n }\n else:\n return self.name.__str__()\n\n\nclass Section(models.Model):\n \"\"\"This class represent a study section\"\"\"\n\n class Meta:\n verbose_name = _('section')\n verbose_name_plural = _('sections')\n\n name_en = models.CharField(\n max_length=255,\n verbose_name=_('English section name'),\n help_text=_('Enter the name of the section'),\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish section name'),\n help_text=_('Enter the name of the section'),\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n abbreviation = models.CharField(\n max_length=130,\n verbose_name=_('Section abbreviation'),\n help_text=_('Enter the abbreviation for the section'),\n blank=True,\n )\n\n studies = ManyToManyField(\n StudyProgram,\n related_name='sections',\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.abbreviation:\n return '%s - %s' % (self.abbreviation, self.name)\n else:\n return self.name.__str__()\n\n\nclass Member(SimpleEmailConfirmationUserMixin, AbstractUser):\n \"\"\"This class describes a member\"\"\"\n\n # ---- Personal information ------\n\n birthday = models.DateField(\n verbose_name=_('Birthday'),\n null=True\n )\n\n person_number_ext = models.CharField(\n max_length=4,\n verbose_name=_('Person number extension'),\n help_text=_('Enter the last four digits of your Swedish person '\n 'number, given by the Swedish tax authority'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('The person number extension consists of four numbers'),\n )],\n unique_for_date=\"birthday\",\n blank=True,\n )\n\n # ---- Membership information ------\n\n MEMBERSHIP_CHOICES = (\n ('unknown', _('Unknown')),\n ('nonmember', _('Nonmember')),\n ('member', _('Member')),\n ('alumnus', _('Alumnus')),\n )\n\n status = models.CharField(\n max_length=20,\n choices=MEMBERSHIP_CHOICES,\n verbose_name=_('Membership status'),\n blank=False,\n default='unknown'\n )\n status_changed = models.DateTimeField(\n default=timezone.now,\n null=False,\n )\n\n # ---- Contact information ------\n\n phone_number = models.CharField(\n max_length=20,\n verbose_name=_('Phone number'),\n help_text=_('Enter a phone number so UTN may reach you'),\n validators=[validators.RegexValidator(\n regex=r'^\\+?\\d+$',\n message=_('Please enter a valid phone number'),\n )],\n blank=True,\n )\n\n # ---- University information ------\n\n registration_year = models.CharField(\n max_length=4,\n verbose_name=_('Registration year'),\n help_text=_('Enter the year you started studying at the TakNat '\n 'faculty'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('Please enter a valid year')\n )],\n blank=True,\n )\n\n study = models.ForeignKey(\n StudyProgram,\n verbose_name=_('Study program'),\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n section = 
models.ForeignKey(\n Section,\n verbose_name=_('Member of section'),\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.first_name and self.last_name:\n return '%s %s' % (self.first_name, self.last_name)\n else:\n return self.username\n\n def person_number(self) -> str:\n if self.birthday is None or self.person_number_ext is None:\n return ''\n else:\n return '%s-%s' % (self.birthday.strftime('%Y%m%d'),\n self.person_number_ext)\n\n def update_status(self, data=None):\n if data is None:\n if self.person_number() == '':\n return\n try:\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'check',\n 'person_number': self.person_number().replace('-', '')\n },\n )\n data = r.json().get('status')\n except requests.exceptions.ConnectionError:\n data = 'unknown'\n except ValueError:\n return\n\n if data == 'member':\n self.status = 'member'\n elif data == 'nonmember':\n if self.status in ['unknown', 'nonmember']:\n self.status = 'nonmember'\n else:\n self.status = 'alumnus'\n\n self.status_changed = timezone.now()\n\n def remove_old_email(self):\n for email in self.get_unconfirmed_emails() or []:\n self.remove_email(email)\n for email in self.get_confirmed_emails():\n if email != self.email:\n self.remove_email(email)\n", "path": "src/members/models.py"}, {"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n 'new_user': user.get_confirmed_emails() == []\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1)\\\n or instance.status == 'unknown':\n instance.update_status()\n", "path": "src/members/signals.py"}]}
3,914
552
gh_patches_debug_8940
rasdani/github-patches
git_diff
cowrie__cowrie-802
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials Fresh install. I tried to exclude 'password' or 'abc123' from valid passwords for the user root Now file looks like ``` root:x:!root root:x:!123456 root:x:!password root:x:* ``` Retarted cowrie, but no way to deny login with root/password credentials Maybe, some sort of problem with new regexp checking? </issue> <code> [start of cowrie/core/auth.py] 1 # Copyright (c) 2009-2014 Upi Tamminen <[email protected]> 2 # See the COPYRIGHT file for more information 3 4 """ 5 This module contains ... 6 """ 7 8 from __future__ import division, absolute_import 9 10 import re 11 import json 12 from os import path 13 from random import randint 14 15 from twisted.python import log 16 17 from cowrie.core.config import CONFIG 18 19 class UserDB(object): 20 """ 21 By Walter de Jong <[email protected]> 22 """ 23 24 def __init__(self): 25 self.userdb = {} 26 self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path')) 27 self.load() 28 29 30 def load(self): 31 """ 32 load the user db 33 """ 34 35 with open(self.userdb_file, 'rb') as f: 36 while True: 37 rawline = f.readline() 38 if not rawline: 39 break 40 41 line = rawline.strip() 42 if not line: 43 continue 44 45 if line.startswith(b'#'): 46 continue 47 48 login, passwd = re.split(br':\w+:', line, 1) 49 self.adduser(login, passwd) 50 51 52 def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'): 53 for credentials, policy in self.userdb.items(): 54 login, passwd = credentials 55 56 if self.match_rule(login, thelogin): 57 if self.match_rule(passwd, thepasswd): 58 return policy 59 60 return False 61 62 63 def match_rule(self, rule, input): 64 if type(rule) is bytes: 65 return rule in [b'*', input] 66 else: 67 return bool(rule.search(input)) 68 69 70 def re_or_str(self, rule): 71 """ 72 Convert a /.../ type rule to a regex, otherwise return the string as-is 73 """ 74 res = re.match(br'/(.+)/(i)?$', rule) 75 if res: 76 return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0) 77 78 return rule 79 80 81 def adduser(self, login, passwd): 82 login = self.re_or_str(login) 83 84 if passwd.startswith(b'!'): 85 policy = False 86 passwd = passwd[1:] 87 else: 88 policy = True 89 90 passwd = self.re_or_str(passwd) 91 self.userdb[(login, passwd)] = policy 92 93 94 class AuthRandom(object): 95 """ 96 Alternative class that defines the checklogin() method. 97 Users will be authenticated after a random number of attempts. 98 """ 99 100 def __init__(self): 101 # Default values 102 self.mintry, self.maxtry, self.maxcache = 2, 5, 10 103 104 # Are there auth_class parameters? 
105 if CONFIG.has_option('honeypot', 'auth_class_parameters'): 106 parameters = CONFIG.get('honeypot', 'auth_class_parameters') 107 parlist = parameters.split(',') 108 if len(parlist) == 3: 109 self.mintry = int(parlist[0]) 110 self.maxtry = int(parlist[1]) 111 self.maxcache = int(parlist[2]) 112 113 if self.maxtry < self.mintry: 114 self.maxtry = self.mintry + 1 115 log.msg("maxtry < mintry, adjusting maxtry to: {}".format(self.maxtry)) 116 self.uservar = {} 117 self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path')) 118 self.loadvars() 119 120 121 def loadvars(self): 122 """ 123 Load user vars from json file 124 """ 125 if path.isfile(self.uservar_file): 126 with open(self.uservar_file, 'rb') as fp: 127 try: 128 self.uservar = json.load(fp) 129 except: 130 self.uservar = {} 131 132 133 def savevars(self): 134 """ 135 Save the user vars to json file 136 """ 137 data = self.uservar 138 # Note: this is subject to races between cowrie logins 139 with open(self.uservar_file, 'wb') as fp: 140 json.dump(data, fp) 141 142 143 def checklogin(self, thelogin, thepasswd, src_ip): 144 """ 145 Every new source IP will have to try a random number of times between 146 'mintry' and 'maxtry' before succeeding to login. 147 All username/password combinations must be different. 148 The successful login combination is stored with the IP address. 149 Successful username/passwords pairs are also cached for 'maxcache' times. 150 This is to allow access for returns from different IP addresses. 151 Variables are saved in 'uservar.json' in the data directory. 152 """ 153 154 auth = False 155 userpass = thelogin + ':' + thepasswd 156 157 if not 'cache' in self.uservar: 158 self.uservar['cache'] = [] 159 cache = self.uservar['cache'] 160 161 # Check if it is the first visit from src_ip 162 if src_ip not in self.uservar: 163 self.uservar[src_ip] = {} 164 ipinfo = self.uservar[src_ip] 165 ipinfo['try'] = 0 166 if userpass in cache: 167 log.msg("first time for {}, found cached: {}".format(src_ip, userpass)) 168 ipinfo['max'] = 1 169 ipinfo['user'] = thelogin 170 ipinfo['pw'] = thepasswd 171 auth = True 172 self.savevars() 173 return auth 174 else: 175 ipinfo['max'] = randint(self.mintry, self.maxtry) 176 log.msg("first time for {}, need: {}".format(src_ip, ipinfo['max'])) 177 178 ipinfo = self.uservar[src_ip] 179 180 # Fill in missing variables 181 if not 'max' in ipinfo: 182 ipinfo['max'] = randint(self.mintry, self.maxtry) 183 if not 'try' in ipinfo: 184 ipinfo['try'] = 0 185 if not 'tried' in ipinfo: 186 ipinfo['tried'] = [] 187 188 # Don't count repeated username/password combinations 189 if userpass in ipinfo['tried']: 190 log.msg('already tried this combination') 191 self.savevars() 192 return auth 193 194 ipinfo['try'] += 1 195 attempts = ipinfo['try'] 196 need = ipinfo['max'] 197 log.msg("login attempt: {}".format(attempts)) 198 199 # Check if enough login attempts are tried 200 if attempts < need: 201 self.uservar[src_ip]['tried'].append(userpass) 202 elif attempts == need: 203 ipinfo['user'] = thelogin 204 ipinfo['pw'] = thepasswd 205 cache.append(userpass) 206 if len(cache) > self.maxcache: 207 cache.pop(0) 208 auth = True 209 # Returning after successful login 210 elif attempts > need: 211 if not 'user' in ipinfo or not 'pw' in ipinfo: 212 log.msg('return, but username or password not set!!!') 213 ipinfo['tried'].append(userpass) 214 ipinfo['try'] = 1 215 else: 216 log.msg("login return, expect: [{}/{}]".format(ipinfo['user'], ipinfo['pw'])) 217 if thelogin == ipinfo['user'] 
and thepasswd == ipinfo['pw']: 218 auth = True 219 self.savevars() 220 return auth 221 222 [end of cowrie/core/auth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cowrie/core/auth.py b/cowrie/core/auth.py --- a/cowrie/core/auth.py +++ b/cowrie/core/auth.py @@ -11,6 +11,7 @@ import json from os import path from random import randint +from collections import OrderedDict from twisted.python import log @@ -22,7 +23,7 @@ """ def __init__(self): - self.userdb = {} + self.userdb = OrderedDict() self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path')) self.load()
{"golden_diff": "diff --git a/cowrie/core/auth.py b/cowrie/core/auth.py\n--- a/cowrie/core/auth.py\n+++ b/cowrie/core/auth.py\n@@ -11,6 +11,7 @@\n import json\n from os import path\n from random import randint\n+from collections import OrderedDict\n \n from twisted.python import log\n \n@@ -22,7 +23,7 @@\n \"\"\"\n \n def __init__(self):\n- self.userdb = {}\n+ self.userdb = OrderedDict()\n self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path'))\n self.load()\n", "issue": "adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials\nFresh install.\r\n\r\nI tried to exclude 'password' or 'abc123' from valid passwords for the user root\r\n\r\nNow file looks like \r\n```\r\nroot:x:!root\r\nroot:x:!123456\r\nroot:x:!password\r\nroot:x:*\r\n```\r\n\r\nRetarted cowrie, but no way to deny login with root/password credentials\r\n\r\nMaybe, some sort of problem with new regexp checking?\n", "before_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains ...\n\"\"\"\n\nfrom __future__ import division, absolute_import\n\nimport re\nimport json\nfrom os import path\nfrom random import randint\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <[email protected]>\n \"\"\"\n\n def __init__(self):\n self.userdb = {}\n self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path'))\n self.load()\n\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n with open(self.userdb_file, 'rb') as f:\n while True:\n rawline = f.readline()\n if not rawline:\n break\n\n line = rawline.strip()\n if not line:\n continue\n\n if line.startswith(b'#'):\n continue\n\n login, passwd = re.split(br':\\w+:', line, 1)\n self.adduser(login, passwd)\n\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n\n def adduser(self, login, passwd):\n login = self.re_or_str(login)\n\n if passwd.startswith(b'!'):\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n 
self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'rb') as fp:\n try:\n self.uservar = json.load(fp)\n except:\n self.uservar = {}\n\n\n def savevars(self):\n \"\"\"\n Save the user vars to json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between cowrie logins\n with open(self.uservar_file, 'wb') as fp:\n json.dump(data, fp)\n\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = thelogin + ':' + thepasswd\n\n if not 'cache' in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = thelogin\n ipinfo['pw'] = thepasswd\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if not 'max' in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if not 'try' in ipinfo:\n ipinfo['try'] = 0\n if not 'tried' in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = thelogin\n ipinfo['pw'] = thepasswd\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if not 'user' in ipinfo or not 'pw' in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and thepasswd == ipinfo['pw']:\n auth = True\n self.savevars()\n return auth\n\n", "path": "cowrie/core/auth.py"}]}
2,806
138
gh_patches_debug_1572
rasdani/github-patches
git_diff
hylang__hy-2070
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The manual is missing module names It looks like the big doc reorganization ended up omitting the actual module names of the various contrib and extra modules; for example, the section named "Walk" documents the functions and macros in `hy.contrib.walk`, but doesn't mention the name `hy.contrib.walk` or otherwise indicate how to bring the enumerated names into scope. Pointed out in https://github.com/hylang/hy/issues/2065#issuecomment-842377526. </issue> <code> [start of docs/conf.py] 1 # -*- coding: utf-8 -*- 2 # 3 # This file is execfile()d with the current directory set to its containing dir. 4 5 import re, os, sys, time, html 6 7 sys.path.insert(0, os.path.abspath('..')) 8 9 extensions = [ 10 'sphinx.ext.napoleon', 11 'sphinx.ext.intersphinx', 12 'sphinx.ext.autodoc', 13 'sphinxcontrib.hydomain', 14 ] 15 16 from get_version import __version__ as hy_version 17 18 # Read the Docs might dirty its checkout, so strip the dirty flag. 19 hy_version = re.sub(r'[+.]dirty\Z', '', hy_version) 20 21 templates_path = ['_templates'] 22 source_suffix = '.rst' 23 24 master_doc = 'index' 25 26 # General information about the project. 27 project = 'hy' 28 copyright = '%s the authors' % time.strftime('%Y') 29 30 # The version info for the project you're documenting, acts as replacement for 31 # |version| and |release|, also used in various other places throughout the 32 # built documents. 33 # 34 # The short X.Y version. 35 version = ".".join(hy_version.split(".")[:-1]) 36 # The full version, including alpha/beta/rc tags. 37 release = hy_version 38 hy_descriptive_version = html.escape(hy_version) 39 if "+" in hy_version: 40 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" 41 42 exclude_patterns = ['_build', 'coreteam.rst'] 43 add_module_names = False 44 45 pygments_style = 'sphinx' 46 47 import sphinx_rtd_theme 48 html_theme = 'sphinx_rtd_theme' 49 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 50 51 # Add any paths that contain custom static files (such as style sheets) here, 52 # relative to this directory. They are copied after the builtin static files, 53 # so a file named "default.css" will overwrite the builtin "default.css". 
54 html_static_path = ['_static'] 55 56 html_use_smartypants = False 57 html_show_sphinx = False 58 59 html_context = dict( 60 hy_descriptive_version = hy_descriptive_version) 61 62 highlight_language = 'clojure' 63 64 intersphinx_mapping = dict( 65 py = ('https://docs.python.org/3/', None)) 66 # ** Generate Cheatsheet 67 import json 68 from pathlib import Path 69 from itertools import zip_longest 70 71 def refize(spec): 72 role = ':hy:func:' 73 if isinstance(spec, dict): 74 _name = spec['name'] 75 uri = spec['uri'] 76 if spec.get('internal'): 77 role = ':ref:' 78 else: 79 uri = spec 80 _name = str.split(uri, '.')[-1] 81 return '{}`{} <{}>`'.format(role, _name, uri) 82 83 84 def format_refs(refs, indent): 85 args = [iter(map(refize, refs))] 86 ref_groups = zip_longest(*args, fillvalue="") 87 return str.join( 88 ' \\\n' + ' ' * (indent + 3), 89 [str.join(' ', ref_group) for ref_group in ref_groups], 90 ) 91 92 93 def format_row(category, divider_loc): 94 return '{title: <{width}} | {methods}'.format( 95 width=divider_loc, 96 title=category['name'], 97 methods=format_refs(category['methods'], divider_loc) 98 ) 99 100 101 def format_table(table_spec): 102 table_name = table_spec['name'] 103 categories = table_spec['categories'] 104 longest_cat_name = max([len(category['name']) for category in categories]) 105 table = [ 106 table_name, 107 '-' * len(table_name), 108 '', 109 '=' * longest_cat_name + ' ' + '=' * 25, 110 *(format_row(category, longest_cat_name) for category in categories), 111 '=' * longest_cat_name + ' ' + '=' * 25, 112 '' 113 ] 114 return '\n'.join(table) 115 116 117 # Modifications to the cheatsheet should be added in `cheatsheet.json` 118 cheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text()) 119 cheatsheet = [ 120 '..', 121 ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``', 122 ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``', 123 '', 124 '.. _cheatsheet:', 125 '', 126 'Cheatsheet', 127 '==========', 128 '', 129 *map(format_table, cheatsheet_spec), 130 ] 131 Path('./docs/cheatsheet.rst').write_text('\n'.join(cheatsheet)) 132 133 134 # ** Sphinx App Setup 135 136 137 def setup(app): 138 app.add_css_file('overrides.css') 139 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -40,7 +40,7 @@ hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" exclude_patterns = ['_build', 'coreteam.rst'] -add_module_names = False +add_module_names = True pygments_style = 'sphinx'
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -40,7 +40,7 @@\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n \n exclude_patterns = ['_build', 'coreteam.rst']\n-add_module_names = False\n+add_module_names = True\n \n pygments_style = 'sphinx'\n", "issue": "The manual is missing module names\nIt looks like the big doc reorganization ended up omitting the actual module names of the various contrib and extra modules; for example, the section named \"Walk\" documents the functions and macros in `hy.contrib.walk`, but doesn't mention the name `hy.contrib.walk` or otherwise indicate how to bring the enumerated names into scope.\r\n\r\nPointed out in https://github.com/hylang/hy/issues/2065#issuecomment-842377526.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = False\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max([len(category['name']) for category in categories])\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. _cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}]}
1,943
92
gh_patches_debug_23171
rasdani/github-patches
git_diff
vas3k__vas3k.club-1117
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Убрать атрибуты Autoplay и Loop для видео, заливаемых напрямую в клуб ### Discussed in https://github.com/vas3k/vas3k.club/discussions/1113 <div type='discussions-op-text'> <sup>Originally posted by **resaixe** November 4, 2020</sup> Сейчас видео, которые заливаются напрямую в клуб, имеют атрибуты `autoplay="autoplay" loop="loop"`. Например, в [этом комментарии](https://vas3k.club/post/5942/#comment-6e78d5e4-7e7d-496e-a75a-e6516f0255f8) сейчас так: ```html <video src="https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4" controls="controls" autoplay="autoplay" loop="loop" muted="muted" playsinline=""></video> ``` Предлагаю убрать эти атрибуты, чтобы пользователи сами решали, когда и сколько раз они хотят посмотреть видео. Чтобы стало так: ```html <video src="https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4" controls="controls" muted="muted" playsinline=""></video> ```</div> </issue> <code> [start of common/markdown/email_renderer.py] 1 from mistune import escape_html 2 3 from common.markdown.club_renderer import ClubRenderer 4 from common.regexp import YOUTUBE_RE 5 6 7 class EmailRenderer(ClubRenderer): 8 def simple_image(self, src, alt="", title=None): 9 return f"""<img src="{src}" alt="{alt}" width="600" border="0"><br>{title or ""}""" 10 11 def youtube(self, src, alt="", title=None): 12 youtube_match = YOUTUBE_RE.match(src) 13 youtube_id = escape_html(youtube_match.group(1) or "") 14 return f'<a href="{escape_html(src)}"><span class="ratio-16-9 video-preview" ' \ 15 f'style="background-image: url(\'https://img.youtube.com/vi/{escape_html(youtube_id)}/0.jpg\');">' \ 16 f'</span></a><br>{escape_html(title or "")}' 17 18 def video(self, src, alt="", title=None): 19 return f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{alt}</video><br>{title or ""}' 20 21 def tweet(self, src, alt="", title=None): 22 return f'<a href="{escape_html(src)}">{escape_html(src)}</a><br>{escape_html(title or "")}' 23 24 def heading(self, text, level): 25 tag = f"h{level}" 26 return f"<{tag}>{text}</{tag}>\n" 27 [end of common/markdown/email_renderer.py] [start of common/markdown/club_renderer.py] 1 import html 2 import mistune 3 from urllib.parse import unquote 4 from mistune import escape_html 5 from slugify import slugify 6 7 from common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE 8 9 IMAGE_CSS_CLASSES = { 10 "-": "text-body-image-full" 11 } 12 13 14 class ClubRenderer(mistune.HTMLRenderer): 15 def text(self, text): 16 text = escape_html(text) 17 text = USERNAME_RE.sub(r' <a href="/user/\1/">@\1</a>', text) 18 return text 19 20 def paragraph(self, text): 21 text = text.replace("\n", "<br>\n") # Mistune 2.0 broke newlines, let's hack it =/ 22 return f"<p>{text}</p>\n" 23 24 def heading(self, text, level): 25 tag = f"h{level}" 26 anchor = slugify(text[:24]) 27 return f"<{tag} id=\"{anchor}\"><a href=\"#{anchor}\">{text}</a></{tag}>\n" 28 29 def link(self, link, text=None, title=None): 30 if not text and not title: 31 # it's a pure link (without link tag) and we can try to parse it 32 embed = self.embed(link, text or "", title or "") 33 if embed: 34 return embed 35 36 if text is None: 37 text = link 38 39 # here's some magic of unescape->unquote->escape 40 # to fix cyrillic (and other non-latin) wikipedia URLs 41 return f'<a 
href="{self._safe_url(link)}">{html.escape(unquote(html.unescape(text or link)))}</a>' 42 43 def image(self, src, alt="", title=None): 44 embed = self.embed(src, alt, title) 45 if embed: 46 return embed 47 48 # users can try to "hack" our parser by using non-image urls 49 # so, if its not an image or video, display it as a link to avoid auto-loading 50 return f'<a href="{escape_html(src)}">{escape_html(src)}</a>' 51 52 def embed(self, src, alt="", title=None): 53 if IMAGE_RE.match(src): 54 return self.simple_image(src, alt, title) 55 56 if YOUTUBE_RE.match(src): 57 return self.youtube(src, alt, title) 58 59 if VIDEO_RE.match(src): 60 return self.video(src, alt, title) 61 62 if TWITTER_RE.match(src): 63 return self.tweet(src, alt, title) 64 65 return None 66 67 def simple_image(self, src, alt="", title=None): 68 css_classes = "" 69 title = title or alt 70 if title in IMAGE_CSS_CLASSES: 71 css_classes = IMAGE_CSS_CLASSES[title] 72 73 image_tag = f'<img src="{escape_html(src)}" alt="{escape_html(title)}">' 74 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else "" 75 return f'<figure class="{css_classes}">{image_tag}{caption}</figure>' 76 77 def youtube(self, src, alt="", title=None): 78 youtube_match = YOUTUBE_RE.match(src) 79 playlist = "" 80 if youtube_match.group(2): 81 playlist = f"list={escape_html(youtube_match.group(2))}&listType=playlist&" 82 video_tag = ( 83 f'<span class="ratio-16-9">' 84 f'<iframe loading="lazy" src="https://www.youtube.com/embed/{escape_html(youtube_match.group(1) or "")}' 85 f'?{playlist}autoplay=0&amp;controls=1&amp;showinfo=1&amp;vq=hd1080"' 86 f'allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen"' 87 f'allowfullscreen></iframe>' 88 f"</span>" 89 ) 90 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else "" 91 return f"<figure>{video_tag}{caption}</figure>" 92 93 def video(self, src, alt="", title=None): 94 video_tag = ( 95 f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{escape_html(alt)}</video>' 96 ) 97 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else "" 98 return f"<figure>{video_tag}{caption}</figure>" 99 100 def tweet(self, src, alt="", title=None): 101 tweet_match = TWITTER_RE.match(src) 102 twitter_tag = f'<blockquote class="twitter-tweet" tw-align-center>' \ 103 f'<a href="{tweet_match.group(1)}"></a></blockquote><br>' \ 104 f'<a href="{src}" target="_blank">{src}</a>' 105 return twitter_tag 106 [end of common/markdown/club_renderer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/common/markdown/club_renderer.py b/common/markdown/club_renderer.py --- a/common/markdown/club_renderer.py +++ b/common/markdown/club_renderer.py @@ -92,7 +92,7 @@ def video(self, src, alt="", title=None): video_tag = ( - f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{escape_html(alt)}</video>' + f'<video src="{escape_html(src)}" controls muted playsinline>{escape_html(alt)}</video>' ) caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else "" return f"<figure>{video_tag}{caption}</figure>" diff --git a/common/markdown/email_renderer.py b/common/markdown/email_renderer.py --- a/common/markdown/email_renderer.py +++ b/common/markdown/email_renderer.py @@ -16,7 +16,7 @@ f'</span></a><br>{escape_html(title or "")}' def video(self, src, alt="", title=None): - return f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{alt}</video><br>{title or ""}' + return f'<video src="{escape_html(src)}" controls muted playsinline>{alt}</video><br>{title or ""}' def tweet(self, src, alt="", title=None): return f'<a href="{escape_html(src)}">{escape_html(src)}</a><br>{escape_html(title or "")}'
{"golden_diff": "diff --git a/common/markdown/club_renderer.py b/common/markdown/club_renderer.py\n--- a/common/markdown/club_renderer.py\n+++ b/common/markdown/club_renderer.py\n@@ -92,7 +92,7 @@\n \n def video(self, src, alt=\"\", title=None):\n video_tag = (\n- f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'\n+ f'<video src=\"{escape_html(src)}\" controls muted playsinline>{escape_html(alt)}</video>'\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\ndiff --git a/common/markdown/email_renderer.py b/common/markdown/email_renderer.py\n--- a/common/markdown/email_renderer.py\n+++ b/common/markdown/email_renderer.py\n@@ -16,7 +16,7 @@\n f'</span></a><br>{escape_html(title or \"\")}'\n \n def video(self, src, alt=\"\", title=None):\n- return f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{alt}</video><br>{title or \"\"}'\n+ return f'<video src=\"{escape_html(src)}\" controls muted playsinline>{alt}</video><br>{title or \"\"}'\n \n def tweet(self, src, alt=\"\", title=None):\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a><br>{escape_html(title or \"\")}'\n", "issue": "\u0423\u0431\u0440\u0430\u0442\u044c \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b Autoplay \u0438 Loop \u0434\u043b\u044f \u0432\u0438\u0434\u0435\u043e, \u0437\u0430\u043b\u0438\u0432\u0430\u0435\u043c\u044b\u0445 \u043d\u0430\u043f\u0440\u044f\u043c\u0443\u044e \u0432 \u043a\u043b\u0443\u0431\n### Discussed in https://github.com/vas3k/vas3k.club/discussions/1113\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **resaixe** November 4, 2020</sup>\r\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0432\u0438\u0434\u0435\u043e, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u0437\u0430\u043b\u0438\u0432\u0430\u044e\u0442\u0441\u044f \u043d\u0430\u043f\u0440\u044f\u043c\u0443\u044e \u0432 \u043a\u043b\u0443\u0431, \u0438\u043c\u0435\u044e\u0442 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b `autoplay=\"autoplay\" loop=\"loop\"`. \r\n\u041d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u0432 [\u044d\u0442\u043e\u043c \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0438](https://vas3k.club/post/5942/#comment-6e78d5e4-7e7d-496e-a75a-e6516f0255f8) \u0441\u0435\u0439\u0447\u0430\u0441 \u0442\u0430\u043a:\r\n```html\r\n<video src=\"https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4\" controls=\"controls\" autoplay=\"autoplay\" loop=\"loop\" muted=\"muted\" playsinline=\"\"></video>\r\n```\r\n\r\n\u041f\u0440\u0435\u0434\u043b\u0430\u0433\u0430\u044e \u0443\u0431\u0440\u0430\u0442\u044c \u044d\u0442\u0438 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b, \u0447\u0442\u043e\u0431\u044b \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0438 \u0441\u0430\u043c\u0438 \u0440\u0435\u0448\u0430\u043b\u0438, \u043a\u043e\u0433\u0434\u0430 \u0438 \u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0440\u0430\u0437 \u043e\u043d\u0438 \u0445\u043e\u0442\u044f\u0442 \u043f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0438\u0434\u0435\u043e. 
\u0427\u0442\u043e\u0431\u044b \u0441\u0442\u0430\u043b\u043e \u0442\u0430\u043a:\r\n```html\r\n<video src=\"https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4\" controls=\"controls\" muted=\"muted\" playsinline=\"\"></video>\r\n```</div>\n", "before_files": [{"content": "from mistune import escape_html\n\nfrom common.markdown.club_renderer import ClubRenderer\nfrom common.regexp import YOUTUBE_RE\n\n\nclass EmailRenderer(ClubRenderer):\n def simple_image(self, src, alt=\"\", title=None):\n return f\"\"\"<img src=\"{src}\" alt=\"{alt}\" width=\"600\" border=\"0\"><br>{title or \"\"}\"\"\"\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n youtube_id = escape_html(youtube_match.group(1) or \"\")\n return f'<a href=\"{escape_html(src)}\"><span class=\"ratio-16-9 video-preview\" ' \\\n f'style=\"background-image: url(\\'https://img.youtube.com/vi/{escape_html(youtube_id)}/0.jpg\\');\">' \\\n f'</span></a><br>{escape_html(title or \"\")}'\n\n def video(self, src, alt=\"\", title=None):\n return f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{alt}</video><br>{title or \"\"}'\n\n def tweet(self, src, alt=\"\", title=None):\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a><br>{escape_html(title or \"\")}'\n\n def heading(self, text, level):\n tag = f\"h{level}\"\n return f\"<{tag}>{text}</{tag}>\\n\"\n", "path": "common/markdown/email_renderer.py"}, {"content": "import html\nimport mistune\nfrom urllib.parse import unquote\nfrom mistune import escape_html\nfrom slugify import slugify\n\nfrom common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE\n\nIMAGE_CSS_CLASSES = {\n \"-\": \"text-body-image-full\"\n}\n\n\nclass ClubRenderer(mistune.HTMLRenderer):\n def text(self, text):\n text = escape_html(text)\n text = USERNAME_RE.sub(r' <a href=\"/user/\\1/\">@\\1</a>', text)\n return text\n\n def paragraph(self, text):\n text = text.replace(\"\\n\", \"<br>\\n\") # Mistune 2.0 broke newlines, let's hack it =/\n return f\"<p>{text}</p>\\n\"\n\n def heading(self, text, level):\n tag = f\"h{level}\"\n anchor = slugify(text[:24])\n return f\"<{tag} id=\\\"{anchor}\\\"><a href=\\\"#{anchor}\\\">{text}</a></{tag}>\\n\"\n\n def link(self, link, text=None, title=None):\n if not text and not title:\n # it's a pure link (without link tag) and we can try to parse it\n embed = self.embed(link, text or \"\", title or \"\")\n if embed:\n return embed\n\n if text is None:\n text = link\n\n # here's some magic of unescape->unquote->escape\n # to fix cyrillic (and other non-latin) wikipedia URLs\n return f'<a href=\"{self._safe_url(link)}\">{html.escape(unquote(html.unescape(text or link)))}</a>'\n\n def image(self, src, alt=\"\", title=None):\n embed = self.embed(src, alt, title)\n if embed:\n return embed\n\n # users can try to \"hack\" our parser by using non-image urls\n # so, if its not an image or video, display it as a link to avoid auto-loading\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a>'\n\n def embed(self, src, alt=\"\", title=None):\n if IMAGE_RE.match(src):\n return self.simple_image(src, alt, title)\n\n if YOUTUBE_RE.match(src):\n return self.youtube(src, alt, title)\n\n if VIDEO_RE.match(src):\n return self.video(src, alt, title)\n\n if TWITTER_RE.match(src):\n return self.tweet(src, alt, title)\n\n return None\n\n def simple_image(self, src, alt=\"\", title=None):\n css_classes = \"\"\n title = title or alt\n if title in 
IMAGE_CSS_CLASSES:\n css_classes = IMAGE_CSS_CLASSES[title]\n\n image_tag = f'<img src=\"{escape_html(src)}\" alt=\"{escape_html(title)}\">'\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f'<figure class=\"{css_classes}\">{image_tag}{caption}</figure>'\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n playlist = \"\"\n if youtube_match.group(2):\n playlist = f\"list={escape_html(youtube_match.group(2))}&listType=playlist&\"\n video_tag = (\n f'<span class=\"ratio-16-9\">'\n f'<iframe loading=\"lazy\" src=\"https://www.youtube.com/embed/{escape_html(youtube_match.group(1) or \"\")}'\n f'?{playlist}autoplay=0&amp;controls=1&amp;showinfo=1&amp;vq=hd1080\"'\n f'allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen\"'\n f'allowfullscreen></iframe>'\n f\"</span>\"\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def video(self, src, alt=\"\", title=None):\n video_tag = (\n f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def tweet(self, src, alt=\"\", title=None):\n tweet_match = TWITTER_RE.match(src)\n twitter_tag = f'<blockquote class=\"twitter-tweet\" tw-align-center>' \\\n f'<a href=\"{tweet_match.group(1)}\"></a></blockquote><br>' \\\n f'<a href=\"{src}\" target=\"_blank\">{src}</a>'\n return twitter_tag\n", "path": "common/markdown/club_renderer.py"}]}
2,537
325
gh_patches_debug_16394
rasdani/github-patches
git_diff
yt-project__yt-2265
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dynamic parallel objects StopIteration error in Python 3.7 ### Bug report **Bug summary** Dynamic parallel objects would result in [`StopIteration` error stemming from a Python 3.7 new behavior](https://stackoverflow.com/a/51701040/10892982). **Code for reproduction** Given `test.py`: ```python import yt yt.enable_parallelism() for x in yt.parallel_objects(range(10), dynamic=True): print(x) ``` Run: ```console $ mpirun python test.py ``` **Actual outcome** ```console ... File "test.py", line 4, in <module> for x in yt.parallel_objects(range(10), dynamic=True): File "/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/parallel_analysis_interface.py", line 481, in parallel_objects storage=storage): File "/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/task_queue.py", line 174, in dynamic_parallel_objects for task in my_q: P001 yt : [ERROR ] 2019-05-27 12:40:34,817 RuntimeError: generator raised StopIteration ... ``` **Expected outcome** No error. **Version Information** * Python Version: 3.7 * yt version: master branch (3.6.dev0) </issue> <code> [start of yt/utilities/parallel_tools/task_queue.py] 1 """ 2 Task queue in yt 3 4 5 6 """ 7 8 #----------------------------------------------------------------------------- 9 # Copyright (c) 2013, yt Development Team. 10 # 11 # Distributed under the terms of the Modified BSD License. 12 # 13 # The full license is in the file COPYING.txt, distributed with this software. 14 #----------------------------------------------------------------------------- 15 16 import numpy as np 17 18 from yt.funcs import mylog 19 from .parallel_analysis_interface import \ 20 communication_system, \ 21 _get_comm, \ 22 parallel_capable, \ 23 ResultsStorage 24 25 messages = dict( 26 task = dict(msg = 'next'), 27 result = dict(msg = 'result'), 28 task_req = dict(msg = 'task_req'), 29 end = dict(msg = 'no_more_tasks'), 30 ) 31 32 class TaskQueueNonRoot(object): 33 def __init__(self, tasks, comm, subcomm): 34 self.tasks = tasks 35 self.results = {} 36 self.comm = comm 37 self.subcomm = subcomm 38 39 def send_result(self, result): 40 new_msg = messages['result'].copy() 41 new_msg['value'] = result 42 if self.subcomm.rank == 0: 43 self.comm.comm.send(new_msg, dest = 0, tag=1) 44 self.subcomm.barrier() 45 46 def get_next(self): 47 msg = messages['task_req'].copy() 48 if self.subcomm.rank == 0: 49 self.comm.comm.send(msg, dest = 0, tag=1) 50 msg = self.comm.comm.recv(source = 0, tag=2) 51 msg = self.subcomm.bcast(msg, root=0) 52 if msg['msg'] == messages['end']['msg']: 53 mylog.debug("Notified to end") 54 raise StopIteration 55 return msg['value'] 56 57 def __iter__(self): 58 while 1: 59 yield self.get_next() 60 61 def run(self, callable): 62 for task in self: 63 self.send_result(callable(task)) 64 return self.finalize() 65 66 def finalize(self, vals = None): 67 return self.comm.comm.bcast(vals, root = 0) 68 69 class TaskQueueRoot(TaskQueueNonRoot): 70 def __init__(self, tasks, comm, njobs): 71 self.njobs = njobs 72 self.tasks = tasks 73 self.results = {} 74 self.assignments = {} 75 self._notified = 0 76 self._current = 0 77 self._remaining = len(self.tasks) 78 self.comm = comm 79 # Set up threading here 80 # self.dist = threading.Thread(target=self.handle_assignments) 81 # self.dist.daemon = True 82 # self.dist.start() 83 84 def run(self, func = None): 85 self.comm.probe_loop(1, self.handle_assignment) 86 return 
self.finalize(self.results) 87 88 def insert_result(self, source_id, result): 89 task_id = self.assignments[source_id] 90 self.results[task_id] = result 91 92 def assign_task(self, source_id): 93 if self._remaining == 0: 94 mylog.debug("Notifying %s to end", source_id) 95 msg = messages['end'].copy() 96 self._notified += 1 97 else: 98 msg = messages['task'].copy() 99 task_id = self._current 100 task = self.tasks[task_id] 101 self.assignments[source_id] = task_id 102 self._current += 1 103 self._remaining -= 1 104 msg['value'] = task 105 self.comm.comm.send(msg, dest = source_id, tag = 2) 106 107 def handle_assignment(self, status): 108 msg = self.comm.comm.recv(source = status.source, tag = 1) 109 if msg['msg'] == messages['result']['msg']: 110 self.insert_result(status.source, msg['value']) 111 elif msg['msg'] == messages['task_req']['msg']: 112 self.assign_task(status.source) 113 else: 114 mylog.error("GOT AN UNKNOWN MESSAGE: %s", msg) 115 raise RuntimeError 116 if self._notified >= self.njobs: 117 raise StopIteration 118 119 def task_queue(func, tasks, njobs=0): 120 comm = _get_comm(()) 121 if not parallel_capable: 122 mylog.error("Cannot create task queue for serial process.") 123 raise RuntimeError 124 my_size = comm.comm.size 125 if njobs <= 0: 126 njobs = my_size - 1 127 if njobs >= my_size: 128 mylog.error("You have asked for %s jobs, but only %s processors are available.", 129 njobs, (my_size - 1)) 130 raise RuntimeError 131 my_rank = comm.rank 132 all_new_comms = np.array_split(np.arange(1, my_size), njobs) 133 all_new_comms.insert(0, np.array([0])) 134 for i,comm_set in enumerate(all_new_comms): 135 if my_rank in comm_set: 136 my_new_id = i 137 break 138 subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist()) 139 140 if comm.comm.rank == 0: 141 my_q = TaskQueueRoot(tasks, comm, njobs) 142 else: 143 my_q = TaskQueueNonRoot(None, comm, subcomm) 144 communication_system.pop() 145 return my_q.run(func) 146 147 def dynamic_parallel_objects(tasks, njobs=0, storage=None, broadcast=True): 148 comm = _get_comm(()) 149 if not parallel_capable: 150 mylog.error("Cannot create task queue for serial process.") 151 raise RuntimeError 152 my_size = comm.comm.size 153 if njobs <= 0: 154 njobs = my_size - 1 155 if njobs >= my_size: 156 mylog.error("You have asked for %s jobs, but only %s processors are available.", 157 njobs, (my_size - 1)) 158 raise RuntimeError 159 my_rank = comm.rank 160 all_new_comms = np.array_split(np.arange(1, my_size), njobs) 161 all_new_comms.insert(0, np.array([0])) 162 for i,comm_set in enumerate(all_new_comms): 163 if my_rank in comm_set: 164 my_new_id = i 165 break 166 subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist()) 167 168 if comm.comm.rank == 0: 169 my_q = TaskQueueRoot(tasks, comm, njobs) 170 my_q.comm.probe_loop(1, my_q.handle_assignment) 171 else: 172 my_q = TaskQueueNonRoot(None, comm, subcomm) 173 if storage is None: 174 for task in my_q: 175 yield task 176 else: 177 for task in my_q: 178 rstore = ResultsStorage() 179 yield rstore, task 180 my_q.send_result(rstore.result) 181 182 if storage is not None: 183 if broadcast: 184 my_results = my_q.comm.comm.bcast(my_q.results, root=0) 185 else: 186 my_results = my_q.results 187 storage.update(my_results) 188 189 communication_system.pop() 190 [end of yt/utilities/parallel_tools/task_queue.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/yt/utilities/parallel_tools/task_queue.py b/yt/utilities/parallel_tools/task_queue.py --- a/yt/utilities/parallel_tools/task_queue.py +++ b/yt/utilities/parallel_tools/task_queue.py @@ -43,7 +43,7 @@ self.comm.comm.send(new_msg, dest = 0, tag=1) self.subcomm.barrier() - def get_next(self): + def __next__(self): msg = messages['task_req'].copy() if self.subcomm.rank == 0: self.comm.comm.send(msg, dest = 0, tag=1) @@ -54,9 +54,11 @@ raise StopIteration return msg['value'] + # For Python 2 compatibility + next = __next__ + def __iter__(self): - while 1: - yield self.get_next() + return self def run(self, callable): for task in self:
{"golden_diff": "diff --git a/yt/utilities/parallel_tools/task_queue.py b/yt/utilities/parallel_tools/task_queue.py\n--- a/yt/utilities/parallel_tools/task_queue.py\n+++ b/yt/utilities/parallel_tools/task_queue.py\n@@ -43,7 +43,7 @@\n self.comm.comm.send(new_msg, dest = 0, tag=1)\n self.subcomm.barrier()\n \n- def get_next(self):\n+ def __next__(self):\n msg = messages['task_req'].copy()\n if self.subcomm.rank == 0:\n self.comm.comm.send(msg, dest = 0, tag=1)\n@@ -54,9 +54,11 @@\n raise StopIteration\n return msg['value']\n \n+ # For Python 2 compatibility\n+ next = __next__\n+\n def __iter__(self):\n- while 1:\n- yield self.get_next()\n+ return self\n \n def run(self, callable):\n for task in self:\n", "issue": "Dynamic parallel objects StopIteration error in Python 3.7\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nDynamic parallel objects would result in [`StopIteration` error stemming from a Python 3.7 new behavior](https://stackoverflow.com/a/51701040/10892982).\r\n\r\n**Code for reproduction**\r\n\r\nGiven `test.py`:\r\n```python\r\nimport yt\r\nyt.enable_parallelism()\r\n\r\nfor x in yt.parallel_objects(range(10), dynamic=True):\r\n print(x)\r\n```\r\n\r\nRun:\r\n```console\r\n$ mpirun python test.py\r\n```\r\n\r\n**Actual outcome**\r\n\r\n```console\r\n...\r\n File \"test.py\", line 4, in <module>\r\n for x in yt.parallel_objects(range(10), dynamic=True):\r\n File \"/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/parallel_analysis_interface.py\", line 481, in parallel_objects\r\n storage=storage):\r\n File \"/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/task_queue.py\", line 174, in dynamic_parallel_objects\r\n for task in my_q:\r\nP001 yt : [ERROR ] 2019-05-27 12:40:34,817 RuntimeError: generator raised StopIteration\r\n...\r\n```\r\n\r\n**Expected outcome**\r\n\r\nNo error.\r\n\r\n**Version Information**\r\n\r\n * Python Version: 3.7\r\n * yt version: master branch (3.6.dev0)\r\n\n", "before_files": [{"content": "\"\"\"\nTask queue in yt\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.funcs import mylog\nfrom .parallel_analysis_interface import \\\n communication_system, \\\n _get_comm, \\\n parallel_capable, \\\n ResultsStorage\n\nmessages = dict(\n task = dict(msg = 'next'),\n result = dict(msg = 'result'),\n task_req = dict(msg = 'task_req'),\n end = dict(msg = 'no_more_tasks'),\n)\n\nclass TaskQueueNonRoot(object):\n def __init__(self, tasks, comm, subcomm):\n self.tasks = tasks\n self.results = {}\n self.comm = comm\n self.subcomm = subcomm\n\n def send_result(self, result):\n new_msg = messages['result'].copy()\n new_msg['value'] = result\n if self.subcomm.rank == 0:\n self.comm.comm.send(new_msg, dest = 0, tag=1)\n self.subcomm.barrier()\n\n def get_next(self):\n msg = messages['task_req'].copy()\n if self.subcomm.rank == 0:\n self.comm.comm.send(msg, dest = 0, tag=1)\n msg = self.comm.comm.recv(source = 0, tag=2)\n msg = self.subcomm.bcast(msg, root=0)\n if msg['msg'] == messages['end']['msg']:\n mylog.debug(\"Notified to end\")\n raise StopIteration\n return msg['value']\n\n def __iter__(self):\n while 1:\n yield self.get_next()\n\n def run(self, callable):\n for task in self:\n 
self.send_result(callable(task))\n return self.finalize()\n\n def finalize(self, vals = None):\n return self.comm.comm.bcast(vals, root = 0)\n\nclass TaskQueueRoot(TaskQueueNonRoot):\n def __init__(self, tasks, comm, njobs):\n self.njobs = njobs\n self.tasks = tasks\n self.results = {}\n self.assignments = {}\n self._notified = 0\n self._current = 0\n self._remaining = len(self.tasks)\n self.comm = comm\n # Set up threading here\n # self.dist = threading.Thread(target=self.handle_assignments)\n # self.dist.daemon = True\n # self.dist.start()\n\n def run(self, func = None):\n self.comm.probe_loop(1, self.handle_assignment)\n return self.finalize(self.results)\n\n def insert_result(self, source_id, result):\n task_id = self.assignments[source_id]\n self.results[task_id] = result\n\n def assign_task(self, source_id):\n if self._remaining == 0:\n mylog.debug(\"Notifying %s to end\", source_id)\n msg = messages['end'].copy()\n self._notified += 1\n else:\n msg = messages['task'].copy()\n task_id = self._current\n task = self.tasks[task_id]\n self.assignments[source_id] = task_id\n self._current += 1\n self._remaining -= 1\n msg['value'] = task\n self.comm.comm.send(msg, dest = source_id, tag = 2)\n\n def handle_assignment(self, status):\n msg = self.comm.comm.recv(source = status.source, tag = 1)\n if msg['msg'] == messages['result']['msg']:\n self.insert_result(status.source, msg['value'])\n elif msg['msg'] == messages['task_req']['msg']:\n self.assign_task(status.source)\n else:\n mylog.error(\"GOT AN UNKNOWN MESSAGE: %s\", msg)\n raise RuntimeError\n if self._notified >= self.njobs:\n raise StopIteration\n\ndef task_queue(func, tasks, njobs=0):\n comm = _get_comm(())\n if not parallel_capable:\n mylog.error(\"Cannot create task queue for serial process.\")\n raise RuntimeError\n my_size = comm.comm.size\n if njobs <= 0:\n njobs = my_size - 1\n if njobs >= my_size:\n mylog.error(\"You have asked for %s jobs, but only %s processors are available.\",\n njobs, (my_size - 1))\n raise RuntimeError\n my_rank = comm.rank\n all_new_comms = np.array_split(np.arange(1, my_size), njobs)\n all_new_comms.insert(0, np.array([0]))\n for i,comm_set in enumerate(all_new_comms):\n if my_rank in comm_set:\n my_new_id = i\n break\n subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())\n \n if comm.comm.rank == 0:\n my_q = TaskQueueRoot(tasks, comm, njobs)\n else:\n my_q = TaskQueueNonRoot(None, comm, subcomm)\n communication_system.pop()\n return my_q.run(func)\n\ndef dynamic_parallel_objects(tasks, njobs=0, storage=None, broadcast=True):\n comm = _get_comm(())\n if not parallel_capable:\n mylog.error(\"Cannot create task queue for serial process.\")\n raise RuntimeError\n my_size = comm.comm.size\n if njobs <= 0:\n njobs = my_size - 1\n if njobs >= my_size:\n mylog.error(\"You have asked for %s jobs, but only %s processors are available.\",\n njobs, (my_size - 1))\n raise RuntimeError\n my_rank = comm.rank\n all_new_comms = np.array_split(np.arange(1, my_size), njobs)\n all_new_comms.insert(0, np.array([0]))\n for i,comm_set in enumerate(all_new_comms):\n if my_rank in comm_set:\n my_new_id = i\n break\n subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())\n \n if comm.comm.rank == 0:\n my_q = TaskQueueRoot(tasks, comm, njobs)\n my_q.comm.probe_loop(1, my_q.handle_assignment)\n else:\n my_q = TaskQueueNonRoot(None, comm, subcomm)\n if storage is None:\n for task in my_q:\n yield task\n else:\n for task in my_q:\n rstore = ResultsStorage()\n yield rstore, 
task\n my_q.send_result(rstore.result)\n\n if storage is not None:\n if broadcast:\n my_results = my_q.comm.comm.bcast(my_q.results, root=0)\n else:\n my_results = my_q.results\n storage.update(my_results)\n\n communication_system.pop()\n", "path": "yt/utilities/parallel_tools/task_queue.py"}]}
2,834
220
gh_patches_debug_7312
rasdani/github-patches
git_diff
spack__spack-41502
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Installation issue: tcl with <= gcc@11 ### Steps to reproduce I started the most up to date version of spack, 05-12-2023, and had a glitch when trying to compile `tcl`: spack install -v [email protected] it appears to be as a result of the addition of this edit [40946](https://github.com/spack/spack/pull/40946) filter_compiler_wrappers("tclConfig.sh", relative_root="lib") I think the error is related to the `filter_compile_wrappers function` trying to replace a line containing `gcc` and `g++`, however the file in question, `tclConfig.sh` only has one line containing `gcc`, no `g++`: # C compiler to use for compilation. TCL_CC='/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc' My base system, ubuntu22.04, has `[email protected]` as its default. I have stripped this back to a clean checkout of spack and go straight into a compile of `tcl`. If I compile and instead use `[email protected].` it do not get the error, the installation is successful. Also if I comment out that one line from the `tcl/package.py` file, the installation is successful using `[email protected]`. ### Error message ``` ==> [2023-12-05-10:43:44.657378] Find complete: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib ('tclConfig.sh',) ==> [2023-12-05-10:43:44.658905] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing "/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc"] ==> [2023-12-05-10:43:44.659794] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing "/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/g\+\+"] ==> Error: AttributeError: 'NoneType' object has no attribute 'replace' ``` ### Information on your system * **Spack:** 0.22.0.dev0 (798770f9e54bfd3cb1a52af4a9bd2937f826018e) * **Python:** 3.11.6 * **Platform:** linux-ubuntu22.04-skylake * **Concretizer:** clingo ### General information - [X] I have run `spack debug report` and reported the version of Spack/Python/Platform - [X] I have searched the issues of this repo and believe this is not a duplicate - [X] I have run the failing commands in debug mode and reported the output </issue> <code> [start of lib/spack/spack/mixins.py] 1 # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 """This module contains additional behavior that can be attached to any given 7 package. 8 """ 9 import os 10 11 import llnl.util.filesystem 12 13 import spack.builder 14 15 16 def filter_compiler_wrappers(*files, **kwargs): 17 """Substitutes any path referring to a Spack compiler wrapper with the 18 path of the underlying compiler that has been used. 19 20 If this isn't done, the files will have CC, CXX, F77, and FC set to 21 Spack's generic cc, c++, f77, and f90. We want them to be bound to 22 whatever compiler they were built with. 
23 24 Args: 25 *files: files to be filtered relative to the search root (which is, 26 by default, the installation prefix) 27 28 **kwargs: allowed keyword arguments 29 30 after 31 specifies after which phase the files should be 32 filtered (defaults to 'install') 33 34 relative_root 35 path relative to prefix where to start searching for 36 the files to be filtered. If not set the install prefix 37 wil be used as the search root. **It is highly recommended 38 to set this, as searching from the installation prefix may 39 affect performance severely in some cases**. 40 41 ignore_absent, backup 42 these two keyword arguments, if present, will be forwarded 43 to ``filter_file`` (see its documentation for more information 44 on their behavior) 45 46 recursive 47 this keyword argument, if present, will be forwarded to 48 ``find`` (see its documentation for more information on the 49 behavior) 50 """ 51 after = kwargs.get("after", "install") 52 relative_root = kwargs.get("relative_root", None) 53 54 filter_kwargs = { 55 "ignore_absent": kwargs.get("ignore_absent", True), 56 "backup": kwargs.get("backup", False), 57 "string": True, 58 } 59 60 find_kwargs = {"recursive": kwargs.get("recursive", False)} 61 62 def _filter_compiler_wrappers_impl(pkg_or_builder): 63 pkg = getattr(pkg_or_builder, "pkg", pkg_or_builder) 64 # Compute the absolute path of the search root 65 root = os.path.join(pkg.prefix, relative_root) if relative_root else pkg.prefix 66 67 # Compute the absolute path of the files to be filtered and 68 # remove links from the list. 69 abs_files = llnl.util.filesystem.find(root, files, **find_kwargs) 70 abs_files = [x for x in abs_files if not os.path.islink(x)] 71 72 x = llnl.util.filesystem.FileFilter(*abs_files) 73 74 compiler_vars = [ 75 ("CC", pkg.compiler.cc), 76 ("CXX", pkg.compiler.cxx), 77 ("F77", pkg.compiler.f77), 78 ("FC", pkg.compiler.fc), 79 ] 80 81 # Some paths to the compiler wrappers might be substrings of the others. 82 # For example: 83 # CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper) 84 # FC=/path/to/spack/lib/spack/env/cce/ftn 85 # Therefore, we perform the filtering in the reversed sorted order of 86 # the substituted strings. If, however, the strings are identical (e.g. 87 # both CC and FC are set using realpath), the filtering is done 88 # according to the order in compiler_vars. To achieve that, we populate 89 # the following array with tuples of three elements: path to the 90 # wrapper, negated index of the variable in compiler_vars, path to the 91 # real compiler. This way, the reversed sorted order of the resulting 92 # array is the order of replacements that we need. 93 replacements = [] 94 95 for idx, (env_var, compiler_path) in enumerate(compiler_vars): 96 if env_var in os.environ: 97 # filter spack wrapper and links to spack wrapper in case 98 # build system runs realpath 99 wrapper = os.environ[env_var] 100 for wrapper_path in (wrapper, os.path.realpath(wrapper)): 101 replacements.append((wrapper_path, -idx, compiler_path)) 102 103 for wrapper_path, _, compiler_path in sorted(replacements, reverse=True): 104 x.filter(wrapper_path, compiler_path, **filter_kwargs) 105 106 # Remove this linking flag if present (it turns RPATH into RUNPATH) 107 x.filter("{0}--enable-new-dtags".format(pkg.compiler.linker_arg), "", **filter_kwargs) 108 109 # NAG compiler is usually mixed with GCC, which has a different 110 # prefix for linker arguments. 
111 if pkg.compiler.name == "nag": 112 x.filter("-Wl,--enable-new-dtags", "", **filter_kwargs) 113 114 spack.builder.run_after(after)(_filter_compiler_wrappers_impl) 115 [end of lib/spack/spack/mixins.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/spack/spack/mixins.py b/lib/spack/spack/mixins.py --- a/lib/spack/spack/mixins.py +++ b/lib/spack/spack/mixins.py @@ -93,7 +93,7 @@ replacements = [] for idx, (env_var, compiler_path) in enumerate(compiler_vars): - if env_var in os.environ: + if env_var in os.environ and compiler_path is not None: # filter spack wrapper and links to spack wrapper in case # build system runs realpath wrapper = os.environ[env_var]
{"golden_diff": "diff --git a/lib/spack/spack/mixins.py b/lib/spack/spack/mixins.py\n--- a/lib/spack/spack/mixins.py\n+++ b/lib/spack/spack/mixins.py\n@@ -93,7 +93,7 @@\n replacements = []\n \n for idx, (env_var, compiler_path) in enumerate(compiler_vars):\n- if env_var in os.environ:\n+ if env_var in os.environ and compiler_path is not None:\n # filter spack wrapper and links to spack wrapper in case\n # build system runs realpath\n wrapper = os.environ[env_var]\n", "issue": "Installation issue: tcl with <= gcc@11\n### Steps to reproduce\r\n\r\nI started the most up to date version of spack, 05-12-2023, and had a glitch when trying to compile `tcl`:\r\n\r\n spack install -v [email protected]\r\n \r\nit appears to be as a result of the addition of this edit [40946](https://github.com/spack/spack/pull/40946)\r\n\r\n filter_compiler_wrappers(\"tclConfig.sh\", relative_root=\"lib\")\r\n \r\nI think the error is related to the `filter_compile_wrappers function` trying to replace a line containing `gcc` and `g++`, however the file in question, `tclConfig.sh` only has one line containing `gcc`, no `g++`:\r\n\r\n # C compiler to use for compilation.\r\n TCL_CC='/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc'\r\n\r\n\r\nMy base system, ubuntu22.04, has `[email protected]` as its default. I have stripped this back to a clean checkout of spack and go straight into a compile of `tcl`.\r\n\r\nIf I compile and instead use `[email protected].` it do not get the error, the installation is successful. Also if I comment out that one line from the `tcl/package.py` file, the installation is successful using `[email protected]`.\r\n\r\n### Error message\r\n\r\n```\r\n==> [2023-12-05-10:43:44.657378] Find complete: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib ('tclConfig.sh',)\r\n==> [2023-12-05-10:43:44.658905] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing \"/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc\"]\r\n==> [2023-12-05-10:43:44.659794] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing \"/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/g\\+\\+\"]\r\n==> Error: AttributeError: 'NoneType' object has no attribute 'replace'\r\n```\r\n\r\n### Information on your system\r\n\r\n* **Spack:** 0.22.0.dev0 (798770f9e54bfd3cb1a52af4a9bd2937f826018e)\r\n* **Python:** 3.11.6\r\n* **Platform:** linux-ubuntu22.04-skylake\r\n* **Concretizer:** clingo\r\n\r\n\r\n### General information\r\n\r\n- [X] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [X] I have searched the issues of this repo and believe this is not a duplicate\r\n- [X] I have run the failing commands in debug mode and reported the output\n", "before_files": [{"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\"\"\"This module contains additional behavior that can be attached to any given\npackage.\n\"\"\"\nimport os\n\nimport llnl.util.filesystem\n\nimport spack.builder\n\n\ndef filter_compiler_wrappers(*files, **kwargs):\n \"\"\"Substitutes any path referring to a Spack compiler wrapper with the\n path of the underlying compiler that has been used.\n\n If this isn't done, the files will have CC, CXX, F77, and FC set to\n Spack's generic cc, c++, f77, and f90. We want them to be bound to\n whatever compiler they were built with.\n\n Args:\n *files: files to be filtered relative to the search root (which is,\n by default, the installation prefix)\n\n **kwargs: allowed keyword arguments\n\n after\n specifies after which phase the files should be\n filtered (defaults to 'install')\n\n relative_root\n path relative to prefix where to start searching for\n the files to be filtered. If not set the install prefix\n wil be used as the search root. **It is highly recommended\n to set this, as searching from the installation prefix may\n affect performance severely in some cases**.\n\n ignore_absent, backup\n these two keyword arguments, if present, will be forwarded\n to ``filter_file`` (see its documentation for more information\n on their behavior)\n\n recursive\n this keyword argument, if present, will be forwarded to\n ``find`` (see its documentation for more information on the\n behavior)\n \"\"\"\n after = kwargs.get(\"after\", \"install\")\n relative_root = kwargs.get(\"relative_root\", None)\n\n filter_kwargs = {\n \"ignore_absent\": kwargs.get(\"ignore_absent\", True),\n \"backup\": kwargs.get(\"backup\", False),\n \"string\": True,\n }\n\n find_kwargs = {\"recursive\": kwargs.get(\"recursive\", False)}\n\n def _filter_compiler_wrappers_impl(pkg_or_builder):\n pkg = getattr(pkg_or_builder, \"pkg\", pkg_or_builder)\n # Compute the absolute path of the search root\n root = os.path.join(pkg.prefix, relative_root) if relative_root else pkg.prefix\n\n # Compute the absolute path of the files to be filtered and\n # remove links from the list.\n abs_files = llnl.util.filesystem.find(root, files, **find_kwargs)\n abs_files = [x for x in abs_files if not os.path.islink(x)]\n\n x = llnl.util.filesystem.FileFilter(*abs_files)\n\n compiler_vars = [\n (\"CC\", pkg.compiler.cc),\n (\"CXX\", pkg.compiler.cxx),\n (\"F77\", pkg.compiler.f77),\n (\"FC\", pkg.compiler.fc),\n ]\n\n # Some paths to the compiler wrappers might be substrings of the others.\n # For example:\n # CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper)\n # FC=/path/to/spack/lib/spack/env/cce/ftn\n # Therefore, we perform the filtering in the reversed sorted order of\n # the substituted strings. If, however, the strings are identical (e.g.\n # both CC and FC are set using realpath), the filtering is done\n # according to the order in compiler_vars. To achieve that, we populate\n # the following array with tuples of three elements: path to the\n # wrapper, negated index of the variable in compiler_vars, path to the\n # real compiler. 
This way, the reversed sorted order of the resulting\n # array is the order of replacements that we need.\n replacements = []\n\n for idx, (env_var, compiler_path) in enumerate(compiler_vars):\n if env_var in os.environ:\n # filter spack wrapper and links to spack wrapper in case\n # build system runs realpath\n wrapper = os.environ[env_var]\n for wrapper_path in (wrapper, os.path.realpath(wrapper)):\n replacements.append((wrapper_path, -idx, compiler_path))\n\n for wrapper_path, _, compiler_path in sorted(replacements, reverse=True):\n x.filter(wrapper_path, compiler_path, **filter_kwargs)\n\n # Remove this linking flag if present (it turns RPATH into RUNPATH)\n x.filter(\"{0}--enable-new-dtags\".format(pkg.compiler.linker_arg), \"\", **filter_kwargs)\n\n # NAG compiler is usually mixed with GCC, which has a different\n # prefix for linker arguments.\n if pkg.compiler.name == \"nag\":\n x.filter(\"-Wl,--enable-new-dtags\", \"\", **filter_kwargs)\n\n spack.builder.run_after(after)(_filter_compiler_wrappers_impl)\n", "path": "lib/spack/spack/mixins.py"}]}
2,684
137
gh_patches_debug_6727
rasdani/github-patches
git_diff
saulpw__visidata-2018
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Parquet Large String columns are typed vlen by default **Small description** I have a parquet file which contains string columns: [australian_public_holidays_2014_2020.zip](https://github.com/saulpw/visidata/files/12389440/australian_public_holidays_2014_2020.zip) **Expected result** In DuckDB, it looks like this: ``` D select * from 'australian_public_holidays_2014_2020.parquet'; ┌────────────┬──────────────────────┬──────────────────────┬────────────────────────────────────────────────────┬──────────────┐ │ Date │ Holiday_Name │ Information │ More_Information │ Jurisdiction │ │ date │ varchar │ varchar │ varchar │ varchar │ ├────────────┼──────────────────────┼──────────────────────┼────────────────────────────────────────────────────┼──────────────┤ │ 2014-01-01 │ New Year's Day │ New Year's Day is . │ │ act │ │ 2014-01-27 │ Australia Day │ Always celebrated . │ │ act │ │ 2014-03-10 │ Canberra Day │ Held on the second. │ http://www.cmd.act.gov.au/communication/holidays │ act │ ``` **Actual result with screenshot** In Visidata with pyarrow installed, it looks like this: ``` Date Holiday_Name Information More_Information Jurisdiction 2014-01-01 14 87 3 2014-01-27 13 31 3 2014-03-10 12 148 48 3 ``` Note that the numbers are the _exact_ length of the missing strings! **Steps to reproduce with sample data and a .vd** ``` vd australian_public_holidays_2014_2020.parquet ``` (Skipping the command-log as simply opening the file reproduces the issue) **Additional context** ``` ❯ vd --version saul.pw/VisiData v2.11.1 ``` Python 3.10.12 </issue> <code> [start of visidata/loaders/arrow.py] 1 from collections import defaultdict 2 3 from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd 4 5 6 7 @VisiData.api 8 def open_arrow(vd, p): 9 'Apache Arrow IPC file format' 10 return ArrowSheet(p.name, source=p) 11 12 13 @VisiData.api 14 def open_arrows(vd, p): 15 'Apache Arrow IPC streaming format' 16 return ArrowSheet(p.name, source=p) 17 18 19 def arrow_to_vdtype(t): 20 pa = vd.importExternal('pyarrow') 21 22 arrow_to_vd_typemap = { 23 pa.lib.Type_BOOL: bool, 24 pa.lib.Type_UINT8: int, 25 pa.lib.Type_UINT16: int, 26 pa.lib.Type_UINT32: int, 27 pa.lib.Type_UINT64: int, 28 pa.lib.Type_INT8: int, 29 pa.lib.Type_INT16: int, 30 pa.lib.Type_INT32: int, 31 pa.lib.Type_INT64: int, 32 pa.lib.Type_HALF_FLOAT: float, 33 pa.lib.Type_FLOAT: float, 34 pa.lib.Type_DOUBLE: float, 35 # pa.lib.Type_DECIMAL128: Decimal128Scalar, 36 # pa.lib.Type_DECIMAL256: Decimal256Scalar, 37 pa.lib.Type_DATE32: date, 38 pa.lib.Type_DATE64: date, 39 pa.lib.Type_TIME32: date, 40 pa.lib.Type_TIME64: date, 41 pa.lib.Type_TIMESTAMP: date, 42 pa.lib.Type_DURATION: int, 43 pa.lib.Type_BINARY: bytes, 44 pa.lib.Type_LARGE_BINARY: vlen, 45 # pa.lib.Type_FIXED_SIZE_BINARY: bytes, 46 # pa.lib.Type_STRING: str, 47 pa.lib.Type_LARGE_STRING: vlen, 48 # pa.lib.Type_LIST: list, 49 # pa.lib.Type_LARGE_LIST: list, 50 # pa.lib.Type_FIXED_SIZE_LIST: list, 51 # pa.lib.Type_STRUCT: dict, 52 # pa.lib.Type_MAP: dict, 53 # pa.lib.Type_DICTIONARY: dict, 54 # pa.lib.Type_SPARSE_UNION: UnionScalar, 55 # pa.lib.Type_DENSE_UNION: UnionScalar, 56 } 57 return arrow_to_vd_typemap.get(t.id, anytype) 58 59 class ArrowSheet(Sheet): 60 def iterload(self): 61 pa = vd.importExternal('pyarrow') 62 63 try: 64 with pa.OSFile(str(self.source), 'rb') as fp: 65 self.coldata = pa.ipc.open_file(fp).read_all() 66 except pa.lib.ArrowInvalid as e: 67 
with pa.OSFile(str(self.source), 'rb') as fp: 68 self.coldata = pa.ipc.open_stream(fp).read_all() 69 70 self.columns = [] 71 for colnum, col in enumerate(self.coldata): 72 coltype = arrow_to_vdtype(self.coldata.schema.types[colnum]) 73 colname = self.coldata.schema.names[colnum] 74 75 self.addColumn(Column(colname, type=coltype, expr=colnum, 76 getter=lambda c,r: c.sheet.coldata[c.expr][r[0]].as_py())) 77 78 for rownum in range(max(len(c) for c in self.coldata)): 79 yield [rownum] 80 81 82 @VisiData.api 83 def save_arrow(vd, p, sheet, streaming=False): 84 pa = vd.importExternal('pyarrow') 85 np = vd.importExternal('numpy') 86 87 typemap = { 88 anytype: pa.string(), 89 int: pa.int64(), 90 vlen: pa.int64(), 91 float: pa.float64(), 92 str: pa.string(), 93 date: pa.date64(), 94 } 95 96 for t in vd.numericTypes: 97 if t not in typemap: 98 typemap[t] = pa.float64() 99 100 databycol = defaultdict(list) # col -> [values] 101 102 for typedvals in sheet.iterdispvals(format=False): 103 for col, val in typedvals.items(): 104 if isinstance(val, TypedWrapper): 105 val = None 106 107 databycol[col].append(val) 108 109 data = [pa.array(vals, type=typemap.get(col.type, pa.string())) for col, vals in databycol.items()] 110 111 schema = pa.schema([ 112 (c.name, typemap.get(c.type, pa.string())) 113 for c in sheet.visibleCols 114 ]) 115 with p.open_bytes(mode='w') as outf: 116 if streaming: 117 with pa.ipc.new_stream(outf, schema) as writer: 118 writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols])) 119 else: 120 with pa.ipc.new_file(outf, schema) as writer: 121 writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols])) 122 123 124 @VisiData.api 125 def save_arrows(vd, p, sheet): 126 return vd.save_arrow(p, sheet, streaming=True) 127 [end of visidata/loaders/arrow.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/visidata/loaders/arrow.py b/visidata/loaders/arrow.py --- a/visidata/loaders/arrow.py +++ b/visidata/loaders/arrow.py @@ -44,7 +44,7 @@ pa.lib.Type_LARGE_BINARY: vlen, # pa.lib.Type_FIXED_SIZE_BINARY: bytes, # pa.lib.Type_STRING: str, - pa.lib.Type_LARGE_STRING: vlen, +# pa.lib.Type_LARGE_STRING: vlen, #2003 # pa.lib.Type_LIST: list, # pa.lib.Type_LARGE_LIST: list, # pa.lib.Type_FIXED_SIZE_LIST: list,
{"golden_diff": "diff --git a/visidata/loaders/arrow.py b/visidata/loaders/arrow.py\n--- a/visidata/loaders/arrow.py\n+++ b/visidata/loaders/arrow.py\n@@ -44,7 +44,7 @@\n pa.lib.Type_LARGE_BINARY: vlen,\n # pa.lib.Type_FIXED_SIZE_BINARY: bytes,\n # pa.lib.Type_STRING: str,\n- pa.lib.Type_LARGE_STRING: vlen,\n+# pa.lib.Type_LARGE_STRING: vlen, #2003\n # pa.lib.Type_LIST: list,\n # pa.lib.Type_LARGE_LIST: list,\n # pa.lib.Type_FIXED_SIZE_LIST: list,\n", "issue": "Parquet Large String columns are typed vlen by default\n**Small description**\r\n\r\nI have a parquet file which contains string columns:\r\n[australian_public_holidays_2014_2020.zip](https://github.com/saulpw/visidata/files/12389440/australian_public_holidays_2014_2020.zip)\r\n\r\n\r\n**Expected result**\r\n\r\nIn DuckDB, it looks like this:\r\n\r\n\r\n```\r\nD select * from 'australian_public_holidays_2014_2020.parquet';\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\n\u2502 Date \u2502 Holiday_Name \u2502 Information \u2502 More_Information \u2502 Jurisdiction \u2502\r\n\u2502 date \u2502 varchar \u2502 varchar \u2502 varchar \u2502 varchar \u2502\r\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\r\n\u2502 2014-01-01 \u2502 New Year's Day \u2502 New Year's Day is . \u2502 \u2502 act \u2502\r\n\u2502 2014-01-27 \u2502 Australia Day \u2502 Always celebrated . \u2502 \u2502 act \u2502\r\n\u2502 2014-03-10 \u2502 Canberra Day \u2502 Held on the second. 
\u2502 http://www.cmd.act.gov.au/communication/holidays \u2502 act \u2502\r\n```\r\n\r\n**Actual result with screenshot**\r\n\r\nIn Visidata with pyarrow installed, it looks like this:\r\n\r\n```\r\nDate Holiday_Name Information More_Information Jurisdiction \r\n2014-01-01 14 87 3 \r\n2014-01-27 13 31 3 \r\n2014-03-10 12 148 48 3 \r\n```\r\n\r\nNote that the numbers are the _exact_ length of the missing strings!\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n\r\n```\r\nvd australian_public_holidays_2014_2020.parquet\r\n```\r\n\r\n(Skipping the command-log as simply opening the file reproduces the issue)\r\n\r\n**Additional context**\r\n\r\n```\r\n\u276f vd --version\r\nsaul.pw/VisiData v2.11.1\r\n```\r\n\r\nPython 3.10.12\r\n\n", "before_files": [{"content": "from collections import defaultdict\n\nfrom visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd\n\n\n\[email protected]\ndef open_arrow(vd, p):\n 'Apache Arrow IPC file format'\n return ArrowSheet(p.name, source=p)\n\n\[email protected]\ndef open_arrows(vd, p):\n 'Apache Arrow IPC streaming format'\n return ArrowSheet(p.name, source=p)\n\n\ndef arrow_to_vdtype(t):\n pa = vd.importExternal('pyarrow')\n\n arrow_to_vd_typemap = {\n pa.lib.Type_BOOL: bool,\n pa.lib.Type_UINT8: int,\n pa.lib.Type_UINT16: int,\n pa.lib.Type_UINT32: int,\n pa.lib.Type_UINT64: int,\n pa.lib.Type_INT8: int,\n pa.lib.Type_INT16: int,\n pa.lib.Type_INT32: int,\n pa.lib.Type_INT64: int,\n pa.lib.Type_HALF_FLOAT: float,\n pa.lib.Type_FLOAT: float,\n pa.lib.Type_DOUBLE: float,\n# pa.lib.Type_DECIMAL128: Decimal128Scalar,\n# pa.lib.Type_DECIMAL256: Decimal256Scalar,\n pa.lib.Type_DATE32: date,\n pa.lib.Type_DATE64: date,\n pa.lib.Type_TIME32: date,\n pa.lib.Type_TIME64: date,\n pa.lib.Type_TIMESTAMP: date,\n pa.lib.Type_DURATION: int,\n pa.lib.Type_BINARY: bytes,\n pa.lib.Type_LARGE_BINARY: vlen,\n# pa.lib.Type_FIXED_SIZE_BINARY: bytes,\n# pa.lib.Type_STRING: str,\n pa.lib.Type_LARGE_STRING: vlen,\n# pa.lib.Type_LIST: list,\n# pa.lib.Type_LARGE_LIST: list,\n# pa.lib.Type_FIXED_SIZE_LIST: list,\n# pa.lib.Type_STRUCT: dict,\n# pa.lib.Type_MAP: dict,\n# pa.lib.Type_DICTIONARY: dict,\n# pa.lib.Type_SPARSE_UNION: UnionScalar,\n# pa.lib.Type_DENSE_UNION: UnionScalar,\n }\n return arrow_to_vd_typemap.get(t.id, anytype)\n\nclass ArrowSheet(Sheet):\n def iterload(self):\n pa = vd.importExternal('pyarrow')\n\n try:\n with pa.OSFile(str(self.source), 'rb') as fp:\n self.coldata = pa.ipc.open_file(fp).read_all()\n except pa.lib.ArrowInvalid as e:\n with pa.OSFile(str(self.source), 'rb') as fp:\n self.coldata = pa.ipc.open_stream(fp).read_all()\n\n self.columns = []\n for colnum, col in enumerate(self.coldata):\n coltype = arrow_to_vdtype(self.coldata.schema.types[colnum])\n colname = self.coldata.schema.names[colnum]\n\n self.addColumn(Column(colname, type=coltype, expr=colnum,\n getter=lambda c,r: c.sheet.coldata[c.expr][r[0]].as_py()))\n\n for rownum in range(max(len(c) for c in self.coldata)):\n yield [rownum]\n\n\[email protected]\ndef save_arrow(vd, p, sheet, streaming=False):\n pa = vd.importExternal('pyarrow')\n np = vd.importExternal('numpy')\n\n typemap = {\n anytype: pa.string(),\n int: pa.int64(),\n vlen: pa.int64(),\n float: pa.float64(),\n str: pa.string(),\n date: pa.date64(),\n }\n\n for t in vd.numericTypes:\n if t not in typemap:\n typemap[t] = pa.float64()\n\n databycol = defaultdict(list) # col -> [values]\n\n for typedvals in sheet.iterdispvals(format=False):\n for col, val in typedvals.items():\n if isinstance(val, 
TypedWrapper):\n val = None\n\n databycol[col].append(val)\n\n data = [pa.array(vals, type=typemap.get(col.type, pa.string())) for col, vals in databycol.items()]\n\n schema = pa.schema([\n (c.name, typemap.get(c.type, pa.string()))\n for c in sheet.visibleCols\n ])\n with p.open_bytes(mode='w') as outf:\n if streaming:\n with pa.ipc.new_stream(outf, schema) as writer:\n writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))\n else:\n with pa.ipc.new_file(outf, schema) as writer:\n writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))\n\n\[email protected]\ndef save_arrows(vd, p, sheet):\n return vd.save_arrow(p, sheet, streaming=True)\n", "path": "visidata/loaders/arrow.py"}]}
2,413
145
gh_patches_debug_2849
rasdani/github-patches
git_diff
ipython__ipython-9202
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> IPythonLexer test failure with pygments 2.1 Jenkins is showing us a test failure on Windows: https://jenkins.jupyter.org/job/windows-multi/_pyversion=3.5,label=windows/504/testReport/ It looks like `$HOME` is unexpectedly being split into two tokens. I suspect it's failing since pygments 2.1 was released. I see the same locally on Linux, but it's not failing on Travis (maybe we're not installing pygments so those tests are skipped?). @abalkin and @chebee7i have done the most signficant work on the lexer machinery. Any ideas? </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """Setup script for IPython. 4 5 Under Posix environments it works like a typical setup.py script. 6 Under Windows, the command sdist is not supported, since IPython 7 requires utilities which are not available under Windows.""" 8 9 #----------------------------------------------------------------------------- 10 # Copyright (c) 2008-2011, IPython Development Team. 11 # Copyright (c) 2001-2007, Fernando Perez <[email protected]> 12 # Copyright (c) 2001, Janko Hauser <[email protected]> 13 # Copyright (c) 2001, Nathaniel Gray <[email protected]> 14 # 15 # Distributed under the terms of the Modified BSD License. 16 # 17 # The full license is in the file COPYING.rst, distributed with this software. 18 #----------------------------------------------------------------------------- 19 20 #----------------------------------------------------------------------------- 21 # Minimal Python version sanity check 22 #----------------------------------------------------------------------------- 23 from __future__ import print_function 24 25 import sys 26 27 # This check is also made in IPython/__init__, don't forget to update both when 28 # changing Python version requirements. 29 v = sys.version_info 30 if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)): 31 error = "ERROR: IPython requires Python version 2.7 or 3.3 or above." 32 print(error, file=sys.stderr) 33 sys.exit(1) 34 35 PY3 = (sys.version_info[0] >= 3) 36 37 # At least we're on the python version we need, move on. 38 39 #------------------------------------------------------------------------------- 40 # Imports 41 #------------------------------------------------------------------------------- 42 43 # Stdlib imports 44 import os 45 46 from glob import glob 47 48 # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly 49 # update it when the contents of directories change. 50 if os.path.exists('MANIFEST'): os.remove('MANIFEST') 51 52 from distutils.core import setup 53 54 # Our own imports 55 from setupbase import target_update 56 57 from setupbase import ( 58 setup_args, 59 find_packages, 60 find_package_data, 61 check_package_data_first, 62 find_entry_points, 63 build_scripts_entrypt, 64 find_data_files, 65 git_prebuild, 66 install_symlinked, 67 install_lib_symlink, 68 install_scripts_for_symlink, 69 unsymlink, 70 ) 71 72 isfile = os.path.isfile 73 pjoin = os.path.join 74 75 #------------------------------------------------------------------------------- 76 # Handle OS specific things 77 #------------------------------------------------------------------------------- 78 79 if os.name in ('nt','dos'): 80 os_name = 'windows' 81 else: 82 os_name = os.name 83 84 # Under Windows, 'sdist' has not been supported. 
Now that the docs build with 85 # Sphinx it might work, but let's not turn it on until someone confirms that it 86 # actually works. 87 if os_name == 'windows' and 'sdist' in sys.argv: 88 print('The sdist command is not available under Windows. Exiting.') 89 sys.exit(1) 90 91 92 #------------------------------------------------------------------------------- 93 # Things related to the IPython documentation 94 #------------------------------------------------------------------------------- 95 96 # update the manuals when building a source dist 97 if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'): 98 99 # List of things to be updated. Each entry is a triplet of args for 100 # target_update() 101 to_update = [ 102 ('docs/man/ipython.1.gz', 103 ['docs/man/ipython.1'], 104 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'), 105 ] 106 107 108 [ target_update(*t) for t in to_update ] 109 110 #--------------------------------------------------------------------------- 111 # Find all the packages, package data, and data_files 112 #--------------------------------------------------------------------------- 113 114 packages = find_packages() 115 package_data = find_package_data() 116 117 data_files = find_data_files() 118 119 setup_args['packages'] = packages 120 setup_args['package_data'] = package_data 121 setup_args['data_files'] = data_files 122 123 #--------------------------------------------------------------------------- 124 # custom distutils commands 125 #--------------------------------------------------------------------------- 126 # imports here, so they are after setuptools import if there was one 127 from distutils.command.sdist import sdist 128 from distutils.command.upload import upload 129 130 class UploadWindowsInstallers(upload): 131 132 description = "Upload Windows installers to PyPI (only used from tools/release_windows.py)" 133 user_options = upload.user_options + [ 134 ('files=', 'f', 'exe file (or glob) to upload') 135 ] 136 def initialize_options(self): 137 upload.initialize_options(self) 138 meta = self.distribution.metadata 139 base = '{name}-{version}'.format( 140 name=meta.get_name(), 141 version=meta.get_version() 142 ) 143 self.files = os.path.join('dist', '%s.*.exe' % base) 144 145 def run(self): 146 for dist_file in glob(self.files): 147 self.upload_file('bdist_wininst', 'any', dist_file) 148 149 setup_args['cmdclass'] = { 150 'build_py': \ 151 check_package_data_first(git_prebuild('IPython')), 152 'sdist' : git_prebuild('IPython', sdist), 153 'upload_wininst' : UploadWindowsInstallers, 154 'symlink': install_symlinked, 155 'install_lib_symlink': install_lib_symlink, 156 'install_scripts_sym': install_scripts_for_symlink, 157 'unsymlink': unsymlink, 158 } 159 160 161 #--------------------------------------------------------------------------- 162 # Handle scripts, dependencies, and setuptools specific things 163 #--------------------------------------------------------------------------- 164 165 # For some commands, use setuptools. Note that we do NOT list install here! 
166 # If you want a setuptools-enhanced install, just run 'setupegg.py install' 167 needs_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm', 168 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel', 169 'egg_info', 'easy_install', 'upload', 'install_egg_info', 170 )) 171 172 if len(needs_setuptools.intersection(sys.argv)) > 0: 173 import setuptools 174 175 # This dict is used for passing extra arguments that are setuptools 176 # specific to setup 177 setuptools_extra_args = {} 178 179 # setuptools requirements 180 181 extras_require = dict( 182 parallel = ['ipyparallel'], 183 qtconsole = ['qtconsole'], 184 doc = ['Sphinx>=1.3'], 185 test = ['nose>=0.10.1', 'requests', 'testpath'], 186 terminal = [], 187 kernel = ['ipykernel'], 188 nbformat = ['nbformat'], 189 notebook = ['notebook', 'ipywidgets'], 190 nbconvert = ['nbconvert'], 191 ) 192 install_requires = [ 193 'setuptools>=18.5', 194 'decorator', 195 'pickleshare', 196 'simplegeneric>0.8', 197 'traitlets', 198 ] 199 200 # Platform-specific dependencies: 201 # This is the correct way to specify these, 202 # but requires pip >= 6. pip < 6 ignores these. 203 204 extras_require.update({ 205 ':sys_platform != "win32"': ['pexpect'], 206 ':sys_platform == "darwin"': ['appnope'], 207 ':sys_platform == "darwin" and platform_python_implementation == "CPython"': ['gnureadline'], 208 'terminal:sys_platform == "win32"': ['pyreadline>=2'], 209 'test:python_version == "2.7"': ['mock'], 210 }) 211 # FIXME: re-specify above platform dependencies for pip < 6 212 # These would result in non-portable bdists. 213 if not any(arg.startswith('bdist') for arg in sys.argv): 214 if sys.version_info < (3, 3): 215 extras_require['test'].append('mock') 216 217 if sys.platform == 'darwin': 218 install_requires.extend(['appnope']) 219 have_readline = False 220 try: 221 import readline 222 except ImportError: 223 pass 224 else: 225 if 'libedit' not in readline.__doc__: 226 have_readline = True 227 if not have_readline: 228 install_requires.extend(['gnureadline']) 229 230 if sys.platform.startswith('win'): 231 extras_require['terminal'].append('pyreadline>=2.0') 232 else: 233 install_requires.append('pexpect') 234 235 # workaround pypa/setuptools#147, where setuptools misspells 236 # platform_python_implementation as python_implementation 237 if 'setuptools' in sys.modules: 238 for key in list(extras_require): 239 if 'platform_python_implementation' in key: 240 new_key = key.replace('platform_python_implementation', 'python_implementation') 241 extras_require[new_key] = extras_require.pop(key) 242 243 everything = set() 244 for key, deps in extras_require.items(): 245 if ':' not in key: 246 everything.update(deps) 247 extras_require['all'] = everything 248 249 if 'setuptools' in sys.modules: 250 setuptools_extra_args['zip_safe'] = False 251 setuptools_extra_args['entry_points'] = { 252 'console_scripts': find_entry_points(), 253 'pygments.lexers': [ 254 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer', 255 'ipython = IPython.lib.lexers:IPythonLexer', 256 'ipython3 = IPython.lib.lexers:IPython3Lexer', 257 ], 258 } 259 setup_args['extras_require'] = extras_require 260 requires = setup_args['install_requires'] = install_requires 261 262 # Script to be run by the windows binary installer after the default setup 263 # routine, to add shortcuts and similar windows-only things. Windows 264 # post-install scripts MUST reside in the scripts/ dir, otherwise distutils 265 # doesn't find them. 
266 if 'bdist_wininst' in sys.argv: 267 if len(sys.argv) > 2 and \ 268 ('sdist' in sys.argv or 'bdist_rpm' in sys.argv): 269 print("ERROR: bdist_wininst must be run alone. Exiting.", file=sys.stderr) 270 sys.exit(1) 271 setup_args['data_files'].append( 272 ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')]) 273 setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')] 274 setup_args['options'] = {"bdist_wininst": 275 {"install_script": 276 "ipython_win_post_install.py"}} 277 278 else: 279 # scripts has to be a non-empty list, or install_scripts isn't called 280 setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()] 281 282 setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt 283 284 #--------------------------------------------------------------------------- 285 # Do the actual setup now 286 #--------------------------------------------------------------------------- 287 288 setup_args.update(setuptools_extra_args) 289 290 291 292 def main(): 293 setup(**setup_args) 294 295 if __name__ == '__main__': 296 main() 297 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -182,7 +182,7 @@ parallel = ['ipyparallel'], qtconsole = ['qtconsole'], doc = ['Sphinx>=1.3'], - test = ['nose>=0.10.1', 'requests', 'testpath'], + test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments'], terminal = [], kernel = ['ipykernel'], nbformat = ['nbformat'],
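For context on the failure mode itself (the diff above only ensures pygments is installed so the lexer tests run on CI rather than being skipped), a rough local reproduction sketch, assuming IPython and a recent pygments are importable; the exact split of `$HOME` into tokens depends on the pygments version, which is what the Windows run flagged:

```python
from IPython.lib.lexers import IPythonLexer

# Dump the token stream for a shell-escape line and inspect how $HOME is tokenized.
for token_type, value in IPythonLexer().get_tokens("!cp $HOME/a.txt ."):
    print(token_type, repr(value))
```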
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -182,7 +182,7 @@\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n- test = ['nose>=0.10.1', 'requests', 'testpath'],\n+ test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n", "issue": "IPythonLexer test failure with pygments 2.1\nJenkins is showing us a test failure on Windows:\n\nhttps://jenkins.jupyter.org/job/windows-multi/_pyversion=3.5,label=windows/504/testReport/\n\nIt looks like `$HOME` is unexpectedly being split into two tokens. I suspect it's failing since pygments 2.1 was released. I see the same locally on Linux, but it's not failing on Travis (maybe we're not installing pygments so those tests are skipped?).\n\n@abalkin and @chebee7i have done the most signficant work on the lexer machinery. Any ideas?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. 
distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# 
Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n test = ['nose>=0.10.1', 'requests', 'testpath'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook', 'ipywidgets'],\n nbconvert = ['nbconvert'],\n)\ninstall_requires = [\n 'setuptools>=18.5',\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. pip < 6 ignores these.\n\nextras_require.update({\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope'],\n ':sys_platform == \"darwin\" and platform_python_implementation == \"CPython\"': ['gnureadline'],\n 'terminal:sys_platform == \"win32\"': ['pyreadline>=2'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope'])\n have_readline = False\n try:\n import readline\n except ImportError:\n pass\n else:\n if 'libedit' not in readline.__doc__:\n have_readline = True\n if not have_readline:\n install_requires.extend(['gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n \n # workaround pypa/setuptools#147, where setuptools misspells\n # platform_python_implementation as python_implementation\n if 'setuptools' in sys.modules:\n for key in list(extras_require):\n if 'platform_python_implementation' in key:\n new_key = key.replace('platform_python_implementation', 'python_implementation')\n extras_require[new_key] = extras_require.pop(key)\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. 
Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]}
3,780
129
gh_patches_debug_11472
rasdani/github-patches
git_diff
google__trax-1138
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Optimizer tree_init returns a slots list, but tree_update returns a slots tuple ### Description `tree_init` and `tree_update` are not consistent. One returns a list for the slots, the other a tuple. It is a super minor detail but I was trying to conditionally run a `tree_update` with `jax.cond`, and this minor difference made that break, since the PyTreeDefs were different. Casting the slots list comprehension to a tuple ([here](https://github.com/google/trax/blob/0ca17db895c7d9bb203e66e074f49e9481b87513/trax/optimizers/base.py#L119-L120)) solved this for me, but I'm not sure if you want to go with tuple or list so I raise an issue instead of PR. ### Environment information ``` OS: Ubuntu 18.04 $ pip freeze | grep trax -e [email protected]:google/trax.git@0ca17db895c7d9bb203e66e074f49e9481b87513#egg=trax (latest commit from Sep 30) $ pip freeze | grep tensor tensorflow==2.3.1 $ pip freeze | grep jax jax==0.2.0 jaxlib @ https://storage.googleapis.com/jax-releases/cuda110/jaxlib-0.1.55-cp36-none-manylinux2010_x86_64.whl $ python -V Python 3.6.9 ``` ### For bugs: reproduction and error logs You can add the following lines to `optimizers_test.py` and see the behavior. ``` # Steps to reproduce: # Show that tree_update returns slots in a tuple not list old_slots = opt_2.slots grad_tree = np.zeros_like(weight_tree) _, new_slots, _ = opt_2.tree_update(1, grad_tree, weight_tree, opt_2.slots, opt_2.opt_params) self.assertIsInstance(old_slots, list) # PASS self.assertIsInstance(opt_2.slots, list) # FAIL. it's a tuple self.assertIsInstance(new_slots, list) # FAIL. it's a tuple ``` ``` # Error logs: TypeError: true_fun and false_fun output must have same type structure, got PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(tuple, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]) and PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(list, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]). ``` </issue> <code> [start of trax/optimizers/base.py] 1 # coding=utf-8 2 # Copyright 2020 The Trax Authors. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 
15 16 # Lint as: python3 17 """Trax base optimizer class.""" 18 19 from trax import fastmath 20 from trax.fastmath import numpy as jnp 21 22 23 class Optimizer(object): 24 """Base class for optimizers that work hand in hand with Trax layers. 25 26 To define an optimizer subclass, specify its behavior with respect to a 27 single level/node in the network (e.g., a single dense layer): 28 29 - `init`: how to create/initialize optimizer-internal weights ("slots") 30 whose shape matches the node's weight shape. 31 - `update`: how to use gradient information to update node weights and 32 optimizer slots. 33 34 The Trax runtime combines these node-local computations into weight updates 35 and slot updates for the whole tree of layers in the model. 36 """ 37 38 def __init__(self, learning_rate=0.01, clip_grad_norm=None, 39 **init_opt_params): 40 """Sets initial hyperparameter values for this optimizer. 41 42 Takes initial optimizer parameters as keyword arguments. These values can 43 be changed between training steps, e.g., for learning rate schedules. 44 45 If you want your subclass to expose hyperparameters for gin configuration, 46 override this constructor and use explicitly named keyword arguments. See 47 `momentum.Momentum.__init__` for one such example. 48 49 Args: 50 learning_rate: The initial learning rate. 51 clip_grad_norm: float; the value to which gradients will be clipped. 52 **init_opt_params: Initial values of any additional optimizer parameters. 53 """ 54 init_opt_params['learning_rate'] = learning_rate 55 self._init_opt_params = { 56 name: jnp.array(value) for (name, value) in init_opt_params.items() 57 } 58 self._slots = None 59 # Gradient clipping happens with respect to the norm of the whole gradient 60 # tree, so it is not passed to single-slot updates, but done in this class 61 # for the whole gradient tree. 62 self._clip_grad_norm = clip_grad_norm 63 64 def init(self, weights): 65 """Creates optimizer slots for the given parameters. 66 67 Args: 68 weights: Trainable weights for one layer. Optimizer slots typically match 69 the data shape and type of the given layer weights. 70 """ 71 raise NotImplementedError 72 73 def update(self, step, grads, weights, slots, opt_params): 74 """Computes one step's worth of updates. 75 76 The update computes both new weights for the layer/node and new slot values 77 for the optimizer. 78 79 Args: 80 step: Current step number in the training process. 81 grads: Gradients for the weights of the sublayer. 82 weights: Current weights for the sublayer. 83 slots: Optimizer slots. 84 opt_params: Optimizer hyperparameters (e.g. learning rate, momentum). 85 86 Returns: 87 Tuple of (new_weights, new_slots). 88 """ 89 raise NotImplementedError 90 91 @property 92 def slots(self): 93 return self._slots 94 95 @slots.setter 96 def slots(self, slots): 97 self._slots = slots 98 99 @property 100 def opt_params(self): 101 return self._init_opt_params 102 103 @opt_params.setter 104 def opt_params(self, opt_params): 105 self._init_opt_params = opt_params 106 107 def tree_init(self, weight_tree): 108 """Assembles node-local initializations into full-tree initialization. 109 110 Args: 111 weight_tree: Weights for an entire model, in a tree that matches the 112 model's layer structure. 113 114 Returns: 115 Tuple `(slots, opt_params)`, where `slots` are the initialized optimizer 116 slot values and `opt_params` are optimizer hyperparameters (e.g., 117 learning rate, momentum). 
118 """ 119 self._slots = [self.init(weight) 120 for weight in fastmath.tree_flatten(weight_tree)] 121 return ( 122 self._slots, 123 self._init_opt_params, 124 ) 125 126 def tree_update(self, step, grad_tree, weight_tree, slots, opt_params): 127 """Assembles node-local weight and slot updates for the full layer tree. 128 129 Args: 130 step: Current step number in the training process. 131 grad_tree: Gradients for the entire model, in a tree that matches the 132 model's layer structure. 133 weight_tree: Current weights for the entire model, in a tree that matches 134 the model's layer structure. 135 slots: Optimizer slots. 136 opt_params: Optimizer hyperparameters (e.g. learning rate, momentum). 137 138 Returns: 139 Tuple `(weights, slots)`, where `weights` are the optimizer-updated 140 weights for the whole model (in a tree matching the model's layer 141 structure) and `slots` are the updated optimizer slot values. 142 """ 143 grads_flat = fastmath.tree_flatten(grad_tree) 144 grads_norm = self._l2_norm(grads_flat) 145 if self._clip_grad_norm is not None: 146 max_norm = self._clip_grad_norm 147 grads_flat = [jnp.where(grads_norm < max_norm, # pylint: disable=g-complex-comprehension 148 g, 149 g * (max_norm / grads_norm)) 150 for g in grads_flat] 151 weights_flat = fastmath.tree_flatten(weight_tree) 152 weights_norm = self._l2_norm(weights_flat) 153 updated_pairs = [ 154 self._update_and_check(step, grad, weight, slot, opt_params) 155 for (grad, weight, slot) in zip(grads_flat, weights_flat, slots) 156 ] 157 new_weights_flat, self.slots = zip(*updated_pairs) 158 new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree) 159 metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm} 160 return new_weights, self.slots, metrics 161 162 def _l2_norm(self, flat_list): 163 """Returns the aggregate L2 norm of a list of tensors.""" 164 if fastmath.is_backend(fastmath.Backend.JAX): 165 norm = jnp.sqrt(sum(jnp.vdot(x, x) for x in flat_list)) 166 else: # TODO(lukaszkaiser): add vdot to TF-numpy 167 norm = jnp.sqrt(sum(jnp.sum(x*x) for x in flat_list)) 168 return norm 169 170 def _update_and_check(self, step, grads, weights, slots, opt_params): 171 """Updates a single weight array and checks types.""" 172 new_weights, new_slots = self.update( 173 step, grads, weights, slots, opt_params) 174 if isinstance(weights, jnp.ndarray): 175 if not isinstance(new_weights, jnp.ndarray): 176 raise ValueError( 177 f'New weight values should be of type jnp.ndarray or a subclass; ' 178 f'instead got {type(new_weights)}.') 179 if new_weights.dtype != weights.dtype: 180 raise ValueError( 181 f'New weight values dtype ({new_weights.dtype}) does not match ' 182 f'the old one ({weights.dtype}).') 183 return new_weights, new_slots 184 185 186 class SGD(Optimizer): 187 """Stochastic gradient descent (SGD) optimizer. 188 189 A simple optimizer with no weights ("slots") of its own. 190 """ 191 192 def init(self, weights): 193 return None 194 195 def update(self, step, grads, weights, slots, opt_params): 196 del step, slots 197 lr = opt_params['learning_rate'] 198 new_weights = weights - (lr * grads).astype(weights.dtype) 199 return new_weights, None 200 201 202 # Utilities. 203 204 205 def l2_norm(tree): 206 """Compute the l2 norm of a pytree of arrays. 
Useful for weight decay.""" 207 leaves = fastmath.tree_flatten(tree) 208 return jnp.sqrt(sum(jnp.vdot(x, x) for x in leaves)) 209 210 211 def clip_grads(grad_tree, max_norm): 212 """Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.""" 213 norm = l2_norm(grad_tree) 214 normalize = lambda g: jnp.where(norm < max_norm, g, g * (max_norm / norm)) 215 return fastmath.nested_map(grad_tree, normalize) 216 [end of trax/optimizers/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/trax/optimizers/base.py b/trax/optimizers/base.py --- a/trax/optimizers/base.py +++ b/trax/optimizers/base.py @@ -154,7 +154,7 @@ self._update_and_check(step, grad, weight, slot, opt_params) for (grad, weight, slot) in zip(grads_flat, weights_flat, slots) ] - new_weights_flat, self.slots = zip(*updated_pairs) + new_weights_flat, self.slots = map(list, zip(*updated_pairs)) new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree) metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm} return new_weights, self.slots, metrics
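The reason a tuple-vs-list difference matters here, and why the diff maps both unzipped outputs through `list`, is that JAX compares whole pytree structures rather than just leaves. A minimal check, assuming jax is installed (the slot values are placeholders):

```python
from jax import tree_util

list_slots = [(0.0, 0.0), (1.0, 1.0)]
tuple_slots = ((0.0, 0.0), (1.0, 1.0))

# Identical leaves, but the outer container differs, so the PyTreeDefs differ --
# the same structural mismatch jax.lax.cond reports in the error log quoted in the issue.
print(tree_util.tree_structure(list_slots))
print(tree_util.tree_structure(tuple_slots))
print(tree_util.tree_structure(list_slots) == tree_util.tree_structure(tuple_slots))  # False
```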
{"golden_diff": "diff --git a/trax/optimizers/base.py b/trax/optimizers/base.py\n--- a/trax/optimizers/base.py\n+++ b/trax/optimizers/base.py\n@@ -154,7 +154,7 @@\n self._update_and_check(step, grad, weight, slot, opt_params)\n for (grad, weight, slot) in zip(grads_flat, weights_flat, slots)\n ]\n- new_weights_flat, self.slots = zip(*updated_pairs)\n+ new_weights_flat, self.slots = map(list, zip(*updated_pairs))\n new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree)\n metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm}\n return new_weights, self.slots, metrics\n", "issue": "Optimizer tree_init returns a slots list, but tree_update returns a slots tuple\n### Description\r\n\r\n`tree_init` and `tree_update` are not consistent. One returns a list for the slots, the other a tuple.\r\n\r\nIt is a super minor detail but I was trying to conditionally run a `tree_update` with `jax.cond`, and this minor difference made that break, since the PyTreeDefs were different.\r\n\r\nCasting the slots list comprehension to a tuple ([here](https://github.com/google/trax/blob/0ca17db895c7d9bb203e66e074f49e9481b87513/trax/optimizers/base.py#L119-L120)) solved this for me, but I'm not sure if you want to go with tuple or list so I raise an issue instead of PR.\r\n\r\n### Environment information\r\n```\r\nOS: Ubuntu 18.04\r\n\r\n$ pip freeze | grep trax\r\n-e [email protected]:google/trax.git@0ca17db895c7d9bb203e66e074f49e9481b87513#egg=trax\r\n(latest commit from Sep 30)\r\n\r\n$ pip freeze | grep tensor\r\ntensorflow==2.3.1\r\n\r\n$ pip freeze | grep jax\r\njax==0.2.0\r\njaxlib @ https://storage.googleapis.com/jax-releases/cuda110/jaxlib-0.1.55-cp36-none-manylinux2010_x86_64.whl\r\n\r\n$ python -V\r\nPython 3.6.9\r\n```\r\n\r\n### For bugs: reproduction and error logs\r\n\r\nYou can add the following lines to `optimizers_test.py` and see the behavior.\r\n\r\n```\r\n# Steps to reproduce:\r\n\r\n\r\n # Show that tree_update returns slots in a tuple not list\r\n old_slots = opt_2.slots\r\n grad_tree = np.zeros_like(weight_tree)\r\n _, new_slots, _ = opt_2.tree_update(1, grad_tree, weight_tree, opt_2.slots, opt_2.opt_params)\r\n self.assertIsInstance(old_slots, list) # PASS\r\n self.assertIsInstance(opt_2.slots, list) # FAIL. it's a tuple\r\n self.assertIsInstance(new_slots, list) # FAIL. 
it's a tuple\r\n\r\n```\r\n\r\n```\r\n# Error logs:\r\n\r\n\r\nTypeError: true_fun and false_fun output must have same type structure, got PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(tuple, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]) and PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(list, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]).\r\n\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Trax base optimizer class.\"\"\"\n\nfrom trax import fastmath\nfrom trax.fastmath import numpy as jnp\n\n\nclass Optimizer(object):\n \"\"\"Base class for optimizers that work hand in hand with Trax layers.\n\n To define an optimizer subclass, specify its behavior with respect to a\n single level/node in the network (e.g., a single dense layer):\n\n - `init`: how to create/initialize optimizer-internal weights (\"slots\")\n whose shape matches the node's weight shape.\n - `update`: how to use gradient information to update node weights and\n optimizer slots.\n\n The Trax runtime combines these node-local computations into weight updates\n and slot updates for the whole tree of layers in the model.\n \"\"\"\n\n def __init__(self, learning_rate=0.01, clip_grad_norm=None,\n **init_opt_params):\n \"\"\"Sets initial hyperparameter values for this optimizer.\n\n Takes initial optimizer parameters as keyword arguments. These values can\n be changed between training steps, e.g., for learning rate schedules.\n\n If you want your subclass to expose hyperparameters for gin configuration,\n override this constructor and use explicitly named keyword arguments. 
See\n `momentum.Momentum.__init__` for one such example.\n\n Args:\n learning_rate: The initial learning rate.\n clip_grad_norm: float; the value to which gradients will be clipped.\n **init_opt_params: Initial values of any additional optimizer parameters.\n \"\"\"\n init_opt_params['learning_rate'] = learning_rate\n self._init_opt_params = {\n name: jnp.array(value) for (name, value) in init_opt_params.items()\n }\n self._slots = None\n # Gradient clipping happens with respect to the norm of the whole gradient\n # tree, so it is not passed to single-slot updates, but done in this class\n # for the whole gradient tree.\n self._clip_grad_norm = clip_grad_norm\n\n def init(self, weights):\n \"\"\"Creates optimizer slots for the given parameters.\n\n Args:\n weights: Trainable weights for one layer. Optimizer slots typically match\n the data shape and type of the given layer weights.\n \"\"\"\n raise NotImplementedError\n\n def update(self, step, grads, weights, slots, opt_params):\n \"\"\"Computes one step's worth of updates.\n\n The update computes both new weights for the layer/node and new slot values\n for the optimizer.\n\n Args:\n step: Current step number in the training process.\n grads: Gradients for the weights of the sublayer.\n weights: Current weights for the sublayer.\n slots: Optimizer slots.\n opt_params: Optimizer hyperparameters (e.g. learning rate, momentum).\n\n Returns:\n Tuple of (new_weights, new_slots).\n \"\"\"\n raise NotImplementedError\n\n @property\n def slots(self):\n return self._slots\n\n @slots.setter\n def slots(self, slots):\n self._slots = slots\n\n @property\n def opt_params(self):\n return self._init_opt_params\n\n @opt_params.setter\n def opt_params(self, opt_params):\n self._init_opt_params = opt_params\n\n def tree_init(self, weight_tree):\n \"\"\"Assembles node-local initializations into full-tree initialization.\n\n Args:\n weight_tree: Weights for an entire model, in a tree that matches the\n model's layer structure.\n\n Returns:\n Tuple `(slots, opt_params)`, where `slots` are the initialized optimizer\n slot values and `opt_params` are optimizer hyperparameters (e.g.,\n learning rate, momentum).\n \"\"\"\n self._slots = [self.init(weight)\n for weight in fastmath.tree_flatten(weight_tree)]\n return (\n self._slots,\n self._init_opt_params,\n )\n\n def tree_update(self, step, grad_tree, weight_tree, slots, opt_params):\n \"\"\"Assembles node-local weight and slot updates for the full layer tree.\n\n Args:\n step: Current step number in the training process.\n grad_tree: Gradients for the entire model, in a tree that matches the\n model's layer structure.\n weight_tree: Current weights for the entire model, in a tree that matches\n the model's layer structure.\n slots: Optimizer slots.\n opt_params: Optimizer hyperparameters (e.g. 
learning rate, momentum).\n\n Returns:\n Tuple `(weights, slots)`, where `weights` are the optimizer-updated\n weights for the whole model (in a tree matching the model's layer\n structure) and `slots` are the updated optimizer slot values.\n \"\"\"\n grads_flat = fastmath.tree_flatten(grad_tree)\n grads_norm = self._l2_norm(grads_flat)\n if self._clip_grad_norm is not None:\n max_norm = self._clip_grad_norm\n grads_flat = [jnp.where(grads_norm < max_norm, # pylint: disable=g-complex-comprehension\n g,\n g * (max_norm / grads_norm))\n for g in grads_flat]\n weights_flat = fastmath.tree_flatten(weight_tree)\n weights_norm = self._l2_norm(weights_flat)\n updated_pairs = [\n self._update_and_check(step, grad, weight, slot, opt_params)\n for (grad, weight, slot) in zip(grads_flat, weights_flat, slots)\n ]\n new_weights_flat, self.slots = zip(*updated_pairs)\n new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree)\n metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm}\n return new_weights, self.slots, metrics\n\n def _l2_norm(self, flat_list):\n \"\"\"Returns the aggregate L2 norm of a list of tensors.\"\"\"\n if fastmath.is_backend(fastmath.Backend.JAX):\n norm = jnp.sqrt(sum(jnp.vdot(x, x) for x in flat_list))\n else: # TODO(lukaszkaiser): add vdot to TF-numpy\n norm = jnp.sqrt(sum(jnp.sum(x*x) for x in flat_list))\n return norm\n\n def _update_and_check(self, step, grads, weights, slots, opt_params):\n \"\"\"Updates a single weight array and checks types.\"\"\"\n new_weights, new_slots = self.update(\n step, grads, weights, slots, opt_params)\n if isinstance(weights, jnp.ndarray):\n if not isinstance(new_weights, jnp.ndarray):\n raise ValueError(\n f'New weight values should be of type jnp.ndarray or a subclass; '\n f'instead got {type(new_weights)}.')\n if new_weights.dtype != weights.dtype:\n raise ValueError(\n f'New weight values dtype ({new_weights.dtype}) does not match '\n f'the old one ({weights.dtype}).')\n return new_weights, new_slots\n\n\nclass SGD(Optimizer):\n \"\"\"Stochastic gradient descent (SGD) optimizer.\n\n A simple optimizer with no weights (\"slots\") of its own.\n \"\"\"\n\n def init(self, weights):\n return None\n\n def update(self, step, grads, weights, slots, opt_params):\n del step, slots\n lr = opt_params['learning_rate']\n new_weights = weights - (lr * grads).astype(weights.dtype)\n return new_weights, None\n\n\n# Utilities.\n\n\ndef l2_norm(tree):\n \"\"\"Compute the l2 norm of a pytree of arrays. Useful for weight decay.\"\"\"\n leaves = fastmath.tree_flatten(tree)\n return jnp.sqrt(sum(jnp.vdot(x, x) for x in leaves))\n\n\ndef clip_grads(grad_tree, max_norm):\n \"\"\"Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.\"\"\"\n norm = l2_norm(grad_tree)\n normalize = lambda g: jnp.where(norm < max_norm, g, g * (max_norm / norm))\n return fastmath.nested_map(grad_tree, normalize)\n", "path": "trax/optimizers/base.py"}]}
3,862
175
gh_patches_debug_29513
rasdani/github-patches
git_diff
TheAlgorithms__Python-10140
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Concatenate/consolidate all algorithms with different implementations ### Feature description There are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file </issue> <code> [start of strings/reverse_letters.py] 1 def reverse_letters(input_str: str) -> str: 2 """ 3 Reverses letters in a given string without adjusting the position of the words 4 >>> reverse_letters('The cat in the hat') 5 'ehT tac ni eht tah' 6 >>> reverse_letters('The quick brown fox jumped over the lazy dog.') 7 'ehT kciuq nworb xof depmuj revo eht yzal .god' 8 >>> reverse_letters('Is this true?') 9 'sI siht ?eurt' 10 >>> reverse_letters("I love Python") 11 'I evol nohtyP' 12 """ 13 return " ".join([word[::-1] for word in input_str.split()]) 14 15 16 if __name__ == "__main__": 17 import doctest 18 19 doctest.testmod() 20 [end of strings/reverse_letters.py] [start of strings/reverse_long_words.py] 1 def reverse_long_words(sentence: str) -> str: 2 """ 3 Reverse all words that are longer than 4 characters in a sentence. 4 5 >>> reverse_long_words("Hey wollef sroirraw") 6 'Hey fellow warriors' 7 >>> reverse_long_words("nohtyP is nohtyP") 8 'Python is Python' 9 >>> reverse_long_words("1 12 123 1234 54321 654321") 10 '1 12 123 1234 12345 123456' 11 """ 12 return " ".join( 13 "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() 14 ) 15 16 17 if __name__ == "__main__": 18 import doctest 19 20 doctest.testmod() 21 print(reverse_long_words("Hey wollef sroirraw")) 22 [end of strings/reverse_long_words.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py --- a/strings/reverse_letters.py +++ b/strings/reverse_letters.py @@ -1,19 +1,24 @@ -def reverse_letters(input_str: str) -> str: +def reverse_letters(sentence: str, length: int = 0) -> str: """ - Reverses letters in a given string without adjusting the position of the words - >>> reverse_letters('The cat in the hat') - 'ehT tac ni eht tah' - >>> reverse_letters('The quick brown fox jumped over the lazy dog.') - 'ehT kciuq nworb xof depmuj revo eht yzal .god' - >>> reverse_letters('Is this true?') - 'sI siht ?eurt' - >>> reverse_letters("I love Python") - 'I evol nohtyP' + Reverse all words that are longer than the given length of characters in a sentence. + If unspecified, length is taken as 0 + + >>> reverse_letters("Hey wollef sroirraw", 3) + 'Hey fellow warriors' + >>> reverse_letters("nohtyP is nohtyP", 2) + 'Python is Python' + >>> reverse_letters("1 12 123 1234 54321 654321", 0) + '1 21 321 4321 12345 123456' + >>> reverse_letters("racecar") + 'racecar' """ - return " ".join([word[::-1] for word in input_str.split()]) + return " ".join( + "".join(word[::-1]) if len(word) > length else word for word in sentence.split() + ) if __name__ == "__main__": import doctest doctest.testmod() + print(reverse_letters("Hey wollef sroirraw")) diff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py deleted file mode 100644 --- a/strings/reverse_long_words.py +++ /dev/null @@ -1,21 +0,0 @@ -def reverse_long_words(sentence: str) -> str: - """ - Reverse all words that are longer than 4 characters in a sentence. - - >>> reverse_long_words("Hey wollef sroirraw") - 'Hey fellow warriors' - >>> reverse_long_words("nohtyP is nohtyP") - 'Python is Python' - >>> reverse_long_words("1 12 123 1234 54321 654321") - '1 12 123 1234 12345 123456' - """ - return " ".join( - "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() - ) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - print(reverse_long_words("Hey wollef sroirraw"))
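A standalone sketch of how the consolidated helper behaves after this diff. The body mirrors the replacement function from the diff (with the same `length` threshold parameter); the two calls are just illustrative and reproduce the doctests from both of the original files:

```python
def reverse_letters(sentence: str, length: int = 0) -> str:
    # Reverse only the words longer than `length`, keeping word positions intact.
    return " ".join(
        word[::-1] if len(word) > length else word for word in sentence.split()
    )

print(reverse_letters("Hey wollef sroirraw", 3))  # 'Hey fellow warriors'
print(reverse_letters("I love Python"))           # 'I evol nohtyP'
```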
{"golden_diff": "diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py\n--- a/strings/reverse_letters.py\n+++ b/strings/reverse_letters.py\n@@ -1,19 +1,24 @@\n-def reverse_letters(input_str: str) -> str:\n+def reverse_letters(sentence: str, length: int = 0) -> str:\n \"\"\"\n- Reverses letters in a given string without adjusting the position of the words\n- >>> reverse_letters('The cat in the hat')\n- 'ehT tac ni eht tah'\n- >>> reverse_letters('The quick brown fox jumped over the lazy dog.')\n- 'ehT kciuq nworb xof depmuj revo eht yzal .god'\n- >>> reverse_letters('Is this true?')\n- 'sI siht ?eurt'\n- >>> reverse_letters(\"I love Python\")\n- 'I evol nohtyP'\n+ Reverse all words that are longer than the given length of characters in a sentence.\n+ If unspecified, length is taken as 0\n+\n+ >>> reverse_letters(\"Hey wollef sroirraw\", 3)\n+ 'Hey fellow warriors'\n+ >>> reverse_letters(\"nohtyP is nohtyP\", 2)\n+ 'Python is Python'\n+ >>> reverse_letters(\"1 12 123 1234 54321 654321\", 0)\n+ '1 21 321 4321 12345 123456'\n+ >>> reverse_letters(\"racecar\")\n+ 'racecar'\n \"\"\"\n- return \" \".join([word[::-1] for word in input_str.split()])\n+ return \" \".join(\n+ \"\".join(word[::-1]) if len(word) > length else word for word in sentence.split()\n+ )\n \n \n if __name__ == \"__main__\":\n import doctest\n \n doctest.testmod()\n+ print(reverse_letters(\"Hey wollef sroirraw\"))\ndiff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py\ndeleted file mode 100644\n--- a/strings/reverse_long_words.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-def reverse_long_words(sentence: str) -> str:\n- \"\"\"\n- Reverse all words that are longer than 4 characters in a sentence.\n-\n- >>> reverse_long_words(\"Hey wollef sroirraw\")\n- 'Hey fellow warriors'\n- >>> reverse_long_words(\"nohtyP is nohtyP\")\n- 'Python is Python'\n- >>> reverse_long_words(\"1 12 123 1234 54321 654321\")\n- '1 12 123 1234 12345 123456'\n- \"\"\"\n- return \" \".join(\n- \"\".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()\n- )\n-\n-\n-if __name__ == \"__main__\":\n- import doctest\n-\n- doctest.testmod()\n- print(reverse_long_words(\"Hey wollef sroirraw\"))\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. 
All these should be moved into one file\n", "before_files": [{"content": "def reverse_letters(input_str: str) -> str:\n \"\"\"\n Reverses letters in a given string without adjusting the position of the words\n >>> reverse_letters('The cat in the hat')\n 'ehT tac ni eht tah'\n >>> reverse_letters('The quick brown fox jumped over the lazy dog.')\n 'ehT kciuq nworb xof depmuj revo eht yzal .god'\n >>> reverse_letters('Is this true?')\n 'sI siht ?eurt'\n >>> reverse_letters(\"I love Python\")\n 'I evol nohtyP'\n \"\"\"\n return \" \".join([word[::-1] for word in input_str.split()])\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "strings/reverse_letters.py"}, {"content": "def reverse_long_words(sentence: str) -> str:\n \"\"\"\n Reverse all words that are longer than 4 characters in a sentence.\n\n >>> reverse_long_words(\"Hey wollef sroirraw\")\n 'Hey fellow warriors'\n >>> reverse_long_words(\"nohtyP is nohtyP\")\n 'Python is Python'\n >>> reverse_long_words(\"1 12 123 1234 54321 654321\")\n '1 12 123 1234 12345 123456'\n \"\"\"\n return \" \".join(\n \"\".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()\n )\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n print(reverse_long_words(\"Hey wollef sroirraw\"))\n", "path": "strings/reverse_long_words.py"}]}
num_tokens_prompt: 1,039
num_tokens_diff: 732
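For context, the consolidated `reverse_letters` in the diff above folds the old `reverse_long_words` behaviour into a single function with an optional length threshold. A minimal usage sketch of that merged signature (names taken from the diff; the `"".join(word[::-1])` wrapper from the diff is dropped here since `word[::-1]` is already a string):

```python
def reverse_letters(sentence: str, length: int = 0) -> str:
    # Reverse every word strictly longer than `length`; shorter words pass through unchanged.
    return " ".join(word[::-1] if len(word) > length else word for word in sentence.split())


if __name__ == "__main__":
    print(reverse_letters("Hey wollef sroirraw", 3))  # -> 'Hey fellow warriors'
    print(reverse_letters("nohtyP is nohtyP", 2))     # -> 'Python is Python'
    print(reverse_letters("The cat in the hat"))      # default length=0 reverses every word
```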
problem_id: gh_patches_debug_28763
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataDog__dd-trace-py-2196
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DD_TAGS separator inconsistent with heroku-buildpack-datadog https://github.com/DataDog/heroku-buildpack-datadog sets `DD_TAGS` separated by spaces. However, dd-trace-py splits the tags by commas: https://github.com/DataDog/dd-trace-py/blob/master/ddtrace/utils/formats.py#L87-L116 ``` def parse_tags_str(tags_str): """Parse a string of tags typically provided via environment variables. The expected string is of the form:: "key1:value1,key2:value2" :param tags_str: A string of the above form to parse tags from. :return: A dict containing the tags that were parsed. """ parsed_tags = {} if not tags_str: return parsed_tags for tag in tags_str.split(","): try: key, value = tag.split(":", 1) # Validate the tag if key == "" or value == "" or value.endswith(":"): raise ValueError except ValueError: log.error( "Malformed tag in tag pair '%s' from tag string '%s'.", tag, tags_str, ) else: parsed_tags[key] = value return parsed_tags ``` This results in all of the tags being set as the value of the first tag. This looks to have been previously surfaced for the java tracer here: https://github.com/DataDog/heroku-buildpack-datadog/issues/187 And was initially changed in the buildpack but was reverted and dd-trace-java updated to accommodate commas or spaces: https://github.com/DataDog/dd-trace-java/pull/2011 ### Which version of dd-trace-py are you using? 0.47.0 ### How can we reproduce your problem? Perform a trace with `DD_TAGS` set by https://github.com/DataDog/heroku-buildpack-datadog. ### What is the result that you get? `dyno` tag contains all other tags. ### What is the result that you expected? Tags to be properly separated. </issue> <code> [start of ddtrace/utils/formats.py] 1 import logging 2 import os 3 4 from .deprecation import deprecation 5 6 7 log = logging.getLogger(__name__) 8 9 10 def get_env(*parts, **kwargs): 11 """Retrieves environment variables value for the given integration. It must be used 12 for consistency between integrations. 
The implementation is backward compatible 13 with legacy nomenclature: 14 15 * `DATADOG_` is a legacy prefix with lower priority 16 * `DD_` environment variables have the highest priority 17 * the environment variable is built concatenating `integration` and `variable` 18 arguments 19 * return `default` otherwise 20 21 :param parts: evironment variable parts that will be joined with ``_`` to generate the name 22 :type parts: :obj:`str` 23 :param kwargs: ``default`` is the only supported keyword argument which sets the default value 24 if no environment variable is found 25 :rtype: :obj:`str` | ``kwargs["default"]`` 26 :returns: The string environment variable value or the value of ``kwargs["default"]`` if not found 27 """ 28 default = kwargs.get("default") 29 30 key = "_".join(parts) 31 key = key.upper() 32 legacy_env = "DATADOG_{}".format(key) 33 env = "DD_{}".format(key) 34 35 value = os.getenv(env) 36 legacy = os.getenv(legacy_env) 37 if legacy: 38 # Deprecation: `DATADOG_` variables are deprecated 39 deprecation( 40 name="DATADOG_", 41 message="Use `DD_` prefix instead", 42 version="1.0.0", 43 ) 44 45 value = value or legacy 46 return value if value else default 47 48 49 def deep_getattr(obj, attr_string, default=None): 50 """ 51 Returns the attribute of `obj` at the dotted path given by `attr_string` 52 If no such attribute is reachable, returns `default` 53 54 >>> deep_getattr(cass, 'cluster') 55 <cassandra.cluster.Cluster object at 0xa20c350 56 57 >>> deep_getattr(cass, 'cluster.metadata.partitioner') 58 u'org.apache.cassandra.dht.Murmur3Partitioner' 59 60 >>> deep_getattr(cass, 'i.dont.exist', default='default') 61 'default' 62 """ 63 attrs = attr_string.split(".") 64 for attr in attrs: 65 try: 66 obj = getattr(obj, attr) 67 except AttributeError: 68 return default 69 70 return obj 71 72 73 def asbool(value): 74 """Convert the given String to a boolean object. 75 76 Accepted values are `True` and `1`. 77 """ 78 if value is None: 79 return False 80 81 if isinstance(value, bool): 82 return value 83 84 return value.lower() in ("true", "1") 85 86 87 def parse_tags_str(tags_str): 88 """Parse a string of tags typically provided via environment variables. 89 90 The expected string is of the form:: 91 "key1:value1,key2:value2" 92 93 :param tags_str: A string of the above form to parse tags from. 94 :return: A dict containing the tags that were parsed. 95 """ 96 parsed_tags = {} 97 if not tags_str: 98 return parsed_tags 99 100 for tag in tags_str.split(","): 101 try: 102 key, value = tag.split(":", 1) 103 104 # Validate the tag 105 if key == "" or value == "" or value.endswith(":"): 106 raise ValueError 107 except ValueError: 108 log.error( 109 "Malformed tag in tag pair '%s' from tag string '%s'.", 110 tag, 111 tags_str, 112 ) 113 else: 114 parsed_tags[key] = value 115 116 return parsed_tags 117 [end of ddtrace/utils/formats.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/utils/formats.py b/ddtrace/utils/formats.py --- a/ddtrace/utils/formats.py +++ b/ddtrace/utils/formats.py @@ -1,9 +1,13 @@ import logging import os +import re from .deprecation import deprecation +# Tags `key:value` must be separated by either comma or space +_TAGS_NOT_SEPARATED = re.compile(r":[^,\s]+:") + log = logging.getLogger(__name__) @@ -89,6 +93,7 @@ The expected string is of the form:: "key1:value1,key2:value2" + "key1:value1 key2:value2" :param tags_str: A string of the above form to parse tags from. :return: A dict containing the tags that were parsed. @@ -97,7 +102,29 @@ if not tags_str: return parsed_tags - for tag in tags_str.split(","): + if _TAGS_NOT_SEPARATED.search(tags_str): + log.error("Malformed tag string with tags not separated by comma or space '%s'.", tags_str) + return parsed_tags + + # Identify separator based on which successfully identifies the correct + # number of valid tags + numtagseps = tags_str.count(":") + for sep in [",", " "]: + if sum(":" in _ for _ in tags_str.split(sep)) == numtagseps: + break + else: + log.error( + ( + "Failed to find separator for tag string: '%s'.\n" + "Tag strings must be comma or space separated:\n" + " key1:value1,key2:value2\n" + " key1:value1 key2:value2" + ), + tags_str, + ) + return parsed_tags + + for tag in tags_str.split(sep): try: key, value = tag.split(":", 1)
{"golden_diff": "diff --git a/ddtrace/utils/formats.py b/ddtrace/utils/formats.py\n--- a/ddtrace/utils/formats.py\n+++ b/ddtrace/utils/formats.py\n@@ -1,9 +1,13 @@\n import logging\n import os\n+import re\n \n from .deprecation import deprecation\n \n \n+# Tags `key:value` must be separated by either comma or space\n+_TAGS_NOT_SEPARATED = re.compile(r\":[^,\\s]+:\")\n+\n log = logging.getLogger(__name__)\n \n \n@@ -89,6 +93,7 @@\n \n The expected string is of the form::\n \"key1:value1,key2:value2\"\n+ \"key1:value1 key2:value2\"\n \n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n@@ -97,7 +102,29 @@\n if not tags_str:\n return parsed_tags\n \n- for tag in tags_str.split(\",\"):\n+ if _TAGS_NOT_SEPARATED.search(tags_str):\n+ log.error(\"Malformed tag string with tags not separated by comma or space '%s'.\", tags_str)\n+ return parsed_tags\n+\n+ # Identify separator based on which successfully identifies the correct\n+ # number of valid tags\n+ numtagseps = tags_str.count(\":\")\n+ for sep in [\",\", \" \"]:\n+ if sum(\":\" in _ for _ in tags_str.split(sep)) == numtagseps:\n+ break\n+ else:\n+ log.error(\n+ (\n+ \"Failed to find separator for tag string: '%s'.\\n\"\n+ \"Tag strings must be comma or space separated:\\n\"\n+ \" key1:value1,key2:value2\\n\"\n+ \" key1:value1 key2:value2\"\n+ ),\n+ tags_str,\n+ )\n+ return parsed_tags\n+\n+ for tag in tags_str.split(sep):\n try:\n key, value = tag.split(\":\", 1)\n", "issue": "DD_TAGS separator inconsistent with heroku-buildpack-datadog\nhttps://github.com/DataDog/heroku-buildpack-datadog sets `DD_TAGS` separated by spaces.\r\n\r\nHowever, dd-trace-py splits the tags by commas:\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/master/ddtrace/utils/formats.py#L87-L116\r\n```\r\ndef parse_tags_str(tags_str):\r\n \"\"\"Parse a string of tags typically provided via environment variables.\r\n The expected string is of the form::\r\n \"key1:value1,key2:value2\"\r\n :param tags_str: A string of the above form to parse tags from.\r\n :return: A dict containing the tags that were parsed.\r\n \"\"\"\r\n parsed_tags = {}\r\n if not tags_str:\r\n return parsed_tags\r\n\r\n for tag in tags_str.split(\",\"):\r\n try:\r\n key, value = tag.split(\":\", 1)\r\n\r\n # Validate the tag\r\n if key == \"\" or value == \"\" or value.endswith(\":\"):\r\n raise ValueError\r\n except ValueError:\r\n log.error(\r\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\r\n tag,\r\n tags_str,\r\n )\r\n else:\r\n parsed_tags[key] = value\r\n\r\n return parsed_tags\r\n```\r\n\r\nThis results in all of the tags being set as the value of the first tag.\r\n\r\nThis looks to have been previously surfaced for the java tracer here:\r\nhttps://github.com/DataDog/heroku-buildpack-datadog/issues/187\r\n\r\nAnd was initially changed in the buildpack but was reverted and dd-trace-java updated to accommodate commas or spaces:\r\nhttps://github.com/DataDog/dd-trace-java/pull/2011\r\n\r\n### Which version of dd-trace-py are you using?\r\n0.47.0\r\n\r\n### How can we reproduce your problem?\r\nPerform a trace with `DD_TAGS` set by https://github.com/DataDog/heroku-buildpack-datadog.\r\n\r\n### What is the result that you get?\r\n`dyno` tag contains all other tags.\r\n\r\n### What is the result that you expected?\r\nTags to be properly separated.\r\n\n", "before_files": [{"content": "import logging\nimport os\n\nfrom .deprecation import deprecation\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_env(*parts, 
**kwargs):\n \"\"\"Retrieves environment variables value for the given integration. It must be used\n for consistency between integrations. The implementation is backward compatible\n with legacy nomenclature:\n\n * `DATADOG_` is a legacy prefix with lower priority\n * `DD_` environment variables have the highest priority\n * the environment variable is built concatenating `integration` and `variable`\n arguments\n * return `default` otherwise\n\n :param parts: evironment variable parts that will be joined with ``_`` to generate the name\n :type parts: :obj:`str`\n :param kwargs: ``default`` is the only supported keyword argument which sets the default value\n if no environment variable is found\n :rtype: :obj:`str` | ``kwargs[\"default\"]``\n :returns: The string environment variable value or the value of ``kwargs[\"default\"]`` if not found\n \"\"\"\n default = kwargs.get(\"default\")\n\n key = \"_\".join(parts)\n key = key.upper()\n legacy_env = \"DATADOG_{}\".format(key)\n env = \"DD_{}\".format(key)\n\n value = os.getenv(env)\n legacy = os.getenv(legacy_env)\n if legacy:\n # Deprecation: `DATADOG_` variables are deprecated\n deprecation(\n name=\"DATADOG_\",\n message=\"Use `DD_` prefix instead\",\n version=\"1.0.0\",\n )\n\n value = value or legacy\n return value if value else default\n\n\ndef deep_getattr(obj, attr_string, default=None):\n \"\"\"\n Returns the attribute of `obj` at the dotted path given by `attr_string`\n If no such attribute is reachable, returns `default`\n\n >>> deep_getattr(cass, 'cluster')\n <cassandra.cluster.Cluster object at 0xa20c350\n\n >>> deep_getattr(cass, 'cluster.metadata.partitioner')\n u'org.apache.cassandra.dht.Murmur3Partitioner'\n\n >>> deep_getattr(cass, 'i.dont.exist', default='default')\n 'default'\n \"\"\"\n attrs = attr_string.split(\".\")\n for attr in attrs:\n try:\n obj = getattr(obj, attr)\n except AttributeError:\n return default\n\n return obj\n\n\ndef asbool(value):\n \"\"\"Convert the given String to a boolean object.\n\n Accepted values are `True` and `1`.\n \"\"\"\n if value is None:\n return False\n\n if isinstance(value, bool):\n return value\n\n return value.lower() in (\"true\", \"1\")\n\n\ndef parse_tags_str(tags_str):\n \"\"\"Parse a string of tags typically provided via environment variables.\n\n The expected string is of the form::\n \"key1:value1,key2:value2\"\n\n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n \"\"\"\n parsed_tags = {}\n if not tags_str:\n return parsed_tags\n\n for tag in tags_str.split(\",\"):\n try:\n key, value = tag.split(\":\", 1)\n\n # Validate the tag\n if key == \"\" or value == \"\" or value.endswith(\":\"):\n raise ValueError\n except ValueError:\n log.error(\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n tag,\n tags_str,\n )\n else:\n parsed_tags[key] = value\n\n return parsed_tags\n", "path": "ddtrace/utils/formats.py"}]}
num_tokens_prompt: 2,022
num_tokens_diff: 448
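The fix above teaches `parse_tags_str` to accept either comma- or space-separated `key:value` pairs by checking which separator yields the expected number of tags. A self-contained sketch of that idea, assuming nothing about ddtrace internals (the helper name is illustrative, and logging plus the exact error messages are omitted):

```python
import re

# A value followed directly by another "key:" with no comma/space in between is ambiguous.
_TAGS_NOT_SEPARATED = re.compile(r":[^,\s]+:")


def parse_tags(tags_str):
    """Parse 'k1:v1,k2:v2' or 'k1:v1 k2:v2' into a dict; return {} on malformed input."""
    if not tags_str or _TAGS_NOT_SEPARATED.search(tags_str):
        return {}

    # Pick whichever separator splits the string into the expected number of key:value pairs.
    num_tags = tags_str.count(":")
    for sep in (",", " "):
        if sum(":" in part for part in tags_str.split(sep)) == num_tags:
            break
    else:
        return {}

    parsed = {}
    for tag in tags_str.split(sep):
        key, _, value = tag.partition(":")
        if key and value and not value.endswith(":"):
            parsed[key] = value
    return parsed


print(parse_tags("dyno:web.1 dynotype:web appname:demo"))  # space-separated (Heroku buildpack style)
print(parse_tags("env:prod,service:web"))                  # comma-separated
```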
problem_id: gh_patches_debug_33706
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-cloudformation__cfn-lint-470
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Length limit check on Route53 TXT records doesn't allow multiple values *cfn-lint version: (`cfn-lint --version`)* master *Description of issue.* The length limit check on TXT records is not quite right. Multiple values of up to 255 characters *are* allowed, separated by spaces. This valid template is thus categorized as invalid: ``` $ cat test.yml Resources: Example: Type: AWS::Route53::RecordSet Properties: HostedZoneId: abc123 Name: example.com. Type: TXT TTL: '14400' ResourceRecords: # 255 "a" characters within appropriate quotes, then a "b" - '"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "b"' $ cfn-lint test.yml E3020 The length of the TXT record (261) exceeds the limit (255) test.yml:9:7 ``` Verified it's valid by creating an equivalent record on the console. </issue> <code> [start of src/cfnlint/rules/resources/route53/RecordSet.py] 1 """ 2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
16 """ 17 import re 18 from cfnlint import CloudFormationLintRule 19 from cfnlint import RuleMatch 20 21 from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC 22 23 class RecordSet(CloudFormationLintRule): 24 """Check Route53 Recordset Configuration""" 25 id = 'E3020' 26 shortdesc = 'Validate Route53 RecordSets' 27 description = 'Check if all RecordSets are correctly configured' 28 source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html' 29 tags = ['resources', 'route53', 'record_set'] 30 31 # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html 32 VALID_RECORD_TYPES = [ 33 'A', 34 'AAAA', 35 'CAA', 36 'CNAME', 37 'MX', 38 'NAPTR', 39 'NS', 40 'PTR', 41 'SOA' 42 'SPF', 43 'SRV', 44 'TXT' 45 ] 46 47 REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$') 48 49 def check_a_record(self, path, recordset): 50 """Check A record Configuration""" 51 matches = [] 52 53 resource_records = recordset.get('ResourceRecords') 54 for index, record in enumerate(resource_records): 55 56 if not isinstance(record, dict): 57 tree = path[:] + ['ResourceRecords', index] 58 59 # Check if a valid IPv4 address is specified 60 if not re.match(REGEX_IPV4, record): 61 message = 'A record ({}) is not a valid IPv4 address' 62 matches.append(RuleMatch(tree, message.format(record))) 63 64 return matches 65 66 def check_aaaa_record(self, path, recordset): 67 """Check AAAA record Configuration""" 68 matches = [] 69 70 resource_records = recordset.get('ResourceRecords') 71 for index, record in enumerate(resource_records): 72 73 if not isinstance(record, dict): 74 tree = path[:] + ['ResourceRecords', index] 75 76 # Check if a valid IPv4 address is specified 77 if not re.match(REGEX_IPV6, record): 78 message = 'AAAA record ({}) is not a valid IPv6 address' 79 matches.append(RuleMatch(tree, message.format(record))) 80 81 return matches 82 83 def check_caa_record(self, path, recordset): 84 """Check CAA record Configuration""" 85 matches = [] 86 87 resource_records = recordset.get('ResourceRecords') 88 89 for index, record in enumerate(resource_records): 90 tree = path[:] + ['ResourceRecords', index] 91 92 if not isinstance(record, dict): 93 # Split the record up to the mandatory settings (flags tag "value") 94 items = record.split(' ', 2) 95 96 # Check if the 3 settings are given. 97 if len(items) != 3: 98 message = 'CAA record must contain 3 settings (flags tag "value"), record contains {} settings.' 99 matches.append(RuleMatch(tree, message.format(len(items)))) 100 else: 101 # Check the flag value 102 if not items[0].isdigit(): 103 message = 'CAA record flag setting ({}) should be of type Integer.' 104 matches.append(RuleMatch(tree, message.format(items[0]))) 105 else: 106 if int(items[0]) not in [0, 128]: 107 message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.' 108 matches.append(RuleMatch(tree, message.format(items[0]))) 109 110 # Check the tag value 111 if not re.match(REGEX_ALPHANUMERIC, items[1]): 112 message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.' 113 matches.append(RuleMatch(tree, message.format(items[0]))) 114 115 # Check the value 116 if not items[2].startswith('"') or not items[2].endswith('"'): 117 message = 'CAA record value setting has to be enclosed in double quotation marks (").' 
118 matches.append(RuleMatch(tree, message)) 119 120 return matches 121 122 def check_cname_record(self, path, recordset): 123 """Check CNAME record Configuration""" 124 matches = [] 125 126 resource_records = recordset.get('ResourceRecords') 127 if len(resource_records) > 1: 128 message = 'A CNAME recordset can only contain 1 value' 129 matches.append(RuleMatch(path + ['ResourceRecords'], message)) 130 else: 131 for index, record in enumerate(resource_records): 132 if not isinstance(record, dict): 133 tree = path[:] + ['ResourceRecords', index] 134 if (not re.match(self.REGEX_CNAME, record) 135 # ACM Route 53 validation uses invalid CNAMEs starting with `_`, 136 # special-case them rather than complicate the regex. 137 and not record.endswith('.acm-validations.aws.')): 138 message = 'CNAME record ({}) does not contain a valid domain name' 139 matches.append(RuleMatch(tree, message.format(record))) 140 141 return matches 142 143 def check_txt_record(self, path, recordset): 144 """Check TXT record Configuration""" 145 matches = [] 146 147 # Check quotation of the records 148 resource_records = recordset.get('ResourceRecords') 149 150 for index, record in enumerate(resource_records): 151 tree = path[:] + ['ResourceRecords', index] 152 153 if not isinstance(record, dict): 154 if not record.startswith('"') or not record.endswith('"'): 155 message = 'TXT record ({}) has to be enclosed in double quotation marks (")' 156 matches.append(RuleMatch(tree, message.format(record))) 157 elif len(record) > 257: # 2 extra characters for start and end double quotation marks 158 message = 'The length of the TXT record ({}) exceeds the limit (255)' 159 matches.append(RuleMatch(tree, message.format(len(record)))) 160 161 return matches 162 163 def check_recordset(self, path, recordset): 164 """Check record configuration""" 165 166 matches = [] 167 recordset_type = recordset.get('Type') 168 169 # Skip Intrinsic functions 170 if not isinstance(recordset_type, dict): 171 if recordset_type not in self.VALID_RECORD_TYPES: 172 message = 'Invalid record type "{0}" specified' 173 matches.append(RuleMatch(path + ['Type'], message.format(recordset_type))) 174 elif not recordset.get('AliasTarget'): 175 # Record type specific checks 176 if recordset_type == 'A': 177 matches.extend(self.check_a_record(path, recordset)) 178 elif recordset_type == 'AAAA': 179 matches.extend(self.check_aaaa_record(path, recordset)) 180 elif recordset_type == 'CAA': 181 matches.extend(self.check_caa_record(path, recordset)) 182 elif recordset_type == 'CNAME': 183 matches.extend(self.check_cname_record(path, recordset)) 184 elif recordset_type == 'TXT': 185 matches.extend(self.check_txt_record(path, recordset)) 186 187 return matches 188 189 def match(self, cfn): 190 """Check RecordSets and RecordSetGroups Properties""" 191 192 matches = [] 193 194 recordsets = cfn.get_resources(['AWS::Route53::RecordSet']) 195 196 for name, recordset in recordsets.items(): 197 path = ['Resources', name, 'Properties'] 198 199 if isinstance(recordset, dict): 200 props = recordset.get('Properties') 201 if props: 202 matches.extend(self.check_recordset(path, props)) 203 204 recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets']) 205 206 for recordsetgroup in recordsetgroups: 207 path = recordsetgroup['Path'] 208 value = recordsetgroup['Value'] 209 if isinstance(value, list): 210 for index, recordset in enumerate(value): 211 tree = path[:] + [index] 212 matches.extend(self.check_recordset(tree, recordset)) 213 214 return 
matches 215 [end of src/cfnlint/rules/resources/route53/RecordSet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py --- a/src/cfnlint/rules/resources/route53/RecordSet.py +++ b/src/cfnlint/rules/resources/route53/RecordSet.py @@ -17,9 +17,9 @@ import re from cfnlint import CloudFormationLintRule from cfnlint import RuleMatch - from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC + class RecordSet(CloudFormationLintRule): """Check Route53 Recordset Configuration""" id = 'E3020' @@ -45,6 +45,7 @@ ] REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$') + REGEX_TXT = re.compile(r'^("[^"]{1,255}" *)*"[^"]{1,255}"$') def check_a_record(self, path, recordset): """Check A record Configuration""" @@ -150,13 +151,15 @@ for index, record in enumerate(resource_records): tree = path[:] + ['ResourceRecords', index] - if not isinstance(record, dict): - if not record.startswith('"') or not record.endswith('"'): - message = 'TXT record ({}) has to be enclosed in double quotation marks (")' - matches.append(RuleMatch(tree, message.format(record))) - elif len(record) > 257: # 2 extra characters for start and end double quotation marks - message = 'The length of the TXT record ({}) exceeds the limit (255)' - matches.append(RuleMatch(tree, message.format(len(record)))) + if not isinstance(record, dict) and not re.match(self.REGEX_TXT, record): + message = 'TXT record is not structured as one or more items up to 255 characters ' \ + 'enclosed in double quotation marks at {0}' + matches.append(RuleMatch( + tree, + ( + message.format('/'.join(map(str, tree))) + ), + )) return matches
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py\n--- a/src/cfnlint/rules/resources/route53/RecordSet.py\n+++ b/src/cfnlint/rules/resources/route53/RecordSet.py\n@@ -17,9 +17,9 @@\n import re\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n-\n from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n \n+\n class RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n@@ -45,6 +45,7 @@\n ]\n \n REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n+ REGEX_TXT = re.compile(r'^(\"[^\"]{1,255}\" *)*\"[^\"]{1,255}\"$')\n \n def check_a_record(self, path, recordset):\n \"\"\"Check A record Configuration\"\"\"\n@@ -150,13 +151,15 @@\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n \n- if not isinstance(record, dict):\n- if not record.startswith('\"') or not record.endswith('\"'):\n- message = 'TXT record ({}) has to be enclosed in double quotation marks (\")'\n- matches.append(RuleMatch(tree, message.format(record)))\n- elif len(record) > 257: # 2 extra characters for start and end double quotation marks\n- message = 'The length of the TXT record ({}) exceeds the limit (255)'\n- matches.append(RuleMatch(tree, message.format(len(record))))\n+ if not isinstance(record, dict) and not re.match(self.REGEX_TXT, record):\n+ message = 'TXT record is not structured as one or more items up to 255 characters ' \\\n+ 'enclosed in double quotation marks at {0}'\n+ matches.append(RuleMatch(\n+ tree,\n+ (\n+ message.format('/'.join(map(str, tree)))\n+ ),\n+ ))\n \n return matches\n", "issue": "Length limit check on Route53 TXT records doesn't allow multiple values\n*cfn-lint version: (`cfn-lint --version`)* master\r\n\r\n*Description of issue.*\r\n\r\nThe length limit check on TXT records is not quite right. Multiple values of up to 255 characters *are* allowed, separated by spaces.\r\n\r\nThis valid template is thus categorized as invalid:\r\n\r\n```\r\n$ cat test.yml\r\nResources:\r\n Example:\r\n Type: AWS::Route53::RecordSet\r\n Properties:\r\n HostedZoneId: abc123\r\n Name: example.com.\r\n Type: TXT\r\n TTL: '14400'\r\n ResourceRecords:\r\n # 255 \"a\" characters within appropriate quotes, then a \"b\"\r\n - '\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" \"b\"'\r\n$ cfn-lint test.yml\r\nE3020 The length of the TXT record (261) exceeds the limit (255)\r\ntest.yml:9:7\r\n```\r\n\r\nVerified it's valid by creating an equivalent record on the console.\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n\nclass RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n shortdesc = 'Validate Route53 RecordSets'\n description = 'Check if all RecordSets are correctly configured'\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n\n # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html\n VALID_RECORD_TYPES = [\n 'A',\n 'AAAA',\n 'CAA',\n 'CNAME',\n 'MX',\n 'NAPTR',\n 'NS',\n 'PTR',\n 'SOA'\n 'SPF',\n 'SRV',\n 'TXT'\n ]\n\n REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n\n def check_a_record(self, path, recordset):\n \"\"\"Check A record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV4, record):\n message = 'A record ({}) is not a valid IPv4 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_aaaa_record(self, path, recordset):\n \"\"\"Check AAAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV6, record):\n message = 'AAAA record ({}) is not a valid IPv6 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_caa_record(self, path, recordset):\n \"\"\"Check CAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n # Split the record up to the mandatory settings (flags tag \"value\")\n items = record.split(' ', 2)\n\n # Check if the 3 settings are given.\n if len(items) != 3:\n message = 'CAA record must contain 3 settings (flags tag \"value\"), record contains {} settings.'\n matches.append(RuleMatch(tree, message.format(len(items))))\n else:\n # Check the flag value\n if not 
items[0].isdigit():\n message = 'CAA record flag setting ({}) should be of type Integer.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n else:\n if int(items[0]) not in [0, 128]:\n message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the tag value\n if not re.match(REGEX_ALPHANUMERIC, items[1]):\n message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the value\n if not items[2].startswith('\"') or not items[2].endswith('\"'):\n message = 'CAA record value setting has to be enclosed in double quotation marks (\").'\n matches.append(RuleMatch(tree, message))\n\n return matches\n\n def check_cname_record(self, path, recordset):\n \"\"\"Check CNAME record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n if len(resource_records) > 1:\n message = 'A CNAME recordset can only contain 1 value'\n matches.append(RuleMatch(path + ['ResourceRecords'], message))\n else:\n for index, record in enumerate(resource_records):\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n if (not re.match(self.REGEX_CNAME, record)\n # ACM Route 53 validation uses invalid CNAMEs starting with `_`,\n # special-case them rather than complicate the regex.\n and not record.endswith('.acm-validations.aws.')):\n message = 'CNAME record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_txt_record(self, path, recordset):\n \"\"\"Check TXT record Configuration\"\"\"\n matches = []\n\n # Check quotation of the records\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n if not record.startswith('\"') or not record.endswith('\"'):\n message = 'TXT record ({}) has to be enclosed in double quotation marks (\")'\n matches.append(RuleMatch(tree, message.format(record)))\n elif len(record) > 257: # 2 extra characters for start and end double quotation marks\n message = 'The length of the TXT record ({}) exceeds the limit (255)'\n matches.append(RuleMatch(tree, message.format(len(record))))\n\n return matches\n\n def check_recordset(self, path, recordset):\n \"\"\"Check record configuration\"\"\"\n\n matches = []\n recordset_type = recordset.get('Type')\n\n # Skip Intrinsic functions\n if not isinstance(recordset_type, dict):\n if recordset_type not in self.VALID_RECORD_TYPES:\n message = 'Invalid record type \"{0}\" specified'\n matches.append(RuleMatch(path + ['Type'], message.format(recordset_type)))\n elif not recordset.get('AliasTarget'):\n # Record type specific checks\n if recordset_type == 'A':\n matches.extend(self.check_a_record(path, recordset))\n elif recordset_type == 'AAAA':\n matches.extend(self.check_aaaa_record(path, recordset))\n elif recordset_type == 'CAA':\n matches.extend(self.check_caa_record(path, recordset))\n elif recordset_type == 'CNAME':\n matches.extend(self.check_cname_record(path, recordset))\n elif recordset_type == 'TXT':\n matches.extend(self.check_txt_record(path, recordset))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check RecordSets and RecordSetGroups Properties\"\"\"\n\n matches = []\n\n recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])\n\n for name, recordset in recordsets.items():\n 
path = ['Resources', name, 'Properties']\n\n if isinstance(recordset, dict):\n props = recordset.get('Properties')\n if props:\n matches.extend(self.check_recordset(path, props))\n\n recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])\n\n for recordsetgroup in recordsetgroups:\n path = recordsetgroup['Path']\n value = recordsetgroup['Value']\n if isinstance(value, list):\n for index, recordset in enumerate(value):\n tree = path[:] + [index]\n matches.extend(self.check_recordset(tree, recordset))\n\n return matches\n", "path": "src/cfnlint/rules/resources/route53/RecordSet.py"}]}
num_tokens_prompt: 3,292
num_tokens_diff: 553
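The core of the fix above is the `REGEX_TXT` pattern, which accepts one or more double-quoted chunks of up to 255 characters each, separated by spaces. A small isolated sketch of that check (the regex is copied from the diff; the helper function around it is illustrative only):

```python
import re

# One or more quoted chunks of 1-255 non-quote characters, separated by optional spaces.
REGEX_TXT = re.compile(r'^("[^"]{1,255}" *)*"[^"]{1,255}"$')


def txt_record_is_valid(record: str) -> bool:
    """Return True if a TXT record value is structured as quoted chunks of <= 255 characters."""
    return bool(REGEX_TXT.match(record))


print(txt_record_is_valid('"%s" "b"' % ("a" * 255)))  # True: 255-char chunk plus a second chunk
print(txt_record_is_valid('"%s"' % ("a" * 256)))      # False: a single chunk may not exceed 255
print(txt_record_is_valid("no-quotes"))               # False: chunks must be double-quoted
```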
problem_id: gh_patches_debug_38116
source: rasdani/github-patches
task_type: git_diff
in_source_id: sktime__sktime-4439
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [ENH] `SARIMAX` - implement `predict_interval` or `predict_quantiles` (for `statsmodels` `SARIMAX` interface) From discord, Huseyin Aytug: hi when I want to produce confidence intervals with `SARIMAX`, I get this error: `NotImplementedError: SARIMAX does not have the capability to return quantile predictions. If you think this estimator should have the capability, please open an issue on sktime.` However, it should be possible according to the documentation http://www.sktime.net/en/latest/api_reference/auto_generated/sktime.forecasting.sarimax.SARIMAX.html#sktime.forecasting.sarimax.SARIMAX.predict_interval </issue> <code> [start of sktime/forecasting/sarimax.py] 1 # -*- coding: utf-8 -*- 2 # !/usr/bin/env python3 -u 3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) 4 """Implements SARIMAX.""" 5 6 __all__ = ["SARIMAX"] 7 __author__ = ["TNTran92"] 8 9 from sktime.forecasting.base.adapters import _StatsModelsAdapter 10 11 12 class SARIMAX(_StatsModelsAdapter): 13 """SARIMAX forecaster. 14 15 Direct interface for `statsmodels.tsa.api.SARIMAX`. 16 17 Parameters 18 ---------- 19 order : iterable or iterable of iterables, optional, default=(1,0,0) 20 The (p,d,q) order of the model for the number of AR parameters, 21 differences, and MA parameters. `d` must be an integer 22 indicating the integration order of the process, while 23 `p` and `q` may either be an integers indicating the AR and MA 24 orders (so that all lags up to those orders are included) or else 25 iterables giving specific AR and / or MA lags to include. Default is 26 an AR(1) model: (1,0,0). 27 seasonal_order : iterable, optional, default=(0,0,0,0) 28 The (P,D,Q,s) order of the seasonal component of the model for the 29 AR parameters, differences, MA parameters, and periodicity. 30 `D` must be an integer indicating the integration order of the process, 31 while `P` and `Q` may either be an integers indicating the AR and MA 32 orders (so that all lags up to those orders are included) or else 33 iterables giving specific AR and / or MA lags to include. `s` is an 34 integer giving the periodicity (number of periods in season), often it 35 is 4 for quarterly data or 12 for monthly data. Default is no seasonal 36 effect. 37 trend : str{'n','c','t','ct'} or iterable, optional, default="c" 38 Parameter controlling the deterministic trend polynomial :math:`A(t)`. 39 Can be specified as a string where 'c' indicates a constant (i.e. a 40 degree zero component of the trend polynomial), 't' indicates a 41 linear trend with time, and 'ct' is both. Can also be specified as an 42 iterable defining the non-zero polynomial exponents to include, in 43 increasing order. For example, `[1,1,0,1]` denotes 44 :math:`a + bt + ct^3`. Default is to not include a trend component. 45 measurement_error : bool, optional, default=False 46 Whether or not to assume the endogenous observations `endog` were 47 measured with error. 48 time_varying_regression : bool, optional, default=False 49 Used when an explanatory variables, `exog`, are provided 50 to select whether or not coefficients on the exogenous regressors are 51 allowed to vary over time. 52 mle_regression : bool, optional, default=True 53 Whether or not to use estimate the regression coefficients for the 54 exogenous variables as part of maximum likelihood estimation or through 55 the Kalman filter (i.e. recursive least squares). 
If 56 `time_varying_regression` is True, this must be set to False. 57 simple_differencing : bool, optional, default=False 58 Whether or not to use partially conditional maximum likelihood 59 estimation. If True, differencing is performed prior to estimation, 60 which discards the first :math:`s D + d` initial rows but results in a 61 smaller state-space formulation. See the Notes section for important 62 details about interpreting results when this option is used. If False, 63 the full SARIMAX model is put in state-space form so that all 64 datapoints can be used in estimation. 65 enforce_stationarity : bool, optional, default=True 66 Whether or not to transform the AR parameters to enforce stationarity 67 in the autoregressive component of the model. 68 enforce_invertibility : bool, optional, default=True 69 Whether or not to transform the MA parameters to enforce invertibility 70 in the moving average component of the model. 71 hamilton_representation : bool, optional, default=False 72 Whether or not to use the Hamilton representation of an ARMA process 73 (if True) or the Harvey representation (if False). 74 concentrate_scale : bool, optional, default=False 75 Whether or not to concentrate the scale (variance of the error term) 76 out of the likelihood. This reduces the number of parameters estimated 77 by maximum likelihood by one, but standard errors will then not 78 be available for the scale parameter. 79 trend_offset : int, optional, default=1 80 The offset at which to start time trend values. Default is 1, so that 81 if `trend='t'` the trend is equal to 1, 2, ..., nobs. Typically is only 82 set when the model created by extending a previous dataset. 83 use_exact_diffuse : bool, optional, default=False 84 Whether or not to use exact diffuse initialization for non-stationary 85 states. Default is False (in which case approximate diffuse 86 initialization is used). 87 random_state : int, RandomState instance or None, optional , 88 default=None – If int, random_state is the seed used by the random 89 number generator; If RandomState instance, random_state is the random 90 number generator; If None, the random number generator is the 91 RandomState instance used by np.random. 92 93 See Also 94 -------- 95 ARIMA 96 AutoARIMA 97 StatsForecastAutoARIMA 98 99 References 100 ---------- 101 .. [1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles 102 and practice. OTexts, 2014. 103 104 Examples 105 -------- 106 >>> from sktime.datasets import load_airline 107 >>> from sktime.forecasting.sarimax import SARIMAX 108 >>> y = load_airline() 109 >>> forecaster = SARIMAX( 110 ... order=(1, 0, 0), trend="t", seasonal_order=(1, 0, 0, 6)) # doctest: +SKIP 111 ... ) 112 >>> forecaster.fit(y) # doctest: +SKIP 113 SARIMAX(...) 
114 >>> y_pred = forecaster.predict(fh=y.index) # doctest: +SKIP 115 """ 116 117 _tags = { 118 "ignores-exogeneous-X": False, 119 } 120 121 def __init__( 122 self, 123 order=(1, 0, 0), 124 seasonal_order=(0, 0, 0, 0), 125 trend="c", 126 measurement_error=False, 127 time_varying_regression=False, 128 mle_regression=True, 129 simple_differencing=False, 130 enforce_stationarity=True, 131 enforce_invertibility=True, 132 hamilton_representation=False, 133 concentrate_scale=False, 134 trend_offset=1, 135 use_exact_diffuse=False, 136 dates=None, 137 freq=None, 138 missing="none", 139 validate_specification=True, 140 random_state=None, 141 ): 142 143 self.order = order 144 self.seasonal_order = seasonal_order 145 self.trend = trend 146 self.measurement_error = measurement_error 147 self.time_varying_regression = time_varying_regression 148 self.mle_regression = mle_regression 149 self.simple_differencing = simple_differencing 150 self.enforce_stationarity = enforce_stationarity 151 self.enforce_invertibility = enforce_invertibility 152 self.hamilton_representation = hamilton_representation 153 self.concentrate_scale = concentrate_scale 154 self.trend_offset = trend_offset 155 self.use_exact_diffuse = use_exact_diffuse 156 self.dates = dates 157 self.freq = freq 158 self.missing = missing 159 self.validate_specification = validate_specification 160 161 super().__init__(random_state=random_state) 162 163 def _fit_forecaster(self, y, X=None): 164 from statsmodels.tsa.api import SARIMAX as _SARIMAX 165 166 self._forecaster = _SARIMAX( 167 endog=y, 168 exog=X, 169 order=self.order, 170 seasonal_order=self.seasonal_order, 171 trend=self.trend, 172 measurement_error=self.measurement_error, 173 time_varying_regression=self.time_varying_regression, 174 mle_regression=self.mle_regression, 175 simple_differencing=self.simple_differencing, 176 enforce_stationarity=self.enforce_stationarity, 177 enforce_invertibility=self.enforce_invertibility, 178 hamilton_representation=self.hamilton_representation, 179 concentrate_scale=self.concentrate_scale, 180 trend_offset=self.trend_offset, 181 use_exact_diffuse=self.use_exact_diffuse, 182 dates=self.dates, 183 freq=self.freq, 184 missing=self.missing, 185 validate_specification=self.validate_specification, 186 ) 187 self._fitted_forecaster = self._forecaster.fit() 188 189 def summary(self): 190 """Get a summary of the fitted forecaster. 191 192 This is the same as the implementation in statsmodels: 193 https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html 194 """ 195 return self._fitted_forecaster.summary() 196 [end of sktime/forecasting/sarimax.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sktime/forecasting/sarimax.py b/sktime/forecasting/sarimax.py --- a/sktime/forecasting/sarimax.py +++ b/sktime/forecasting/sarimax.py @@ -4,7 +4,9 @@ """Implements SARIMAX.""" __all__ = ["SARIMAX"] -__author__ = ["TNTran92"] +__author__ = ["TNTran92", "yarnabrina"] + +import pandas as pd from sktime.forecasting.base.adapters import _StatsModelsAdapter @@ -116,6 +118,7 @@ _tags = { "ignores-exogeneous-X": False, + "capability:pred_int": True, } def __init__( @@ -193,3 +196,59 @@ https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html """ return self._fitted_forecaster.summary() + + def _predict_interval(self, fh, X=None, coverage=0.95): + """Compute/return prediction interval forecasts. + + private _predict_interval containing the core logic, + called from predict_interval and default _predict_quantiles + + Parameters + ---------- + fh : guaranteed to be ForecastingHorizon + The forecasting horizon with the steps ahead to to predict. + X : optional (default=None) + guaranteed to be of a type in self.get_tag("X_inner_mtype") + Exogeneous time series to predict from. + coverage : float or list of float, optional (default=0.95) + nominal coverage(s) of predictive interval(s) + + Returns + ------- + pred_int : pd.DataFrame + Column has multi-index: first level is variable name from y in fit, + second level coverage fractions for which intervals were computed. + in the same order as in input `coverage`. + Third level is string "lower" or "upper", for lower/upper interval end. + Row index is fh, with additional (upper) levels equal to instance levels, + from y seen in fit, if y_inner_mtype is Panel or Hierarchical. + Entries are forecasts of lower/upper interval end, + for var in col index, at nominal coverage in second col index, + lower/upper depending on third col index, for the row index. + Upper/lower interval end forecasts are equivalent to + quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage. + """ + start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] + valid_indices = fh.to_absolute(self.cutoff).to_pandas() + + prediction_results = self._fitted_forecaster.get_prediction( + start=start, end=end, exog=X + ) + + columns = pd.MultiIndex.from_product( + [["Coverage"], coverage, ["lower", "upper"]] + ) + pred_int = pd.DataFrame(index=valid_indices, columns=columns) + + for c in coverage: + pred_statsmodels = prediction_results.conf_int(alpha=(1 - c)) + pred_statsmodels.columns = ["lower", "upper"] + + pred_int[("Coverage", c, "lower")] = pred_statsmodels.loc[ + valid_indices, "lower" + ] + pred_int[("Coverage", c, "upper")] = pred_statsmodels.loc[ + valid_indices, "upper" + ] + + return pred_int
{"golden_diff": "diff --git a/sktime/forecasting/sarimax.py b/sktime/forecasting/sarimax.py\n--- a/sktime/forecasting/sarimax.py\n+++ b/sktime/forecasting/sarimax.py\n@@ -4,7 +4,9 @@\n \"\"\"Implements SARIMAX.\"\"\"\n \n __all__ = [\"SARIMAX\"]\n-__author__ = [\"TNTran92\"]\n+__author__ = [\"TNTran92\", \"yarnabrina\"]\n+\n+import pandas as pd\n \n from sktime.forecasting.base.adapters import _StatsModelsAdapter\n \n@@ -116,6 +118,7 @@\n \n _tags = {\n \"ignores-exogeneous-X\": False,\n+ \"capability:pred_int\": True,\n }\n \n def __init__(\n@@ -193,3 +196,59 @@\n https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html\n \"\"\"\n return self._fitted_forecaster.summary()\n+\n+ def _predict_interval(self, fh, X=None, coverage=0.95):\n+ \"\"\"Compute/return prediction interval forecasts.\n+\n+ private _predict_interval containing the core logic,\n+ called from predict_interval and default _predict_quantiles\n+\n+ Parameters\n+ ----------\n+ fh : guaranteed to be ForecastingHorizon\n+ The forecasting horizon with the steps ahead to to predict.\n+ X : optional (default=None)\n+ guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n+ Exogeneous time series to predict from.\n+ coverage : float or list of float, optional (default=0.95)\n+ nominal coverage(s) of predictive interval(s)\n+\n+ Returns\n+ -------\n+ pred_int : pd.DataFrame\n+ Column has multi-index: first level is variable name from y in fit,\n+ second level coverage fractions for which intervals were computed.\n+ in the same order as in input `coverage`.\n+ Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n+ Row index is fh, with additional (upper) levels equal to instance levels,\n+ from y seen in fit, if y_inner_mtype is Panel or Hierarchical.\n+ Entries are forecasts of lower/upper interval end,\n+ for var in col index, at nominal coverage in second col index,\n+ lower/upper depending on third col index, for the row index.\n+ Upper/lower interval end forecasts are equivalent to\n+ quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n+ \"\"\"\n+ start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n+ valid_indices = fh.to_absolute(self.cutoff).to_pandas()\n+\n+ prediction_results = self._fitted_forecaster.get_prediction(\n+ start=start, end=end, exog=X\n+ )\n+\n+ columns = pd.MultiIndex.from_product(\n+ [[\"Coverage\"], coverage, [\"lower\", \"upper\"]]\n+ )\n+ pred_int = pd.DataFrame(index=valid_indices, columns=columns)\n+\n+ for c in coverage:\n+ pred_statsmodels = prediction_results.conf_int(alpha=(1 - c))\n+ pred_statsmodels.columns = [\"lower\", \"upper\"]\n+\n+ pred_int[(\"Coverage\", c, \"lower\")] = pred_statsmodels.loc[\n+ valid_indices, \"lower\"\n+ ]\n+ pred_int[(\"Coverage\", c, \"upper\")] = pred_statsmodels.loc[\n+ valid_indices, \"upper\"\n+ ]\n+\n+ return pred_int\n", "issue": "[ENH] `SARIMAX` - implement `predict_interval` or `predict_quantiles` (for `statsmodels` `SARIMAX` interface)\nFrom discord, Huseyin Aytug:\r\n\r\nhi when I want to produce confidence intervals with `SARIMAX`, I get this error:\r\n\r\n`NotImplementedError: SARIMAX does not have the capability to return quantile predictions. 
If you think this estimator should have the capability, please open an issue on sktime.`\r\n\r\nHowever, it should be possible according to the documentation http://www.sktime.net/en/latest/api_reference/auto_generated/sktime.forecasting.sarimax.SARIMAX.html#sktime.forecasting.sarimax.SARIMAX.predict_interval\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements SARIMAX.\"\"\"\n\n__all__ = [\"SARIMAX\"]\n__author__ = [\"TNTran92\"]\n\nfrom sktime.forecasting.base.adapters import _StatsModelsAdapter\n\n\nclass SARIMAX(_StatsModelsAdapter):\n \"\"\"SARIMAX forecaster.\n\n Direct interface for `statsmodels.tsa.api.SARIMAX`.\n\n Parameters\n ----------\n order : iterable or iterable of iterables, optional, default=(1,0,0)\n The (p,d,q) order of the model for the number of AR parameters,\n differences, and MA parameters. `d` must be an integer\n indicating the integration order of the process, while\n `p` and `q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. Default is\n an AR(1) model: (1,0,0).\n seasonal_order : iterable, optional, default=(0,0,0,0)\n The (P,D,Q,s) order of the seasonal component of the model for the\n AR parameters, differences, MA parameters, and periodicity.\n `D` must be an integer indicating the integration order of the process,\n while `P` and `Q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. `s` is an\n integer giving the periodicity (number of periods in season), often it\n is 4 for quarterly data or 12 for monthly data. Default is no seasonal\n effect.\n trend : str{'n','c','t','ct'} or iterable, optional, default=\"c\"\n Parameter controlling the deterministic trend polynomial :math:`A(t)`.\n Can be specified as a string where 'c' indicates a constant (i.e. a\n degree zero component of the trend polynomial), 't' indicates a\n linear trend with time, and 'ct' is both. Can also be specified as an\n iterable defining the non-zero polynomial exponents to include, in\n increasing order. For example, `[1,1,0,1]` denotes\n :math:`a + bt + ct^3`. Default is to not include a trend component.\n measurement_error : bool, optional, default=False\n Whether or not to assume the endogenous observations `endog` were\n measured with error.\n time_varying_regression : bool, optional, default=False\n Used when an explanatory variables, `exog`, are provided\n to select whether or not coefficients on the exogenous regressors are\n allowed to vary over time.\n mle_regression : bool, optional, default=True\n Whether or not to use estimate the regression coefficients for the\n exogenous variables as part of maximum likelihood estimation or through\n the Kalman filter (i.e. recursive least squares). If\n `time_varying_regression` is True, this must be set to False.\n simple_differencing : bool, optional, default=False\n Whether or not to use partially conditional maximum likelihood\n estimation. If True, differencing is performed prior to estimation,\n which discards the first :math:`s D + d` initial rows but results in a\n smaller state-space formulation. See the Notes section for important\n details about interpreting results when this option is used. 
If False,\n the full SARIMAX model is put in state-space form so that all\n datapoints can be used in estimation.\n enforce_stationarity : bool, optional, default=True\n Whether or not to transform the AR parameters to enforce stationarity\n in the autoregressive component of the model.\n enforce_invertibility : bool, optional, default=True\n Whether or not to transform the MA parameters to enforce invertibility\n in the moving average component of the model.\n hamilton_representation : bool, optional, default=False\n Whether or not to use the Hamilton representation of an ARMA process\n (if True) or the Harvey representation (if False).\n concentrate_scale : bool, optional, default=False\n Whether or not to concentrate the scale (variance of the error term)\n out of the likelihood. This reduces the number of parameters estimated\n by maximum likelihood by one, but standard errors will then not\n be available for the scale parameter.\n trend_offset : int, optional, default=1\n The offset at which to start time trend values. Default is 1, so that\n if `trend='t'` the trend is equal to 1, 2, ..., nobs. Typically is only\n set when the model created by extending a previous dataset.\n use_exact_diffuse : bool, optional, default=False\n Whether or not to use exact diffuse initialization for non-stationary\n states. Default is False (in which case approximate diffuse\n initialization is used).\n random_state : int, RandomState instance or None, optional ,\n default=None \u2013 If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by np.random.\n\n See Also\n --------\n ARIMA\n AutoARIMA\n StatsForecastAutoARIMA\n\n References\n ----------\n .. [1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles\n and practice. OTexts, 2014.\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.sarimax import SARIMAX\n >>> y = load_airline()\n >>> forecaster = SARIMAX(\n ... order=(1, 0, 0), trend=\"t\", seasonal_order=(1, 0, 0, 6)) # doctest: +SKIP\n ... 
)\n >>> forecaster.fit(y) # doctest: +SKIP\n SARIMAX(...)\n >>> y_pred = forecaster.predict(fh=y.index) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": False,\n }\n\n def __init__(\n self,\n order=(1, 0, 0),\n seasonal_order=(0, 0, 0, 0),\n trend=\"c\",\n measurement_error=False,\n time_varying_regression=False,\n mle_regression=True,\n simple_differencing=False,\n enforce_stationarity=True,\n enforce_invertibility=True,\n hamilton_representation=False,\n concentrate_scale=False,\n trend_offset=1,\n use_exact_diffuse=False,\n dates=None,\n freq=None,\n missing=\"none\",\n validate_specification=True,\n random_state=None,\n ):\n\n self.order = order\n self.seasonal_order = seasonal_order\n self.trend = trend\n self.measurement_error = measurement_error\n self.time_varying_regression = time_varying_regression\n self.mle_regression = mle_regression\n self.simple_differencing = simple_differencing\n self.enforce_stationarity = enforce_stationarity\n self.enforce_invertibility = enforce_invertibility\n self.hamilton_representation = hamilton_representation\n self.concentrate_scale = concentrate_scale\n self.trend_offset = trend_offset\n self.use_exact_diffuse = use_exact_diffuse\n self.dates = dates\n self.freq = freq\n self.missing = missing\n self.validate_specification = validate_specification\n\n super().__init__(random_state=random_state)\n\n def _fit_forecaster(self, y, X=None):\n from statsmodels.tsa.api import SARIMAX as _SARIMAX\n\n self._forecaster = _SARIMAX(\n endog=y,\n exog=X,\n order=self.order,\n seasonal_order=self.seasonal_order,\n trend=self.trend,\n measurement_error=self.measurement_error,\n time_varying_regression=self.time_varying_regression,\n mle_regression=self.mle_regression,\n simple_differencing=self.simple_differencing,\n enforce_stationarity=self.enforce_stationarity,\n enforce_invertibility=self.enforce_invertibility,\n hamilton_representation=self.hamilton_representation,\n concentrate_scale=self.concentrate_scale,\n trend_offset=self.trend_offset,\n use_exact_diffuse=self.use_exact_diffuse,\n dates=self.dates,\n freq=self.freq,\n missing=self.missing,\n validate_specification=self.validate_specification,\n )\n self._fitted_forecaster = self._forecaster.fit()\n\n def summary(self):\n \"\"\"Get a summary of the fitted forecaster.\n\n This is the same as the implementation in statsmodels:\n https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html\n \"\"\"\n return self._fitted_forecaster.summary()\n", "path": "sktime/forecasting/sarimax.py"}]}
3,176
811
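The golden diff in this record turns on the `capability:pred_int` tag and implements `_predict_interval` for sktime's SARIMAX adapter by delegating to statsmodels. The sketch below shows only the underlying statsmodels call pattern (`get_prediction` followed by `conf_int`); the toy series, model order, and forecast horizon are placeholders and are not taken from the record.

```python
# Sketch of the statsmodels pattern the patched _predict_interval relies on:
# get_prediction() returns a results object whose conf_int(alpha=1 - coverage)
# gives the lower/upper interval bound for each forecast step.
import numpy as np
import pandas as pd
from statsmodels.tsa.statespace.sarimax import SARIMAX

y = pd.Series(np.random.randn(120).cumsum())              # placeholder series
fitted = SARIMAX(y, order=(1, 0, 0), trend="c").fit(disp=False)

coverage = 0.95
prediction = fitted.get_prediction(start=120, end=124)    # 5 steps out of sample
conf = prediction.conf_int(alpha=1 - coverage)
conf.columns = ["lower", "upper"]
print(conf)
```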
gh_patches_debug_10993
rasdani/github-patches
git_diff
ansible__awx-13854
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug Report: awx.awx.export all does not work. ### Please confirm the following - [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). - [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates. - [X] I understand that AWX is open source software provided for free and that I might not receive a timely response. ### Bug Summary using the all option does not work as noted in the [example](https://github.com/ansible/awx/blob/devel/awx_collection/plugins/modules/export.py#L111) It should export everything from that endpoint I tested this with both the recent pip release of awxkit, and one compiled from dev. I also tested with the cli directly and it worked using the command ``` awx export --conf.host https://controller --conf.username admin --conf.password 123 --conf.insecure --users --users > users.json ``` ### AWX version 22.0.0 ### Select the relevant components - [ ] UI - [ ] API - [ ] Docs - [X] Collection - [ ] CLI - [ ] Other ### Installation method kubernetes ### Modifications no ### Ansible version 2.14.4 ### Operating system Fedora ### Web browser _No response_ ### Steps to reproduce Run the following playbook ``` --- - name: Export Workflow hosts: localhost connection: local gather_facts: false collections: - ansible.controller environment: CONTROLLER_HOST: controller.nas CONTROLLER_USERNAME: admin CONTROLLER_PASSWORD: secret123 CONTROLLER_VERIFY_SSL: False tasks: - name: Export all users awx.awx.export: users: 'all' register: export_results - name: Export users to file copy: content: "{{ export_results | to_nice_yaml( width=50, explicit_start=True, explicit_end=True) }}" dest: usersa.yaml ... ``` ### Expected results ``` --- assets: users: - username: AWX-Collection-tests-controller_role-user first_name: Joe last_name: User email: [email protected] is_superuser: false is_system_auditor: false password: controller_password related: roles: - name: Execute type: role content_object: organization: name: Default type: organization name: test-role-workflow type: workflow_job_template natural_key: username: AWX-Collection-tests-controller_role-user type: user - username: AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO first_name: Joe last_name: User email: [email protected] is_superuser: false is_system_auditor: false password: controller_password related: roles: [] natural_key: username: AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO type: user changed: false failed: false ... ``` ### Actual results ``` --- assets: users: [] changed: false failed: false ... ``` ### Additional information I asked @TheRealHaoLiu to test as well to make sure it was not a local problem. </issue> <code> [start of awx_collection/plugins/modules/export.py] 1 #!/usr/bin/python 2 # coding: utf-8 -*- 3 4 # (c) 2017, John Westcott IV <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 9 __metaclass__ = type 10 11 12 ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} 13 14 15 DOCUMENTATION = ''' 16 --- 17 module: export 18 author: "John Westcott IV (@john-westcott-iv)" 19 version_added: "3.7.0" 20 short_description: export resources from Automation Platform Controller. 
21 description: 22 - Export assets from Automation Platform Controller. 23 options: 24 all: 25 description: 26 - Export all assets 27 type: bool 28 default: 'False' 29 organizations: 30 description: 31 - organization names to export 32 type: list 33 elements: str 34 users: 35 description: 36 - user names to export 37 type: list 38 elements: str 39 teams: 40 description: 41 - team names to export 42 type: list 43 elements: str 44 credential_types: 45 description: 46 - credential type names to export 47 type: list 48 elements: str 49 credentials: 50 description: 51 - credential names to export 52 type: list 53 elements: str 54 execution_environments: 55 description: 56 - execution environment names to export 57 type: list 58 elements: str 59 notification_templates: 60 description: 61 - notification template names to export 62 type: list 63 elements: str 64 inventory_sources: 65 description: 66 - inventory soruces to export 67 type: list 68 elements: str 69 inventory: 70 description: 71 - inventory names to export 72 type: list 73 elements: str 74 projects: 75 description: 76 - project names to export 77 type: list 78 elements: str 79 job_templates: 80 description: 81 - job template names to export 82 type: list 83 elements: str 84 workflow_job_templates: 85 description: 86 - workflow names to export 87 type: list 88 elements: str 89 applications: 90 description: 91 - OAuth2 application names to export 92 type: list 93 elements: str 94 schedules: 95 description: 96 - schedule names to export 97 type: list 98 elements: str 99 requirements: 100 - "awxkit >= 9.3.0" 101 notes: 102 - Specifying a name of "all" for any asset type will export all items of that asset type. 103 extends_documentation_fragment: awx.awx.auth 104 ''' 105 106 EXAMPLES = ''' 107 - name: Export all assets 108 export: 109 all: True 110 111 - name: Export all inventories 112 export: 113 inventory: 'all' 114 115 - name: Export a job template named "My Template" and all Credentials 116 export: 117 job_templates: "My Template" 118 credential: 'all' 119 120 - name: Export a list of inventories 121 export: 122 inventory: ['My Inventory 1', 'My Inventory 2'] 123 ''' 124 125 import logging 126 from ansible.module_utils.six.moves import StringIO 127 from ..module_utils.awxkit import ControllerAWXKitModule 128 129 try: 130 from awxkit.api.pages.api import EXPORTABLE_RESOURCES 131 132 HAS_EXPORTABLE_RESOURCES = True 133 except ImportError: 134 HAS_EXPORTABLE_RESOURCES = False 135 136 137 def main(): 138 argument_spec = dict( 139 all=dict(type='bool', default=False), 140 ) 141 142 # We are not going to raise an error here because the __init__ method of ControllerAWXKitModule will do that for us 143 if HAS_EXPORTABLE_RESOURCES: 144 for resource in EXPORTABLE_RESOURCES: 145 argument_spec[resource] = dict(type='list', elements='str') 146 147 module = ControllerAWXKitModule(argument_spec=argument_spec) 148 149 if not HAS_EXPORTABLE_RESOURCES: 150 module.fail_json(msg="Your version of awxkit does not have import/export") 151 152 # The export process will never change the AWX system 153 module.json_output['changed'] = False 154 155 # The exporter code currently works like the following: 156 # Empty string == all assets of that type 157 # Non-Empty string = just one asset of that type (by name or ID) 158 # Asset type not present or None = skip asset type (unless everything is None, then export all) 159 # Here we are going to setup a dict of values to export 160 export_args = {} 161 for resource in EXPORTABLE_RESOURCES: 162 if 
module.params.get('all') or module.params.get(resource) == 'all': 163 # If we are exporting everything or we got the keyword "all" we pass in an empty string for this asset type 164 export_args[resource] = '' 165 else: 166 # Otherwise we take either the string or None (if the parameter was not passed) to get one or no items 167 export_args[resource] = module.params.get(resource) 168 169 # Currently the export process does not return anything on error 170 # It simply just logs to Python's logger 171 # Set up a log gobbler to get error messages from export_assets 172 log_capture_string = StringIO() 173 ch = logging.StreamHandler(log_capture_string) 174 for logger_name in ['awxkit.api.pages.api', 'awxkit.api.pages.page']: 175 logger = logging.getLogger(logger_name) 176 logger.setLevel(logging.ERROR) 177 ch.setLevel(logging.ERROR) 178 179 logger.addHandler(ch) 180 log_contents = '' 181 182 # Run the export process 183 try: 184 module.json_output['assets'] = module.get_api_v2_object().export_assets(**export_args) 185 module.exit_json(**module.json_output) 186 except Exception as e: 187 module.fail_json(msg="Failed to export assets {0}".format(e)) 188 finally: 189 # Finally, consume the logs in case there were any errors and die if there were 190 log_contents = log_capture_string.getvalue() 191 log_capture_string.close() 192 if log_contents != '': 193 module.fail_json(msg=log_contents) 194 195 196 if __name__ == '__main__': 197 main() 198 [end of awx_collection/plugins/modules/export.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awx_collection/plugins/modules/export.py b/awx_collection/plugins/modules/export.py --- a/awx_collection/plugins/modules/export.py +++ b/awx_collection/plugins/modules/export.py @@ -159,7 +159,7 @@ # Here we are going to setup a dict of values to export export_args = {} for resource in EXPORTABLE_RESOURCES: - if module.params.get('all') or module.params.get(resource) == 'all': + if module.params.get('all') or module.params.get(resource) == ['all']: # If we are exporting everything or we got the keyword "all" we pass in an empty string for this asset type export_args[resource] = '' else:
{"golden_diff": "diff --git a/awx_collection/plugins/modules/export.py b/awx_collection/plugins/modules/export.py\n--- a/awx_collection/plugins/modules/export.py\n+++ b/awx_collection/plugins/modules/export.py\n@@ -159,7 +159,7 @@\n # Here we are going to setup a dict of values to export\n export_args = {}\n for resource in EXPORTABLE_RESOURCES:\n- if module.params.get('all') or module.params.get(resource) == 'all':\n+ if module.params.get('all') or module.params.get(resource) == ['all']:\n # If we are exporting everything or we got the keyword \"all\" we pass in an empty string for this asset type\n export_args[resource] = ''\n else:\n", "issue": " Bug Report: awx.awx.export all does not work.\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nusing the all option does not work as noted in the [example](https://github.com/ansible/awx/blob/devel/awx_collection/plugins/modules/export.py#L111)\r\n\r\nIt should export everything from that endpoint\r\n\r\nI tested this with both the recent pip release of awxkit, and one compiled from dev.\r\n\r\nI also tested with the cli directly and it worked using the command\r\n```\r\nawx export --conf.host https://controller --conf.username admin --conf.password 123 --conf.insecure --users --users > users.json\r\n```\r\n\r\n### AWX version\r\n\r\n22.0.0\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [ ] API\r\n- [ ] Docs\r\n- [X] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\nkubernetes\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n2.14.4\r\n\r\n### Operating system\r\n\r\nFedora\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\nRun the following playbook\r\n```\r\n---\r\n- name: Export Workflow\r\n hosts: localhost\r\n connection: local\r\n gather_facts: false\r\n collections:\r\n - ansible.controller\r\n environment:\r\n CONTROLLER_HOST: controller.nas\r\n CONTROLLER_USERNAME: admin\r\n CONTROLLER_PASSWORD: secret123\r\n CONTROLLER_VERIFY_SSL: False\r\n\r\n tasks:\r\n - name: Export all users\r\n awx.awx.export:\r\n users: 'all'\r\n register: export_results\r\n\r\n - name: Export users to file\r\n copy:\r\n content: \"{{ export_results | to_nice_yaml( width=50, explicit_start=True, explicit_end=True) }}\"\r\n dest: usersa.yaml\r\n...\r\n```\r\n### Expected results\r\n```\r\n---\r\nassets:\r\n users:\r\n - username: AWX-Collection-tests-controller_role-user\r\n first_name: Joe\r\n last_name: User\r\n email: [email protected]\r\n is_superuser: false\r\n is_system_auditor: false\r\n password: controller_password\r\n related:\r\n roles:\r\n - name: Execute\r\n type: role\r\n content_object:\r\n organization:\r\n name: Default\r\n type: organization\r\n name: test-role-workflow\r\n type: workflow_job_template\r\n natural_key:\r\n username: AWX-Collection-tests-controller_role-user\r\n type: user\r\n - username: AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO\r\n first_name: Joe\r\n last_name: User\r\n email: [email protected]\r\n is_superuser: false\r\n is_system_auditor: false\r\n password: controller_password\r\n related:\r\n roles: []\r\n natural_key:\r\n username: 
AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO\r\n type: user\r\nchanged: false\r\nfailed: false\r\n...\r\n```\r\n\r\n### Actual results\r\n```\r\n---\r\nassets:\r\n users: []\r\nchanged: false\r\nfailed: false\r\n...\r\n```\r\n### Additional information\r\n\r\nI asked @TheRealHaoLiu to test as well to make sure it was not a local problem. \n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, John Westcott IV <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: export\nauthor: \"John Westcott IV (@john-westcott-iv)\"\nversion_added: \"3.7.0\"\nshort_description: export resources from Automation Platform Controller.\ndescription:\n - Export assets from Automation Platform Controller.\noptions:\n all:\n description:\n - Export all assets\n type: bool\n default: 'False'\n organizations:\n description:\n - organization names to export\n type: list\n elements: str\n users:\n description:\n - user names to export\n type: list\n elements: str\n teams:\n description:\n - team names to export\n type: list\n elements: str\n credential_types:\n description:\n - credential type names to export\n type: list\n elements: str\n credentials:\n description:\n - credential names to export\n type: list\n elements: str\n execution_environments:\n description:\n - execution environment names to export\n type: list\n elements: str\n notification_templates:\n description:\n - notification template names to export\n type: list\n elements: str\n inventory_sources:\n description:\n - inventory soruces to export\n type: list\n elements: str\n inventory:\n description:\n - inventory names to export\n type: list\n elements: str\n projects:\n description:\n - project names to export\n type: list\n elements: str\n job_templates:\n description:\n - job template names to export\n type: list\n elements: str\n workflow_job_templates:\n description:\n - workflow names to export\n type: list\n elements: str\n applications:\n description:\n - OAuth2 application names to export\n type: list\n elements: str\n schedules:\n description:\n - schedule names to export\n type: list\n elements: str\nrequirements:\n - \"awxkit >= 9.3.0\"\nnotes:\n - Specifying a name of \"all\" for any asset type will export all items of that asset type.\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Export all assets\n export:\n all: True\n\n- name: Export all inventories\n export:\n inventory: 'all'\n\n- name: Export a job template named \"My Template\" and all Credentials\n export:\n job_templates: \"My Template\"\n credential: 'all'\n\n- name: Export a list of inventories\n export:\n inventory: ['My Inventory 1', 'My Inventory 2']\n'''\n\nimport logging\nfrom ansible.module_utils.six.moves import StringIO\nfrom ..module_utils.awxkit import ControllerAWXKitModule\n\ntry:\n from awxkit.api.pages.api import EXPORTABLE_RESOURCES\n\n HAS_EXPORTABLE_RESOURCES = True\nexcept ImportError:\n HAS_EXPORTABLE_RESOURCES = False\n\n\ndef main():\n argument_spec = dict(\n all=dict(type='bool', default=False),\n )\n\n # We are not going to raise an error here because the __init__ method of ControllerAWXKitModule will do that for us\n if HAS_EXPORTABLE_RESOURCES:\n for resource in 
EXPORTABLE_RESOURCES:\n argument_spec[resource] = dict(type='list', elements='str')\n\n module = ControllerAWXKitModule(argument_spec=argument_spec)\n\n if not HAS_EXPORTABLE_RESOURCES:\n module.fail_json(msg=\"Your version of awxkit does not have import/export\")\n\n # The export process will never change the AWX system\n module.json_output['changed'] = False\n\n # The exporter code currently works like the following:\n # Empty string == all assets of that type\n # Non-Empty string = just one asset of that type (by name or ID)\n # Asset type not present or None = skip asset type (unless everything is None, then export all)\n # Here we are going to setup a dict of values to export\n export_args = {}\n for resource in EXPORTABLE_RESOURCES:\n if module.params.get('all') or module.params.get(resource) == 'all':\n # If we are exporting everything or we got the keyword \"all\" we pass in an empty string for this asset type\n export_args[resource] = ''\n else:\n # Otherwise we take either the string or None (if the parameter was not passed) to get one or no items\n export_args[resource] = module.params.get(resource)\n\n # Currently the export process does not return anything on error\n # It simply just logs to Python's logger\n # Set up a log gobbler to get error messages from export_assets\n log_capture_string = StringIO()\n ch = logging.StreamHandler(log_capture_string)\n for logger_name in ['awxkit.api.pages.api', 'awxkit.api.pages.page']:\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.ERROR)\n ch.setLevel(logging.ERROR)\n\n logger.addHandler(ch)\n log_contents = ''\n\n # Run the export process\n try:\n module.json_output['assets'] = module.get_api_v2_object().export_assets(**export_args)\n module.exit_json(**module.json_output)\n except Exception as e:\n module.fail_json(msg=\"Failed to export assets {0}\".format(e))\n finally:\n # Finally, consume the logs in case there were any errors and die if there were\n log_contents = log_capture_string.getvalue()\n log_capture_string.close()\n if log_contents != '':\n module.fail_json(msg=log_contents)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/export.py"}]}
3,130
161
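The one-line change in this record's diff works because the module's argument spec declares every exportable resource as `dict(type='list', elements='str')`, so Ansible coerces a play that passes `users: 'all'` into a one-element list before the module ever sees it. The snippet below is a standalone illustration of that comparison, not code from the collection itself.

```python
# What module.params.get("users") actually holds when the play sets users: 'all'
resource_value = ["all"]

print(resource_value == "all")    # False -> the old check never matched
print(resource_value == ["all"])  # True  -> the patched check selects "export everything"
```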
gh_patches_debug_47917
rasdani/github-patches
git_diff
mindsdb__mindsdb-1620
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add option to list tables in MySQL integration :bookmark_tabs: When users create a connection to the MySQL database it will be useful to show them tips with a list of tables. To be able to do this we need a new method `get_tables_list` implemented in the MySQL integration class. ## Steps :male_detective: :female_detective: - Frok MindsDB repo - Add new implementation in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51 - Make a PR to staging branch ## Additional rewards :1st_place_medal: Each code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/ </issue> <code> [start of mindsdb/integrations/mysql/mysql.py] 1 import os 2 import shutil 3 import tempfile 4 5 from contextlib import closing 6 import mysql.connector 7 8 from lightwood.api import dtype 9 from mindsdb.integrations.base import Integration 10 from mindsdb.utilities.log import log 11 12 13 class MySQLConnectionChecker: 14 def __init__(self, **kwargs): 15 self.host = kwargs.get('host') 16 self.port = kwargs.get('port') 17 self.user = kwargs.get('user') 18 self.password = kwargs.get('password') 19 self.ssl = kwargs.get('ssl') 20 self.ssl_ca = kwargs.get('ssl_ca') 21 self.ssl_cert = kwargs.get('ssl_cert') 22 self.ssl_key = kwargs.get('ssl_key') 23 24 def _get_connnection(self): 25 config = { 26 "host": self.host, 27 "port": self.port, 28 "user": self.user, 29 "password": self.password 30 } 31 if self.ssl is True: 32 config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL] 33 if self.ssl_ca is not None: 34 config["ssl_ca"] = self.ssl_ca 35 if self.ssl_cert is not None: 36 config["ssl_cert"] = self.ssl_cert 37 if self.ssl_key is not None: 38 config["ssl_key"] = self.ssl_key 39 return mysql.connector.connect(**config) 40 41 def check_connection(self): 42 try: 43 con = self._get_connnection() 44 with closing(con) as con: 45 connected = con.is_connected() 46 except Exception: 47 connected = False 48 return connected 49 50 51 class MySQL(Integration, MySQLConnectionChecker): 52 def __init__(self, config, name, db_info): 53 super().__init__(config, name) 54 self.user = db_info.get('user') 55 self.password = db_info.get('password') 56 self.host = db_info.get('host') 57 self.port = db_info.get('port') 58 self.ssl = db_info.get('ssl') 59 self.ssl_ca = db_info.get('ssl_ca') 60 self.ssl_cert = db_info.get('ssl_cert') 61 self.ssl_key = db_info.get('ssl_key') 62 63 def _to_mysql_table(self, dtype_dict, predicted_cols, columns): 64 subtype_map = { 65 dtype.integer: 'int', 66 dtype.float: 'double', 67 dtype.binary: 'bool', 68 dtype.date: 'Date', 69 dtype.datetime: 'Datetime', 70 dtype.binary: 'VARCHAR(500)', 71 dtype.categorical: 'VARCHAR(500)', 72 dtype.tags: 'VARCHAR(500)', 73 dtype.image: 'VARCHAR(500)', 74 dtype.video: 'VARCHAR(500)', 75 dtype.audio: 'VARCHAR(500)', 76 dtype.short_text: 'VARCHAR(500)', 77 dtype.rich_text: 'VARCHAR(500)', 78 dtype.array: 'VARCHAR(500)' 79 } 80 81 column_declaration = [] 82 for name in columns: 83 try: 84 col_subtype = dtype_dict[name] 85 new_type = subtype_map[col_subtype] 86 column_declaration.append(f' `{name}` {new_type} ') 87 if name in predicted_cols: 88 column_declaration.append(f' `{name}_original` {new_type} ') 89 except Exception as e: 90 log.error(f'Error: can not determine mysql data 
type for column {name}: {e}') 91 92 return column_declaration 93 94 def _escape_table_name(self, name): 95 return '`' + name.replace('`', '``') + '`' 96 97 def _query(self, query): 98 con = self._get_connnection() 99 with closing(con) as con: 100 cur = con.cursor(dictionary=True, buffered=True) 101 cur.execute(query) 102 res = True 103 try: 104 res = cur.fetchall() 105 except Exception: 106 pass 107 con.commit() 108 109 return res 110 111 def _get_connect_string(self, table): 112 user = f"{self.config['api']['mysql']['user']}_{self.name}" 113 password = self.config['api']['mysql']['password'] 114 host = self.config['api']['mysql']['host'] 115 port = self.config['api']['mysql']['port'] 116 117 if password is None or password == '': 118 connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}' 119 else: 120 connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}' 121 122 return connect 123 124 def setup(self): 125 self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}') 126 self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}') 127 128 connect = self._get_connect_string('predictors') 129 130 q = f""" 131 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors ( 132 name VARCHAR(500), 133 status VARCHAR(500), 134 accuracy VARCHAR(500), 135 predict VARCHAR(500), 136 select_data_query VARCHAR(500), 137 external_datasource VARCHAR(500), 138 training_options VARCHAR(500), 139 key name_key (name) 140 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}'; 141 """ 142 self._query(q) 143 144 connect = self._get_connect_string('commands') 145 146 q = f""" 147 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands ( 148 command VARCHAR(500), 149 key command_key (command) 150 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}'; 151 """ 152 self._query(q) 153 154 def register_predictors(self, model_data_arr): 155 for model_meta in model_data_arr: 156 name = model_meta['name'] 157 predict = model_meta['predict'] 158 if not isinstance(predict, list): 159 predict = [predict] 160 columns_sql = ','.join(self._to_mysql_table( 161 model_meta['dtype_dict'], 162 predict, 163 list(model_meta['dtype_dict'].keys()) 164 )) 165 columns_sql += ',`when_data` varchar(500)' 166 columns_sql += ',`select_data_query` varchar(500)' 167 columns_sql += ',`external_datasource` varchar(500)' 168 for col in predict: 169 columns_sql += f',`{col}_confidence` double' 170 if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float): 171 columns_sql += f',`{col}_min` double' 172 columns_sql += f',`{col}_max` double' 173 columns_sql += f',`{col}_explain` varchar(500)' 174 175 connect = self._get_connect_string(name) 176 177 self.unregister_predictor(name) 178 q = f""" 179 CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} ( 180 {columns_sql}, 181 index when_data_index (when_data), 182 index select_data_query_index (select_data_query), 183 index external_datasource_index (external_datasource) 184 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}'; 185 """ 186 self._query(q) 187 188 def unregister_predictor(self, name): 189 q = f""" 190 drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)}; 191 """ 192 self._query(q) 193 194 def get_row_count(self, query): 195 q = f""" 196 SELECT COUNT(*) as count 197 FROM ({query}) as query;""" 198 result = self._query(q) 199 return result[0]['count'] 200 [end of mindsdb/integrations/mysql/mysql.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this 
repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py --- a/mindsdb/integrations/mysql/mysql.py +++ b/mindsdb/integrations/mysql/mysql.py @@ -197,3 +197,10 @@ FROM ({query}) as query;""" result = self._query(q) return result[0]['count'] + + def get_tables_list(self): + q= f""" + SHOW TABLES; + """ + result = self._query(q) + return result
{"golden_diff": "diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py\n--- a/mindsdb/integrations/mysql/mysql.py\n+++ b/mindsdb/integrations/mysql/mysql.py\n@@ -197,3 +197,10 @@\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n+ \n+ def get_tables_list(self):\n+ q= f\"\"\"\n+ SHOW TABLES;\n+ \"\"\"\n+ result = self._query(q)\n+ return result\n", "issue": "Add option to list tables in MySQL integration :bookmark_tabs: \nWhen users create a connection to the MySQL database it will be useful to show them tips with a list of tables. To be able to do this we need a new method `get_tables_list` implemented in the MySQL integration class.\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Frok MindsDB repo\r\n- Add new implementation in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51\r\n- Make a PR to staging branch\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\n\nfrom contextlib import closing\nimport mysql.connector\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass MySQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.ssl = kwargs.get('ssl')\n self.ssl_ca = kwargs.get('ssl_ca')\n self.ssl_cert = kwargs.get('ssl_cert')\n self.ssl_key = kwargs.get('ssl_key')\n\n def _get_connnection(self):\n config = {\n \"host\": self.host,\n \"port\": self.port,\n \"user\": self.user,\n \"password\": self.password\n }\n if self.ssl is True:\n config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]\n if self.ssl_ca is not None:\n config[\"ssl_ca\"] = self.ssl_ca\n if self.ssl_cert is not None:\n config[\"ssl_cert\"] = self.ssl_cert\n if self.ssl_key is not None:\n config[\"ssl_key\"] = self.ssl_key\n return mysql.connector.connect(**config)\n\n def check_connection(self):\n try:\n con = self._get_connnection()\n with closing(con) as con:\n connected = con.is_connected()\n except Exception:\n connected = False\n return connected\n\n\nclass MySQL(Integration, MySQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.ssl = db_info.get('ssl')\n self.ssl_ca = db_info.get('ssl_ca')\n self.ssl_cert = db_info.get('ssl_cert')\n self.ssl_key = db_info.get('ssl_key')\n\n def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.array: 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n 
new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine mysql data type for column {name}: {e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '``') + '`'\n\n def _query(self, query):\n con = self._get_connnection()\n with closing(con) as con:\n cur = con.cursor(dictionary=True, buffered=True)\n cur.execute(query)\n res = True\n try:\n res = cur.fetchall()\n except Exception:\n pass\n con.commit()\n\n return res\n\n def _get_connect_string(self, table):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n if password is None or password == '':\n connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'\n else:\n connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'\n\n return connect\n\n def setup(self):\n self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')\n\n connect = self._get_connect_string('predictors')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name VARCHAR(500),\n status VARCHAR(500),\n accuracy VARCHAR(500),\n predict VARCHAR(500),\n select_data_query VARCHAR(500),\n external_datasource VARCHAR(500),\n training_options VARCHAR(500),\n key name_key (name)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n connect = self._get_connect_string('commands')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command VARCHAR(500),\n key command_key (command)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_mysql_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` varchar(500)'\n columns_sql += ',`select_data_query` varchar(500)'\n columns_sql += ',`external_datasource` varchar(500)'\n for col in predict:\n columns_sql += f',`{col}_confidence` double'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',`{col}_min` double'\n columns_sql += f',`{col}_max` double'\n columns_sql += f',`{col}_explain` varchar(500)'\n\n connect = self._get_connect_string(name)\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql},\n index when_data_index (when_data),\n index select_data_query_index (select_data_query),\n index external_datasource_index (external_datasource)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_row_count(self, query):\n q = f\"\"\" \n SELECT COUNT(*) as count\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n", "path": "mindsdb/integrations/mysql/mysql.py"}]}
2,801
127
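The patch in this record gives the MySQL integration a `get_tables_list` method that simply runs `SHOW TABLES` through the existing `_query` helper. For context, the raw query looks like the sketch below when issued directly with mysql-connector; the host, credentials, and database name are placeholders rather than values from the record.

```python
# Bare-bones equivalent of the new get_tables_list(): run SHOW TABLES and
# collect the rows. A default database must be selected for a bare SHOW TABLES.
from contextlib import closing

import mysql.connector

config = {
    "host": "127.0.0.1",          # placeholder connection details
    "port": 3306,
    "user": "mysql_user",
    "password": "mysql_password",
    "database": "mindsdb",
}

with closing(mysql.connector.connect(**config)) as con:
    cur = con.cursor(dictionary=True, buffered=True)
    cur.execute("SHOW TABLES;")
    tables = cur.fetchall()

print(tables)  # e.g. [{'Tables_in_mindsdb': 'predictors'}, ...]
```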
gh_patches_debug_4148
rasdani/github-patches
git_diff
psf__black-4128
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Changes to files happing quickly after running `black` are not detected **Describe the bug** It seems `black` does not detect that a file has been changed and needs to be re-checked, if the change happens quickly after `black` has been run on the file. I'm talking about this feature: [Ignoring unmodified files](https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#ignoring-unmodified-files) **To Reproduce** I'm using the following shell script to reproduce the issue: ```bash #! /usr/bin/env bash set -eux echo 'print (1)' > file.py black file.py echo 'print ()' > file.py black file.py cat file.py ``` The script writes overwrites the file and then runs `black`, doing this twice in a row. In the second run, `black` should reformat `print ()` to `print()`, but this does not happen: ``` $ ./bug.sh + echo 'print (1)' + black file.py reformatted file.py All done! ✨ 🍰 ✨ 1 file reformatted. + echo 'print ()' + black file.py All done! ✨ 🍰 ✨ 1 file left unchanged. + cat file.py print () ``` Even running it manually after a few seconds does not fix the issue, but removing the cache directory does: ``` $ black file.py All done! ✨ 🍰 ✨ 1 file left unchanged. $ cat file.py print () $ rm -r ~/Library/Caches/black/ $ black file.py reformatted file.py All done! ✨ 🍰 ✨ 1 file reformatted. $ cat file.py print() ``` **Expected behavior** I think black should not get confused by changes to files that happen quickly after it has formatted a file. The file should be checked again if it is possible that its content has changed without also changing its timestamp. **Environment** - Black's version: ``` $ black --version black, 23.12.0 (compiled: yes) Python (CPython) 3.11.6 ``` - OS and Python version: macOS 12.7.1, Python 3.11.6 installed via Homebrew. </issue> <code> [start of src/black/cache.py] 1 """Caching of formatted files with feature-based invalidation.""" 2 3 import hashlib 4 import os 5 import pickle 6 import sys 7 import tempfile 8 from dataclasses import dataclass, field 9 from pathlib import Path 10 from typing import Dict, Iterable, NamedTuple, Set, Tuple 11 12 from platformdirs import user_cache_dir 13 14 from _black_version import version as __version__ 15 from black.mode import Mode 16 17 if sys.version_info >= (3, 11): 18 from typing import Self 19 else: 20 from typing_extensions import Self 21 22 23 class FileData(NamedTuple): 24 st_mtime: float 25 st_size: int 26 hash: str 27 28 29 def get_cache_dir() -> Path: 30 """Get the cache directory used by black. 31 32 Users can customize this directory on all systems using `BLACK_CACHE_DIR` 33 environment variable. By default, the cache directory is the user cache directory 34 under the black application. 35 36 This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid 37 repeated calls. 38 """ 39 # NOTE: Function mostly exists as a clean way to test getting the cache directory. 
40 default_cache_dir = user_cache_dir("black") 41 cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir)) 42 cache_dir = cache_dir / __version__ 43 return cache_dir 44 45 46 CACHE_DIR = get_cache_dir() 47 48 49 def get_cache_file(mode: Mode) -> Path: 50 return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" 51 52 53 @dataclass 54 class Cache: 55 mode: Mode 56 cache_file: Path 57 file_data: Dict[str, FileData] = field(default_factory=dict) 58 59 @classmethod 60 def read(cls, mode: Mode) -> Self: 61 """Read the cache if it exists and is well formed. 62 63 If it is not well formed, the call to write later should 64 resolve the issue. 65 """ 66 cache_file = get_cache_file(mode) 67 if not cache_file.exists(): 68 return cls(mode, cache_file) 69 70 with cache_file.open("rb") as fobj: 71 try: 72 data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj) 73 file_data = {k: FileData(*v) for k, v in data.items()} 74 except (pickle.UnpicklingError, ValueError, IndexError): 75 return cls(mode, cache_file) 76 77 return cls(mode, cache_file, file_data) 78 79 @staticmethod 80 def hash_digest(path: Path) -> str: 81 """Return hash digest for path.""" 82 83 data = path.read_bytes() 84 return hashlib.sha256(data).hexdigest() 85 86 @staticmethod 87 def get_file_data(path: Path) -> FileData: 88 """Return file data for path.""" 89 90 stat = path.stat() 91 hash = Cache.hash_digest(path) 92 return FileData(stat.st_mtime, stat.st_size, hash) 93 94 def is_changed(self, source: Path) -> bool: 95 """Check if source has changed compared to cached version.""" 96 res_src = source.resolve() 97 old = self.file_data.get(str(res_src)) 98 if old is None: 99 return True 100 101 st = res_src.stat() 102 if st.st_size != old.st_size: 103 return True 104 if int(st.st_mtime) != int(old.st_mtime): 105 new_hash = Cache.hash_digest(res_src) 106 if new_hash != old.hash: 107 return True 108 return False 109 110 def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: 111 """Split an iterable of paths in `sources` into two sets. 112 113 The first contains paths of files that modified on disk or are not in the 114 cache. The other contains paths to non-modified files. 115 """ 116 changed: Set[Path] = set() 117 done: Set[Path] = set() 118 for src in sources: 119 if self.is_changed(src): 120 changed.add(src) 121 else: 122 done.add(src) 123 return changed, done 124 125 def write(self, sources: Iterable[Path]) -> None: 126 """Update the cache file data and write a new cache file.""" 127 self.file_data.update( 128 **{str(src.resolve()): Cache.get_file_data(src) for src in sources} 129 ) 130 try: 131 CACHE_DIR.mkdir(parents=True, exist_ok=True) 132 with tempfile.NamedTemporaryFile( 133 dir=str(self.cache_file.parent), delete=False 134 ) as f: 135 # We store raw tuples in the cache because pickling NamedTuples 136 # doesn't work with mypyc on Python 3.8, and because it's faster. 137 data: Dict[str, Tuple[float, int, str]] = { 138 k: (*v,) for k, v in self.file_data.items() 139 } 140 pickle.dump(data, f, protocol=4) 141 os.replace(f.name, self.cache_file) 142 except OSError: 143 pass 144 [end of src/black/cache.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/black/cache.py b/src/black/cache.py --- a/src/black/cache.py +++ b/src/black/cache.py @@ -101,7 +101,7 @@ st = res_src.stat() if st.st_size != old.st_size: return True - if int(st.st_mtime) != int(old.st_mtime): + if st.st_mtime != old.st_mtime: new_hash = Cache.hash_digest(res_src) if new_hash != old.hash: return True
{"golden_diff": "diff --git a/src/black/cache.py b/src/black/cache.py\n--- a/src/black/cache.py\n+++ b/src/black/cache.py\n@@ -101,7 +101,7 @@\n st = res_src.stat()\n if st.st_size != old.st_size:\n return True\n- if int(st.st_mtime) != int(old.st_mtime):\n+ if st.st_mtime != old.st_mtime:\n new_hash = Cache.hash_digest(res_src)\n if new_hash != old.hash:\n return True\n", "issue": "Changes to files happing quickly after running `black` are not detected\n**Describe the bug**\r\n\r\nIt seems `black` does not detect that a file has been changed and needs to be re-checked, if the change happens quickly after `black` has been run on the file.\r\n\r\nI'm talking about this feature: [Ignoring unmodified files](https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#ignoring-unmodified-files)\r\n\r\n**To Reproduce**\r\n\r\nI'm using the following shell script to reproduce the issue:\r\n\r\n```bash\r\n#! /usr/bin/env bash\r\n\r\nset -eux\r\n\r\necho 'print (1)' > file.py\r\nblack file.py\r\necho 'print ()' > file.py\r\nblack file.py\r\ncat file.py\r\n```\r\n\r\nThe script writes overwrites the file and then runs `black`, doing this twice in a row. In the second run, `black` should reformat `print ()` to `print()`, but this does not happen:\r\n\r\n```\r\n$ ./bug.sh \r\n+ echo 'print (1)'\r\n+ black file.py\r\nreformatted file.py\r\n\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file reformatted.\r\n+ echo 'print ()'\r\n+ black file.py\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file left unchanged.\r\n+ cat file.py\r\nprint ()\r\n```\r\n\r\nEven running it manually after a few seconds does not fix the issue, but removing the cache directory does:\r\n\r\n```\r\n$ black file.py\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file left unchanged.\r\n$ cat file.py\r\nprint ()\r\n$ rm -r ~/Library/Caches/black/\r\n$ black file.py\r\nreformatted file.py\r\n\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file reformatted.\r\n$ cat file.py\r\nprint()\r\n```\r\n\r\n**Expected behavior**\r\n\r\nI think black should not get confused by changes to files that happen quickly after it has formatted a file. The file should be checked again if it is possible that its content has changed without also changing its timestamp.\r\n\r\n**Environment**\r\n\r\n- Black's version:\r\n\r\n ```\r\n $ black --version\r\n black, 23.12.0 (compiled: yes)\r\n Python (CPython) 3.11.6\r\n ```\r\n\r\n- OS and Python version: macOS 12.7.1, Python 3.11.6 installed via Homebrew.\n", "before_files": [{"content": "\"\"\"Caching of formatted files with feature-based invalidation.\"\"\"\n\nimport hashlib\nimport os\nimport pickle\nimport sys\nimport tempfile\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\nfrom typing import Dict, Iterable, NamedTuple, Set, Tuple\n\nfrom platformdirs import user_cache_dir\n\nfrom _black_version import version as __version__\nfrom black.mode import Mode\n\nif sys.version_info >= (3, 11):\n from typing import Self\nelse:\n from typing_extensions import Self\n\n\nclass FileData(NamedTuple):\n st_mtime: float\n st_size: int\n hash: str\n\n\ndef get_cache_dir() -> Path:\n \"\"\"Get the cache directory used by black.\n\n Users can customize this directory on all systems using `BLACK_CACHE_DIR`\n environment variable. 
By default, the cache directory is the user cache directory\n under the black application.\n\n This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid\n repeated calls.\n \"\"\"\n # NOTE: Function mostly exists as a clean way to test getting the cache directory.\n default_cache_dir = user_cache_dir(\"black\")\n cache_dir = Path(os.environ.get(\"BLACK_CACHE_DIR\", default_cache_dir))\n cache_dir = cache_dir / __version__\n return cache_dir\n\n\nCACHE_DIR = get_cache_dir()\n\n\ndef get_cache_file(mode: Mode) -> Path:\n return CACHE_DIR / f\"cache.{mode.get_cache_key()}.pickle\"\n\n\n@dataclass\nclass Cache:\n mode: Mode\n cache_file: Path\n file_data: Dict[str, FileData] = field(default_factory=dict)\n\n @classmethod\n def read(cls, mode: Mode) -> Self:\n \"\"\"Read the cache if it exists and is well formed.\n\n If it is not well formed, the call to write later should\n resolve the issue.\n \"\"\"\n cache_file = get_cache_file(mode)\n if not cache_file.exists():\n return cls(mode, cache_file)\n\n with cache_file.open(\"rb\") as fobj:\n try:\n data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)\n file_data = {k: FileData(*v) for k, v in data.items()}\n except (pickle.UnpicklingError, ValueError, IndexError):\n return cls(mode, cache_file)\n\n return cls(mode, cache_file, file_data)\n\n @staticmethod\n def hash_digest(path: Path) -> str:\n \"\"\"Return hash digest for path.\"\"\"\n\n data = path.read_bytes()\n return hashlib.sha256(data).hexdigest()\n\n @staticmethod\n def get_file_data(path: Path) -> FileData:\n \"\"\"Return file data for path.\"\"\"\n\n stat = path.stat()\n hash = Cache.hash_digest(path)\n return FileData(stat.st_mtime, stat.st_size, hash)\n\n def is_changed(self, source: Path) -> bool:\n \"\"\"Check if source has changed compared to cached version.\"\"\"\n res_src = source.resolve()\n old = self.file_data.get(str(res_src))\n if old is None:\n return True\n\n st = res_src.stat()\n if st.st_size != old.st_size:\n return True\n if int(st.st_mtime) != int(old.st_mtime):\n new_hash = Cache.hash_digest(res_src)\n if new_hash != old.hash:\n return True\n return False\n\n def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:\n \"\"\"Split an iterable of paths in `sources` into two sets.\n\n The first contains paths of files that modified on disk or are not in the\n cache. The other contains paths to non-modified files.\n \"\"\"\n changed: Set[Path] = set()\n done: Set[Path] = set()\n for src in sources:\n if self.is_changed(src):\n changed.add(src)\n else:\n done.add(src)\n return changed, done\n\n def write(self, sources: Iterable[Path]) -> None:\n \"\"\"Update the cache file data and write a new cache file.\"\"\"\n self.file_data.update(\n **{str(src.resolve()): Cache.get_file_data(src) for src in sources}\n )\n try:\n CACHE_DIR.mkdir(parents=True, exist_ok=True)\n with tempfile.NamedTemporaryFile(\n dir=str(self.cache_file.parent), delete=False\n ) as f:\n # We store raw tuples in the cache because pickling NamedTuples\n # doesn't work with mypyc on Python 3.8, and because it's faster.\n data: Dict[str, Tuple[float, int, str]] = {\n k: (*v,) for k, v in self.file_data.items()\n }\n pickle.dump(data, f, protocol=4)\n os.replace(f.name, self.cache_file)\n except OSError:\n pass\n", "path": "src/black/cache.py"}]}
2,413
110
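The black diff in this record drops the `int()` truncation of `st_mtime`: with the old check, an edit made in the same second as the cached write and with the same byte count skipped the hash comparison entirely and was reported as unchanged. The reproduction below assumes a filesystem that records sub-second mtimes; the file contents mirror the sizes involved in the bug report.

```python
# Same size, same whole second, different fractional mtime -> only the
# patched (float) comparison triggers a re-check of the file hash.
import os
import tempfile
import time

fd, path = tempfile.mkstemp(suffix=".py")
os.close(fd)

with open(path, "w") as f:
    f.write("print(1)\n")                 # what black wrote after formatting (9 bytes)
cached = os.stat(path)                     # stats the cache would record

time.sleep(0.2)                            # quick follow-up edit
with open(path, "w") as f:
    f.write("print ()\n")                 # also 9 bytes
current = os.stat(path)

print(current.st_size != cached.st_size)              # False: size unchanged
print(int(current.st_mtime) != int(cached.st_mtime))  # usually False: same second
print(current.st_mtime != cached.st_mtime)            # True with sub-second mtime resolution
os.unlink(path)
```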
gh_patches_debug_2180
rasdani/github-patches
git_diff
GPflow__GPflow-2052
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing Reference to Manipulating Kernels Page # Documentation/tutorial notebooks In the [Kernel Design page](https://gpflow.github.io/GPflow/2.7.0/notebooks/tailor/kernel_design.html), there is a missing reference to the "Manipulating Kernels" notebook at the end. This notebook seems accessible from the old documentations, up to [2.6.4](https://gpflow.github.io/GPflow/2.6.4/notebooks/advanced/kernels.html). It seems for some reason, this page was removed. Maybe it was considered unnecessary, as some information is given in the getting started page but I disagree. I believe it gives a more comprehensive review of the available kernel implementations, so it would be nice to have it back. As a side note, for some reason 2.6.4 documentation insist on having dark theme for me, but I like the light theme better. Is there an option to change this? I am forced to clean the cookies to get a light background. </issue> <code> [start of doc/sphinx/notebooks/tailor/kernel_design.pct.py] 1 # --- 2 # jupyter: 3 # jupytext: 4 # formats: ipynb,.pct.py:percent 5 # text_representation: 6 # extension: .py 7 # format_name: percent 8 # format_version: '1.3' 9 # jupytext_version: 1.3.3 10 # kernelspec: 11 # display_name: Python 3 12 # language: python 13 # name: python3 14 # --- 15 16 # %% [markdown] 17 # # Kernel Design 18 # 19 # It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is 20 # \begin{equation} 21 # k(x, x') = \sigma^2 \text{min}(x, x') 22 # \end{equation} 23 # where $\sigma^2$ is a variance parameter. 24 25 # %% 26 import matplotlib.pyplot as plt 27 import numpy as np 28 import tensorflow as tf 29 30 import gpflow 31 from gpflow.utilities import positive, print_summary 32 33 plt.style.use("ggplot") 34 # %matplotlib inline 35 36 # %% [markdown] 37 # To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you 38 # only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`. 39 # 40 # #### `__init__` 41 # In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity. 42 # 43 # We've added a parameter to the kernel using the `Parameter` class. Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive. 44 # 45 # #### `K` 46 # This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`). 47 # 48 # Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. 
When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well. 49 # 50 # #### `K_diag` 51 # This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. It must return a one-dimensional vector, so we use TensorFlow's reshape command. 52 53 # %% 54 class Brownian(gpflow.kernels.Kernel): 55 def __init__(self): 56 super().__init__(active_dims=[0]) 57 self.variance = gpflow.Parameter(1.0, transform=positive()) 58 59 def K(self, X, X2=None): 60 if X2 is None: 61 X2 = X 62 return self.variance * tf.minimum( 63 X, tf.transpose(X2) 64 ) # this returns a 2D tensor 65 66 def K_diag(self, X): 67 return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor 68 69 70 k_brownian = Brownian() 71 print_summary(k_brownian, fmt="notebook") 72 73 # %% [markdown] 74 # We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance: 75 76 # %% 77 np.random.seed(23) # for reproducibility 78 79 80 def plotkernelsample(k, ax, xmin=0, xmax=3): 81 xx = np.linspace(xmin, xmax, 300)[:, None] 82 K = k(xx) 83 ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T) 84 ax.set_title("Samples " + k.__class__.__name__) 85 86 87 def plotkernelfunction(k, ax, xmin=0, xmax=3, other=0): 88 xx = np.linspace(xmin, xmax, 100)[:, None] 89 ax.plot(xx, k(xx, np.zeros((1, 1)) + other)) 90 ax.set_title(k.__class__.__name__ + " k(x, %.1f)" % other) 91 92 93 f, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True) 94 plotkernelfunction(k_brownian, axes[0], other=2.0) 95 plotkernelsample(k_brownian, axes[1]) 96 97 # %% [markdown] 98 # ## Using the kernel in a model 99 # 100 # Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call 101 # 102 # `k(X, X2)` 103 # 104 # which computes the kernel matrix. 105 # 106 # To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise. 107 108 # %% 109 np.random.seed(42) 110 X = np.random.rand(5, 1) 111 Y = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001 112 113 k1 = Brownian() 114 k2 = gpflow.kernels.Constant() 115 k = k1 + k2 116 117 m = gpflow.models.GPR((X, Y), kernel=k) 118 # m.likelihood.variance.assign(1e-6) 119 120 opt = gpflow.optimizers.Scipy() 121 opt.minimize(m.training_loss, variables=m.trainable_variables) 122 print_summary(m, fmt="notebook") 123 124 xx = np.linspace(0, 1.1, 100).reshape(100, 1) 125 mean, var = m.predict_y(xx) 126 plt.plot(X, Y, "kx", mew=2) 127 (line,) = plt.plot(xx, mean, lw=2) 128 _ = plt.fill_between( 129 xx[:, 0], 130 mean[:, 0] - 2 * np.sqrt(var[:, 0]), 131 mean[:, 0] + 2 * np.sqrt(var[:, 0]), 132 color=line.get_color(), 133 alpha=0.2, 134 ) 135 136 # %% [markdown] 137 # ## See also 138 # 139 # For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook. 140 [end of doc/sphinx/notebooks/tailor/kernel_design.pct.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/sphinx/notebooks/tailor/kernel_design.pct.py b/doc/sphinx/notebooks/tailor/kernel_design.pct.py --- a/doc/sphinx/notebooks/tailor/kernel_design.pct.py +++ b/doc/sphinx/notebooks/tailor/kernel_design.pct.py @@ -136,4 +136,4 @@ # %% [markdown] # ## See also # -# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook. +# For more details on how to manipulate existing kernels (or the one you just created!), please refer to the [kernels](../getting_started/kernels.ipynb) notebook.
{"golden_diff": "diff --git a/doc/sphinx/notebooks/tailor/kernel_design.pct.py b/doc/sphinx/notebooks/tailor/kernel_design.pct.py\n--- a/doc/sphinx/notebooks/tailor/kernel_design.pct.py\n+++ b/doc/sphinx/notebooks/tailor/kernel_design.pct.py\n@@ -136,4 +136,4 @@\n # %% [markdown]\n # ## See also\n #\n-# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.\n+# For more details on how to manipulate existing kernels (or the one you just created!), please refer to the [kernels](../getting_started/kernels.ipynb) notebook.\n", "issue": "Missing Reference to Manipulating Kernels Page\n# Documentation/tutorial notebooks\r\n\r\nIn the [Kernel Design page](https://gpflow.github.io/GPflow/2.7.0/notebooks/tailor/kernel_design.html), there is a missing reference to the \"Manipulating Kernels\" notebook at the end. This notebook seems accessible from the old documentations, up to [2.6.4](https://gpflow.github.io/GPflow/2.6.4/notebooks/advanced/kernels.html).\r\n\r\nIt seems for some reason, this page was removed. Maybe it was considered unnecessary, as some information is given in the getting started page but I disagree. I believe it gives a more comprehensive review of the available kernel implementations, so it would be nice to have it back.\r\n\r\nAs a side note, for some reason 2.6.4 documentation insist on having dark theme for me, but I like the light theme better. Is there an option to change this? I am forced to clean the cookies to get a light background.\n", "before_files": [{"content": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,.pct.py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Kernel Design\n#\n# It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is\n# \\begin{equation}\n# k(x, x') = \\sigma^2 \\text{min}(x, x')\n# \\end{equation}\n# where $\\sigma^2$ is a variance parameter.\n\n# %%\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport gpflow\nfrom gpflow.utilities import positive, print_summary\n\nplt.style.use(\"ggplot\")\n# %matplotlib inline\n\n# %% [markdown]\n# To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you\n# only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`.\n#\n# #### `__init__`\n# In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity.\n#\n# We've added a parameter to the kernel using the `Parameter` class. 
Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive.\n#\n# #### `K`\n# This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`).\n#\n# Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well.\n#\n# #### `K_diag`\n# This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. It must return a one-dimensional vector, so we use TensorFlow's reshape command.\n\n# %%\nclass Brownian(gpflow.kernels.Kernel):\n def __init__(self):\n super().__init__(active_dims=[0])\n self.variance = gpflow.Parameter(1.0, transform=positive())\n\n def K(self, X, X2=None):\n if X2 is None:\n X2 = X\n return self.variance * tf.minimum(\n X, tf.transpose(X2)\n ) # this returns a 2D tensor\n\n def K_diag(self, X):\n return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor\n\n\nk_brownian = Brownian()\nprint_summary(k_brownian, fmt=\"notebook\")\n\n# %% [markdown]\n# We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance:\n\n# %%\nnp.random.seed(23) # for reproducibility\n\n\ndef plotkernelsample(k, ax, xmin=0, xmax=3):\n xx = np.linspace(xmin, xmax, 300)[:, None]\n K = k(xx)\n ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T)\n ax.set_title(\"Samples \" + k.__class__.__name__)\n\n\ndef plotkernelfunction(k, ax, xmin=0, xmax=3, other=0):\n xx = np.linspace(xmin, xmax, 100)[:, None]\n ax.plot(xx, k(xx, np.zeros((1, 1)) + other))\n ax.set_title(k.__class__.__name__ + \" k(x, %.1f)\" % other)\n\n\nf, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True)\nplotkernelfunction(k_brownian, axes[0], other=2.0)\nplotkernelsample(k_brownian, axes[1])\n\n# %% [markdown]\n# ## Using the kernel in a model\n#\n# Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call\n#\n# `k(X, X2)`\n#\n# which computes the kernel matrix.\n#\n# To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. 
To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise.\n\n# %%\nnp.random.seed(42)\nX = np.random.rand(5, 1)\nY = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001\n\nk1 = Brownian()\nk2 = gpflow.kernels.Constant()\nk = k1 + k2\n\nm = gpflow.models.GPR((X, Y), kernel=k)\n# m.likelihood.variance.assign(1e-6)\n\nopt = gpflow.optimizers.Scipy()\nopt.minimize(m.training_loss, variables=m.trainable_variables)\nprint_summary(m, fmt=\"notebook\")\n\nxx = np.linspace(0, 1.1, 100).reshape(100, 1)\nmean, var = m.predict_y(xx)\nplt.plot(X, Y, \"kx\", mew=2)\n(line,) = plt.plot(xx, mean, lw=2)\n_ = plt.fill_between(\n xx[:, 0],\n mean[:, 0] - 2 * np.sqrt(var[:, 0]),\n mean[:, 0] + 2 * np.sqrt(var[:, 0]),\n color=line.get_color(),\n alpha=0.2,\n)\n\n# %% [markdown]\n# ## See also\n#\n# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.\n", "path": "doc/sphinx/notebooks/tailor/kernel_design.pct.py"}]}
2635
168